Dear Members,
I have written a program that computes the sum of the series 1..N in three different ways; for the parallel case the range is divided into chunks that are processed concurrently. For this reason,
Java 8 provides primitive streams (IntStream, LongStream, and DoubleStream) to avoid the boxing overhead of object streams and to offer specialized numeric
operations whenever possible. The program runs on a 4-core CPU on my machine, with the following results.
I would like to ask the forum about the data sources we select for concurrency during parallelization. Which is better for parallel processing: ordered streams or unordered streams? Or does it depend on the intermediate operations we select in the code?
Here is my sample code, which evaluates the three approaches: iterative, sequential stream, and parallel stream.
/*
Author: Dr. Noorul Hameed
Lecturer
Malaysia
30.6.2017
*/
package javaforkjoin;
import java.util.function.Function;
import java.util.stream.LongStream;
import java.util.stream.Stream;
/**
 * Benchmarks three ways of summing the series 1..n: a plain loop, a boxed
 * sequential stream, and a parallel primitive stream. Each variant is run
 * ten times and the fastest wall-clock time (in ms) is reported.
 */
public class JavaForkJoin {

    public static void main(String[] args) {
        // Must be set before the common ForkJoinPool is first used;
        // afterwards the property is read once and changes are ignored.
        System.setProperty("java.util.concurrent.ForkJoinPool.common.parallelism", "4");
        System.out.println("CPU core: " + Runtime.getRuntime().availableProcessors());
        System.out.println("Concurrent parallelism: "
                + System.getProperty("java.util.concurrent.ForkJoinPool.common.parallelism"));
        // BUG FIX: the first two println calls used "\\n" (a doubled escape),
        // printing a literal backslash-n instead of a newline; now "\n" like the third.
        System.out.println("Parallel sum done in: "
                + measureSumPerf(JavaForkJoin::parallelSum, 10_000_000) + " msecs\n");
        System.out.println("Sequential sum done in: "
                + measureSumPerf(JavaForkJoin::sequentialSum, 10_000_000) + " msecs\n");
        System.out.println("Iterative sum done in: "
                + measureSumPerf(JavaForkJoin::iterativeSum, 10_000_000) + " msecs\n");
    }

    /**
     * Sums 1..n with a plain for loop over primitive longs (no boxing).
     *
     * @param n upper bound of the series, inclusive; returns 0 when n < 1
     * @return the sum 1 + 2 + ... + n
     */
    public static long iterativeSum(long n) {
        long result = 0;
        for (long i = 1L; i <= n; i++) {
            result += i;
        }
        return result;
    }

    /**
     * Sums 1..n with a sequential boxed {@code Stream<Long>}. Deliberately the
     * slow baseline: every element is boxed and {@code iterate} is inherently
     * sequential and hard to split.
     *
     * @param n number of elements taken from the stream
     * @return the sum 1 + 2 + ... + n
     */
    public static long sequentialSum(long n) {
        return Stream.iterate(1L, i -> i + 1)
                .limit(n)
                .reduce(0L, Long::sum);
    }

    /**
     * Sums 1..n with a parallel primitive {@link LongStream}.
     * {@code rangeClosed} produces a sized, easily splittable source, so it
     * parallelizes well — unlike the boxed {@code Stream.iterate} version,
     * which was tried here before and performed poorly.
     *
     * @param n upper bound of the range, inclusive
     * @return the sum 1 + 2 + ... + n
     */
    public static long parallelSum(long n) {
        return LongStream.rangeClosed(1, n)
                .parallel()            // intermediate op: marks the pipeline parallel
                .reduce(0L, Long::sum); // terminal op: associative, safe in parallel
    }

    /**
     * Runs {@code adder} ten times on {@code n} and returns the fastest
     * duration in milliseconds. Prints each intermediate result.
     * NOTE(review): the boxed {@code Function<Long, Long>} signature is kept
     * for compatibility with existing callers; {@code LongUnaryOperator}
     * would avoid boxing at the call boundary.
     *
     * @param adder the summing function under test
     * @param n     argument passed to {@code adder}
     * @return the minimum observed duration in milliseconds
     */
    public static long measureSumPerf(Function<Long, Long> adder, long n) {
        long fastest = Long.MAX_VALUE;
        for (int i = 0; i < 10; i++) {
            long start = System.nanoTime();
            long sum = adder.apply(n);
            long duration = (System.nanoTime() - start) / 1_000_000;
            System.out.println("Result: " + sum);
            if (duration < fastest) {
                fastest = duration;
            }
        }
        return fastest;
    }
}
Thanks