OpenJDK / lambda / lambda / jdk
changeset 8287:e71845b83f26
javadoc nits
author | mduigou
date | Mon, 22 Apr 2013 09:40:26 -0700
parents | 2783a9b4beaf
children | 70ed67c54fa6
files | src/share/classes/java/util/stream/AbstractPipeline.java, src/share/classes/java/util/stream/AbstractSpinedBuffer.java, src/share/classes/java/util/stream/SpinedBuffer.java
diffstat | 3 files changed, 103 insertions(+), 49 deletions(-)
--- a/src/share/classes/java/util/stream/AbstractPipeline.java	Mon Apr 22 09:12:51 2013 -0700
+++ b/src/share/classes/java/util/stream/AbstractPipeline.java	Mon Apr 22 09:40:26 2013 -0700
@@ -69,25 +69,27 @@
  * segment. In all cases, the source data is not consumed until a terminal
  * operation begins.
  *
- * @param <E_IN>  Type of input elements.
- * @param <E_OUT> Type of output elements.
- * @param <S> Type of the subclass implementing {@code BaseStream}
+ * @param <E_IN>  type of input elements
+ * @param <E_OUT> type of output elements
+ * @param <S> type of the subclass implementing {@code BaseStream}
  * @since 1.8
  */
 abstract class AbstractPipeline<E_IN, E_OUT, S extends BaseStream<E_OUT, S>>
         extends PipelineHelper<E_OUT> {
     /**
      * Backlink to the head of the pipeline chain (self if this is the source
-     * stage)
+     * stage).
      */
     private final AbstractPipeline sourceStage;
 
-    /** The "upstream" pipeline, or null if this is the source stage */
+    /**
+     * The "upstream" pipeline, or null if this is the source stage.
+     */
     private final AbstractPipeline previousStage;
 
     /**
      * The operation flags for the intermediate operation represented by this
-     * pipeline object
+     * pipeline object.
      */
     protected final int sourceOrOpFlags;
 
@@ -118,6 +120,7 @@
      * null.
      */
     private Spliterator<?> sourceSpliterator;
+
     /**
      * The source supplier. Only valid for the head pipeline. Before the
      * pipeline is consumed if non-null then {@code sourceSpliterator} must be
@@ -125,7 +128,9 @@
      */
     private Supplier<? extends Spliterator<?>> sourceSupplier;
 
-    /** True if this pipeline has been linked or consumed */
+    /**
+     * True if this pipeline has been linked or consumed
+     */
     private boolean linkedOrConsumed;
 
     /**
@@ -165,9 +170,9 @@
      * Constructor for the head of a stream pipeline.
      *
      * @param source {@code Spliterator} describing the stream source
-     * @param sourceFlags The source flags for the stream source, described in
+     * @param sourceFlags the source flags for the stream source, described in
      * {@link StreamOpFlag}
-     * @param parallel True if the pipeline is parallel
+     * @param parallel {@code true} if the pipeline is parallel
      */
     AbstractPipeline(Spliterator<?> source,
                      int sourceFlags, boolean parallel) {
@@ -190,8 +195,7 @@
      * @param opFlags the operation flags for the new stage, described in
      * {@link StreamOpFlag}
      */
-    AbstractPipeline(AbstractPipeline<?, E_IN, ?> previousStage,
-                     int opFlags) {
+    AbstractPipeline(AbstractPipeline<?, E_IN, ?> previousStage, int opFlags) {
         if (previousStage.linkedOrConsumed)
             throw new IllegalStateException("stream has already been operated upon");
         previousStage.linkedOrConsumed = true;
@@ -212,8 +216,8 @@
     /**
      * Evaluate the pipeline with a terminal operation to produce a result.
      *
+     * @param <R> the type of result
      * @param terminalOp the terminal operation to be applied to the pipeline.
-     * @param <R> the type of result
      * @return the result
      */
     final <R> R evaluate(TerminalOp<E_OUT, R> terminalOp) {
@@ -282,20 +286,26 @@
 
     // BaseStream
 
-    /** Implements {@link BaseStream#sequential()} */
+    /**
+     * Implements {@link BaseStream#sequential()}
+     */
     public final S sequential() {
         sourceStage.parallel = false;
         return (S) this;
     }
 
-    /** Implements {@link BaseStream#parallel()} */
+    /**
+     * Implements {@link BaseStream#parallel()}
+     */
    public final S parallel() {
         sourceStage.parallel = true;
         return (S) this;
     }
 
     // Primitive specialization use co-variant overrides, hence is not final
-    /** Implements {@link BaseStream#spliterator()} */
+    /**
+     * Implements {@link BaseStream#spliterator()}
+     */
     public Spliterator<E_OUT> spliterator() {
         if (linkedOrConsumed)
             throw new IllegalStateException("stream has already been operated upon");
@@ -321,7 +331,9 @@
         }
     }
 
-    /** Implements {@link BaseStream#isParallel()} */
+    /**
+     * Implements {@link BaseStream#isParallel()}
+     */
     public final boolean isParallel() {
         return sourceStage.parallel;
     }
@@ -346,6 +358,7 @@
      * some of these need to be adjusted, as well as adjusting for flags from
      * the terminal operation (such as back-propagating UNORDERED).
      * Need not be called for a sequential execution.
+     *
      * @param terminalFlags Operation flags for the terminal operation
      */
     private void parallelPrepare(int terminalFlags) {
@@ -513,6 +526,7 @@
      * then it's output shape corresponds to the shape of the source.
      * Otherwise, it's output shape corresponds to the output shape of the
      * associated operation.
+     *
      * @return the output shape
      */
     abstract StreamShape getOutputShape();
@@ -556,6 +570,7 @@
      * Traverse the elements of a spliterator compatible with this stream shape,
      * pushing those elements into a sink. If the sink requests cancellation,
      * no further elements will be pulled or pushed.
+     *
      * @param spliterator the spliterator to pull elements from
      * @param sink the sink to push elements to
      */
@@ -564,11 +579,11 @@
     /**
      * Make a node builder compatible with this stream shape.
      *
-     * @param exactSizeIfKnown if >=0, then a node builder will be created that
-     * has a fixed capacity of at most sizeIfKnown elements. If < 0, then the
-     * node builder has an unfixed capacity. A fixed capacity node builder will
-     * throw exceptions if an element is added after builder has reached
-     * capacity, or is built before the builder has reached capacity.
+     * @param exactSizeIfKnown if {@literal >=0}, then a node builder will be created that
+     * has a fixed capacity of at most sizeIfKnown elements. If {@literal < 0},
+     * then the node builder has an unfixed capacity. A fixed capacity node
+     * builder will throw exceptions if an element is added after builder has
+     * reached capacity, or is built before the builder has reached capacity.
      * @param generator the array generator to be used to create instances of a
     * T[] array. For implementations supporting primitive nodes, this parameter
     * may be ignored.
@@ -586,7 +601,6 @@
      * {@link #opEvaluateParallel(PipelineHelper, java.util.Spliterator, java.util.function.IntFunction)}
      * must be overridden.
      *
-     * @implSpec The default implementation returns {@code false}.
      * @return {@code true} if this operation is stateful
      */
     abstract boolean opIsStateful();
@@ -598,7 +612,7 @@
      * the provided {@code Sink}.
      *
      * @apiNote
-     * <p>The implementation may use the {@code flags} parameter to optimize the
+     * The implementation may use the {@code flags} parameter to optimize the
      * sink wrapping. For example, if the input is already {@code DISTINCT},
      * the implementation for the {@code Stream#distinct()} method could just
      * return the sink it was passed.
@@ -618,6 +632,7 @@
      * operations. Only called on stateful operations. If {@link
      * #opIsStateful()} returns true then implementations must override the
      * default implementation.
+     *
      * @implSpec The default implementation always throw
      * {@code UnsupportedOperationException}.
      *
@@ -640,9 +655,6 @@
      * result here; it is preferable, if possible, to describe the result via a
      * lazily evaluated spliterator.
      *
-     * @param helper the pipeline helper
-     * @param spliterator the source {@code Spliterator}
-     * @return a {@code Spliterator} describing the result of the evaluation
      * @implSpec The default implementation behaves as if:
      * <pre>{@code
      *     return evaluateParallel(helper, i -> (E_OUT[]) new
@@ -650,6 +662,10 @@
      * }</pre>
      * and is suitable for implementations that cannot do better than a full
      * synchronous evaluation.
+     *
+     * @param helper the pipeline helper
+     * @param spliterator the source {@code Spliterator}
+     * @return a {@code Spliterator} describing the result of the evaluation
      */
     <P_IN> Spliterator<E_OUT> opEvaluateParallelLazy(PipelineHelper<E_OUT> helper,
                                                      Spliterator<P_IN> spliterator) {
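A note on the `linkedOrConsumed` field documented above: it backs the single-use restriction that is visible through the public stream API. The following standalone sketch (not part of this changeset) exercises, via the public API, the `IllegalStateException` path that the stage constructor guards against:

```java
import java.util.stream.Stream;

public class StreamReuseDemo {
    public static void main(String[] args) {
        Stream<String> s = Stream.of("a", "b", "c");

        // First use links a new stage onto the pipeline; this is allowed.
        long count = s.filter(x -> !x.isEmpty()).count();
        System.out.println("count = " + count);   // 3

        // A second use of the same stream object fails: the stage has already
        // been linked/consumed, mirroring the linkedOrConsumed check above.
        try {
            s.forEach(System.out::println);
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        }
    }
}
```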
--- a/src/share/classes/java/util/stream/AbstractSpinedBuffer.java	Mon Apr 22 09:12:51 2013 -0700
+++ b/src/share/classes/java/util/stream/AbstractSpinedBuffer.java	Mon Apr 22 09:40:26 2013 -0700
@@ -31,46 +31,59 @@
  * @since 1.8
  */
 abstract class AbstractSpinedBuffer {
-    /** Minimum power-of-two for the first chunk */
+    /**
+     * Minimum power-of-two for the first chunk.
+     */
     public static final int MIN_CHUNK_POWER = 4;
 
-    /** Minimum size for the first chunk */
+    /**
+     * Minimum size for the first chunk.
+     */
     public static final int MIN_CHUNK_SIZE = 1 << MIN_CHUNK_POWER;
 
-    /** Max power-of-two for chunks */
+    /**
+     * Max power-of-two for chunks.
+     */
     public static final int MAX_CHUNK_POWER = 30;
 
-    /** Minimum array size for array-of-chunks */
+    /**
+     * Minimum array size for array-of-chunks.
+     */
     public static final int MIN_SPINE_SIZE = 8;
 
-    /** log2 of the size of the first chunk */
+    /**
+     * log2 of the size of the first chunk.
+     */
     protected final int initialChunkPower;
 
     /**
      * Index of the *next* element to write; may point into, or just outside of,
-     * the current chunk
+     * the current chunk.
      */
     protected int elementIndex;
 
     /**
      * Index of the *current* chunk in the spine array, if the spine array is
-     * non-null
+     * non-null.
      */
     protected int spineIndex;
 
-    /* Count of elements in all prior chunks */
+    /**
+     * Count of elements in all prior chunks.
+     */
     protected long[] priorElementCount;
 
     /**
-     * Construct with an initial capacity of 16
+     * Construct with an initial capacity of 16.
     */
     protected AbstractSpinedBuffer() {
         this.initialChunkPower = MIN_CHUNK_POWER;
     }
 
     /**
-     * Construct with a specified initial capacity
+     * Construct with a specified initial capacity.
+     *
      * @param initialCapacity The minimum expected number of elements
      */
     protected AbstractSpinedBuffer(int initialCapacity) {
@@ -81,19 +94,25 @@
                                      Integer.SIZE - Integer.numberOfLeadingZeros(initialCapacity - 1));
     }
 
-    /** Is the buffer currently empty? */
+    /**
+     * Is the buffer currently empty?
+     */
     public boolean isEmpty() {
         return (spineIndex == 0) && (elementIndex == 0);
     }
 
-    /** How many elements are currently in the buffer? */
+    /**
+     * How many elements are currently in the buffer?
+     */
     public long count() {
         return (spineIndex == 0)
                ? elementIndex
               : priorElementCount[spineIndex] + elementIndex;
     }
 
-    /** How big should the nth chunk be? */
+    /**
+     * How big should the nth chunk be?
+     */
     protected int chunkSize(int n) {
         int power = (n == 0 || n == 1) ? initialChunkPower :
@@ -101,6 +120,8 @@
         return 1 << power;
     }
 
-    /** Remove all data from the buffer */
+    /**
+     * Remove all data from the buffer
+     */
     public abstract void clear();
 }
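For reference, the chunk-sizing scheme that `MIN_CHUNK_POWER`, `MAX_CHUNK_POWER` and `chunkSize(int)` describe above can be sketched as below. The growth expression is truncated in the hunk, so the `Math.min(initialChunkPower + n - 1, MAX_CHUNK_POWER)` term here is an assumption consistent with the documented constants, not a quote of the changeset:

```java
// Standalone sketch (not from the changeset) of the chunk-size progression the
// javadoc above describes: chunks start at 2^MIN_CHUNK_POWER elements and grow
// by powers of two, capped at 2^MAX_CHUNK_POWER.
public class ChunkSizeSketch {
    static final int MIN_CHUNK_POWER = 4;
    static final int MAX_CHUNK_POWER = 30;

    static int chunkSize(int initialChunkPower, int n) {
        // Assumed growth rule: each chunk after the first two doubles in size.
        int power = (n == 0 || n == 1)
                    ? initialChunkPower
                    : Math.min(initialChunkPower + n - 1, MAX_CHUNK_POWER);
        return 1 << power;
    }

    public static void main(String[] args) {
        // With the default initial power of 4: 16, 16, 32, 64, 128, ...
        for (int n = 0; n < 6; n++) {
            System.out.println("chunk " + n + " -> " + chunkSize(MIN_CHUNK_POWER, n));
        }
    }
}
```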
--- a/src/share/classes/java/util/stream/SpinedBuffer.java	Mon Apr 22 09:12:51 2013 -0700
+++ b/src/share/classes/java/util/stream/SpinedBuffer.java	Mon Apr 22 09:40:26 2013 -0700
@@ -48,7 +48,6 @@
  * {@link ArrayList}, as when the capacity of the list needs to be increased
  * no copying of elements is required. This is usually beneficial in the case
  * where the results will be traversed a small number of times.
- * </p>
  *
  * @param <E> the type of elements in this list
  * @since 1.8
@@ -81,7 +80,9 @@
      */
     protected E[] curChunk;
 
-    /** All chunks, or null if there is only one chunk */
+    /**
+     * All chunks, or null if there is only one chunk
+     */
     protected E[][] spine;
 
     /**
@@ -104,7 +105,9 @@
         curChunk = (E[]) new Object[1 << initialChunkPower];
     }
 
-    /** Returns the current capacity of the buffer */
+    /**
+     * Returns the current capacity of the buffer
+     */
     protected long capacity() {
         return (spineIndex == 0)
                ? curChunk.length
@@ -119,7 +122,9 @@
         }
     }
 
-    /** Ensure that the buffer has at least capacity to hold the target size */
+    /**
+     * Ensure that the buffer has at least capacity to hold the target size
+     */
     protected final void ensureCapacity(long targetSize) {
         long capacity = capacity();
         if (targetSize > capacity) {
@@ -138,12 +143,16 @@
         }
     }
 
-    /** Force the buffer to increase its capacity */
+    /**
+     * Force the buffer to increase its capacity
+     */
     protected void increaseCapacity() {
         ensureCapacity(capacity() + 1);
     }
 
-    /** Retrieve the element at the specified index */
+    /**
+     * Retrieve the element at the specified index
+     */
     public E get(long index) {
         // @@@ can further optimize by caching last seen spineIndex,
         // which is going to be right most of the time
@@ -164,7 +173,10 @@
         throw new IndexOutOfBoundsException(Long.toString(index));
     }
 
-    /** Copy the elements, starting at the specified offset, into the specified array */
+    /**
+     * Copy the elements, starting at the specified offset, into the specified
+     * array
+     */
     public void copyInto(E[] array, int offset) {
         long finalOffset = offset + count();
         if (finalOffset > array.length || finalOffset < offset) {
@@ -184,7 +196,10 @@
         }
     }
 
-    /** Create a new array using the specified array factory, and copy the elements into it */
+    /**
+     * Create a new array using the specified array factory, and copy the
+     * elements into it
+     */
     public E[] asArray(IntFunction<E[]> arrayFactory) {
         // @@@ will fail for size == MAX_VALUE
         E[] result = arrayFactory.apply((int) count());
@@ -251,7 +266,9 @@
     private static final int SPLITERATOR_CHARACTERISTICS = Spliterator.SIZED | Spliterator.ORDERED
                                                            | Spliterator.SUBSIZED;
 
-    /** Return a {@link Spliterator} describing the contents of the buffer */
+    /**
+     * Return a {@link Spliterator} describing the contents of the buffer
+     */
     public Spliterator<E> spliterator() {
         return new Spliterator<E>() {
             // The current spine index
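The class javadoc above contrasts this buffer with `ArrayList`: growth never copies previously stored elements, at the cost of slower element access. A minimal standalone sketch of that "spine of chunks" idea follows; `SpinedBuffer` itself is package-private, and the real class additionally grows its chunk sizes and tracks `priorElementCount`, so this is an illustration rather than the JDK implementation:

```java
import java.util.Arrays;

// Sketch of a spined buffer: elements live in fixed-size chunks; growing the
// buffer appends a new chunk to the spine instead of copying existing elements.
class MiniSpinedBuffer<E> {
    private static final int CHUNK_SIZE = 16;    // fixed chunk size for the sketch
    private Object[][] spine = new Object[8][];  // the "spine": an array of chunks
    private int chunkCount = 0;                  // chunks currently allocated
    private int elementIndex = 0;                // write index within the last chunk

    public void accept(E e) {
        if (chunkCount == 0 || elementIndex == CHUNK_SIZE) {
            if (chunkCount == spine.length)
                spine = Arrays.copyOf(spine, spine.length * 2); // copies chunk refs, not elements
            spine[chunkCount++] = new Object[CHUNK_SIZE];
            elementIndex = 0;
        }
        spine[chunkCount - 1][elementIndex++] = e;
    }

    @SuppressWarnings("unchecked")
    public E get(long index) {
        // Locate the chunk, then the slot within it; this extra indirection is
        // why random access is slower than a flat ArrayList.
        int chunk = (int) (index / CHUNK_SIZE);
        int within = (int) (index % CHUNK_SIZE);
        return (E) spine[chunk][within];
    }

    public long count() {
        // All chunks except the last are full.
        return chunkCount == 0 ? 0 : (long) (chunkCount - 1) * CHUNK_SIZE + elementIndex;
    }
}
```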