Use Global Ords in Cardinality aggregation for low cardinality fields (#62560)

New cardinality aggregator implementation that uses global ordinals. When the per-ordinal bit sets are estimated to be cheaper than the HyperLogLog++ counters, terms are collected as one bit per (bucket, global ordinal) and each distinct term is hashed only once at post-collection time; otherwise the factory falls back to the existing CardinalityAggregator.
Ignacio Vera, 5 years ago
commit 1dea28a878

+ 1 - 5
rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml

@@ -280,13 +280,9 @@ setup:
               cardinality:
                 field: string_field
   - match: { aggregations.distinct_string.value: 1 }
+  - match: { profile.shards.0.aggregations.0.type: "GlobalOrdCardinalityAggregator" }
   - gt: { profile.shards.0.aggregations.0.breakdown.initialize: 0 }
   - gt: { profile.shards.0.aggregations.0.breakdown.build_leaf_collector: 0 }
   - gt: { profile.shards.0.aggregations.0.breakdown.collect: 0 }
   - gt: { profile.shards.0.aggregations.0.breakdown.build_aggregation: 0 }
   - gt: { profile.shards.0.aggregations.0.breakdown.post_collection: 0 }
-  - match: { profile.shards.0.aggregations.0.debug.empty_collectors_used: 0 }
-  - match: { profile.shards.0.aggregations.0.debug.numeric_collectors_used: 0 }
-  - gt: { profile.shards.0.aggregations.0.debug.ordinals_collectors_used: 0 }
-  - match: { profile.shards.0.aggregations.0.debug.ordinals_collectors_overhead_too_high: 0 }
-  - match: { profile.shards.0.aggregations.0.debug.string_hashing_collectors_used: 0 }

+ 8 - 0
server/src/main/java/org/elasticsearch/common/util/BitArray.java

@@ -84,6 +84,14 @@ public final class BitArray implements Releasable {
         return Long.MAX_VALUE;
     }
 
+    public long cardinality() {
+        long cardinality = 0;
+        for (int i = 0; i < bits.size(); ++i) {
+            cardinality += Long.bitCount(bits.get(i));
+        }
+        return cardinality;
+    }
+
     /**
      * Clear the {@code index}th bit.
      */

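A minimal usage sketch for the new cardinality() method (the wrapper class is hypothetical; BitArray and the non-recycling BigArrays instance are the same utilities the new test below uses):

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.BitArray;

    class BitArrayCardinalitySketch {
        static long distinctSetBits() {
            try (BitArray bits = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) {
                bits.set(3);
                bits.set(100);
                bits.set(3); // setting a bit twice does not change the count
                return bits.cardinality(); // 2: sums Long.bitCount over the backing words
            }
        }
    }
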
+ 4 - 2
server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java

@@ -67,7 +67,8 @@ public abstract class AbstractHyperLogLogPlusPlus extends AbstractCardinalityAlg
             // we use a sparse structure for linear counting
             AbstractLinearCounting.HashesIterator iterator = getLinearCounting(bucketOrd);
             int size = Math.toIntExact(iterator.size());
-            HyperLogLogPlusPlusSparse clone = new HyperLogLogPlusPlusSparse(precision(), bigArrays, size, 1);
+            HyperLogLogPlusPlusSparse clone = new HyperLogLogPlusPlusSparse(precision(), bigArrays, 1);
+            clone.ensureCapacity(0, size);
             while (iterator.next()) {
                 clone.addEncoded(0, iterator.value());
             }
@@ -127,7 +128,8 @@ public abstract class AbstractHyperLogLogPlusPlus extends AbstractCardinalityAlg
         if (algorithm == LINEAR_COUNTING) {
             // we use a sparse structure for linear counting
             final long size = in.readVLong();
-            HyperLogLogPlusPlusSparse counts  = new HyperLogLogPlusPlusSparse(precision, bigArrays, Math.toIntExact(size), 1);
+            HyperLogLogPlusPlusSparse counts = new HyperLogLogPlusPlusSparse(precision, bigArrays, 1);
+            counts.ensureCapacity(0, size);
             for (long i = 0; i < size; ++i) {
                 counts.addEncoded(0, in.readInt());
             }

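Both call sites now follow the same two-step pattern: construct the sparse structure with a single bucket, reserve room with ensureCapacity, then replay the encoded hashes. A minimal sketch of that pattern (callable only from the same package, since these methods are not public; the encoded inputs are illustrative stand-ins):

    // Two-step pattern introduced by this change: size the bucket, then add to it.
    static long rebuild(BigArrays bigArrays, int precision, int[] encodedHashes) {
        try (HyperLogLogPlusPlusSparse counts = new HyperLogLogPlusPlusSparse(precision, bigArrays, 1)) {
            counts.ensureCapacity(0, encodedHashes.length); // must precede addEncoded
            for (int encoded : encodedHashes) {
                counts.addEncoded(0, encoded);
            }
            return counts.cardinality(0); // linear-counting estimate
        }
    }
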
+ 37 - 1
server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java

@@ -19,18 +19,21 @@
 
 package org.elasticsearch.search.aggregations.metrics;
 
+import org.apache.lucene.index.LeafReaderContext;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.CardinalityUpperBound;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
 import org.elasticsearch.search.internal.SearchContext;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory {
@@ -48,7 +51,40 @@ class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory {
     }
 
     public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
-        builder.register(CardinalityAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.ALL_CORE, CardinalityAggregator::new, true);
+        builder.register(CardinalityAggregationBuilder.REGISTRY_KEY,
+            CoreValuesSourceType.ALL_CORE,
+            (name, valuesSourceConfig, precision, context, parent, metadata) -> {
+                // check global ords
+                if (valuesSourceConfig.hasValues()) {
+                    final ValuesSource valuesSource = valuesSourceConfig.getValuesSource();
+                    if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) {
+                        final ValuesSource.Bytes.WithOrdinals source = (ValuesSource.Bytes.WithOrdinals) valuesSource;
+                        if (useGlobalOrds(context, source, precision)) {
+                            final long maxOrd = source.globalMaxOrd(context.searcher());
+                            return new GlobalOrdCardinalityAggregator(name, source, precision, Math.toIntExact(maxOrd),
+                                context, parent, metadata);
+                        }
+                    }
+                }
+                // fall back to the default aggregator
+                return new CardinalityAggregator(name, valuesSourceConfig, precision, context, parent, metadata);
+            }, true);
+    }
+
+    private static boolean useGlobalOrds(SearchContext context,
+                                         ValuesSource.Bytes.WithOrdinals source,
+                                         int precision) throws IOException {
+        final List<LeafReaderContext> leaves = context.searcher().getIndexReader().leaves();
+        // we compute the total number of terms across all segments
+        long total = 0;
+        for (LeafReaderContext leaf : leaves) {
+            total += source.ordinalsValues(leaf).getValueCount();
+        }
+        final long countsMemoryUsage = HyperLogLogPlusPlus.memoryUsage(precision);
+        // when there is more than one leaf we assume 25% of the values are repeated
+        // across segments, hence 3 bytes per ordinal instead of 4
+        final long ordinalsMemoryUsage = leaves.size() == 1 ? total * 4L : total * 3L;
+        // we do not account for the size of the bit sets, which is at most ~1MB per bucket
+        return ordinalsMemoryUsage < countsMemoryUsage;
     }
 
     @Override

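For a sense of when the global-ords path is taken: assuming HyperLogLogPlusPlus.memoryUsage(p) is on the order of 2^p bytes (roughly one byte per register; the exact figure is an assumption here), precision 14 budgets about 16384 bytes, so a single-segment shard uses global ords only when the field has fewer than ~4096 terms. This is what makes the change a low-cardinality optimization. A hypothetical standalone sketch of the same arithmetic:

    // Hypothetical illustration of the useGlobalOrds heuristic; the assumed HLL
    // footprint (~2^precision bytes) is not taken from the Elasticsearch API.
    public class GlobalOrdsHeuristicSketch {
        static boolean wouldUseGlobalOrds(long totalTerms, int segments, long hllBytes) {
            // 4 bytes per ordinal for a single segment; 3 bytes otherwise,
            // assuming 25% of the terms repeat across segments
            long ordinalsBytes = segments == 1 ? totalTerms * 4L : totalTerms * 3L;
            return ordinalsBytes < hllBytes;
        }

        public static void main(String[] args) {
            long hllBytes = 1L << 14; // assumed budget for precision 14
            System.out.println(wouldUseGlobalOrds(1_000, 1, hllBytes));  // true: global ords
            System.out.println(wouldUseGlobalOrds(50_000, 1, hllBytes)); // false: default HLL++ path
        }
    }
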
+ 172 - 0
server/src/main/java/org/elasticsearch/search/aggregations/metrics/GlobalOrdCardinalityAggregator.java

@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.hash.MurmurHash3;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.BitArray;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * An aggregator that computes approximate counts of unique values
+ * using global ords.
+ */
+public class GlobalOrdCardinalityAggregator extends NumericMetricsAggregator.SingleValue {
+
+    private final ValuesSource.Bytes.WithOrdinals valuesSource;
+    private final BigArrays bigArrays;
+    private final int maxOrd;
+    private final int precision;
+
+    // Build at post-collection phase
+    @Nullable
+    private HyperLogLogPlusPlusSparse counts;
+    private SortedSetDocValues values;
+    private ObjectArray<BitArray> visitedOrds;
+
+    public GlobalOrdCardinalityAggregator(
+            String name,
+            ValuesSource.Bytes.WithOrdinals valuesSource,
+            int precision,
+            int maxOrd,
+            SearchContext context,
+            Aggregator parent,
+            Map<String, Object> metadata) throws IOException {
+        super(name, context, parent, metadata);
+        this.valuesSource = valuesSource;
+        this.precision = precision;
+        this.maxOrd = maxOrd;
+        this.bigArrays = context.bigArrays();
+        this.visitedOrds = bigArrays.newObjectArray(1);
+    }
+
+    @Override
+    public ScoreMode scoreMode() {
+        return valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
+    }
+
+    @Override
+    public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
+            final LeafBucketCollector sub) throws IOException {
+        values = valuesSource.globalOrdinalsValues(ctx);
+        return new LeafBucketCollector() {
+            @Override
+            public void collect(int doc, long bucketOrd) throws IOException {
+                visitedOrds = bigArrays.grow(visitedOrds, bucketOrd + 1);
+                BitArray bits = visitedOrds.get(bucketOrd);
+                if (bits == null) {
+                    bits = new BitArray(maxOrd, bigArrays);
+                    visitedOrds.set(bucketOrd, bits);
+                }
+                if (values.advanceExact(doc)) {
+                    for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
+                        bits.set((int) ord);
+                    }
+                }
+            }
+        };
+    }
+
+    @Override
+    protected void doPostCollection() throws IOException {
+        counts = new HyperLogLogPlusPlusSparse(precision, bigArrays, visitedOrds.size());
+        try (LongArray hashes = bigArrays.newLongArray(maxOrd, false)) {
+            try (BitArray allVisitedOrds = new BitArray(maxOrd, bigArrays)) {
+                for (long bucket = visitedOrds.size() - 1; bucket >= 0; --bucket) {
+                    final BitArray bits = visitedOrds.get(bucket);
+                    if (bits != null) {
+                        allVisitedOrds.or(bits);
+                    }
+                }
+
+                final MurmurHash3.Hash128 hash = new MurmurHash3.Hash128();
+                for (long ord = allVisitedOrds.nextSetBit(0); ord < Long.MAX_VALUE;
+                     ord = ord + 1 < maxOrd ? allVisitedOrds.nextSetBit(ord + 1) : Long.MAX_VALUE) {
+                    final BytesRef value = values.lookupOrd(ord);
+                    MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, hash);
+                    hashes.set(ord, hash.h1);
+                }
+            }
+            for (long bucket = visitedOrds.size() - 1; bucket >= 0; --bucket) {
+                try (BitArray bits = visitedOrds.get(bucket)) {
+                    if (bits != null) {
+                        visitedOrds.set(bucket, null); // remove bitset from array
+                        counts.ensureCapacity(bucket, bits.cardinality());
+                        for (long ord = bits.nextSetBit(0); ord < Long.MAX_VALUE;
+                             ord = ord + 1 < maxOrd ? bits.nextSetBit(ord + 1) : Long.MAX_VALUE) {
+                            counts.collect(bucket, hashes.get(ord));
+                        }
+                    }
+                }
+            }
+            // free resources
+            Releasables.close(visitedOrds);
+            visitedOrds = null;
+        }
+    }
+
+    @Override
+    public double metric(long owningBucketOrd) {
+        return counts == null ? 0 : counts.cardinality(owningBucketOrd);
+    }
+
+    @Override
+    public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+        if (counts == null || owningBucketOrdinal >= counts.maxOrd() || counts.cardinality(owningBucketOrdinal) == 0) {
+            return buildEmptyAggregation();
+        }
+        // We need to build a copy because the returned Aggregation needs to remain usable after
+        // this Aggregator (and its HLL++ counters) is released.
+        AbstractHyperLogLogPlusPlus copy = counts.clone(owningBucketOrdinal, BigArrays.NON_RECYCLING_INSTANCE);
+        return new InternalCardinality(name, copy, metadata());
+    }
+
+    @Override
+    public InternalAggregation buildEmptyAggregation() {
+        return new InternalCardinality(name, null, metadata());
+    }
+
+    @Override
+    protected void doClose() {
+        if (visitedOrds != null) {
+            for (int i = 0; i < visitedOrds.size(); i++) {
+                Releasables.close(visitedOrds.get(i));
+            }
+        }
+        Releasables.close(visitedOrds, counts);
+    }
+}

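Note the two-phase design: getLeafCollector only flips one bit per (bucket, global ordinal), and doPostCollection then hashes each distinct term exactly once, reusing the hash for every bucket that saw the term. A minimal sketch of that hashing step (the helper class is hypothetical; MurmurHash3.hash128 is the same Elasticsearch utility called above):

    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.hash.MurmurHash3;

    class HashOncePerTerm {
        static long[] hashTerms(BytesRef[] distinctTerms) {
            MurmurHash3.Hash128 scratch = new MurmurHash3.Hash128();
            long[] hashes = new long[distinctTerms.length];
            for (int ord = 0; ord < distinctTerms.length; ord++) {
                BytesRef term = distinctTerms[ord];
                MurmurHash3.hash128(term.bytes, term.offset, term.length, 0, scratch);
                hashes[ord] = scratch.h1; // HLL++ only consumes the first 64 bits
            }
            return hashes;
        }
    }
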
+ 52 - 47
server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java

@@ -23,6 +23,7 @@ import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.IntArray;
+import org.elasticsearch.common.util.ObjectArray;
 
 /**
  * AbstractHyperLogLogPlusPlus instance that only supports linear counting. The maximum number of hashes supported
@@ -33,15 +34,16 @@ import org.elasticsearch.common.util.IntArray;
  */
 final class HyperLogLogPlusPlusSparse extends AbstractHyperLogLogPlusPlus implements Releasable {
 
+    // TODO: consider a sparse HLL structure
     private final LinearCounting lc;
 
     /**
     * Create a sparse HLL++ algorithm where capacity is the maximum number of hashes this structure can hold
      * per bucket.
      */
-    HyperLogLogPlusPlusSparse(int precision, BigArrays bigArrays, int capacity, int initialSize) {
+    HyperLogLogPlusPlusSparse(int precision, BigArrays bigArrays, long initialBuckets) {
         super(precision);
-        this.lc = new LinearCounting(precision, bigArrays, capacity, initialSize);
+        this.lc = new LinearCounting(precision, bigArrays, initialBuckets);
     }
 
     @Override
@@ -49,6 +51,11 @@ final class HyperLogLogPlusPlusSparse extends AbstractHyperLogLogPlusPlus implem
         return lc.sizes.size();
     }
 
+    /** Needs to be called before adding elements into a bucket */
+    protected void ensureCapacity(long bucketOrd, long size) {
+        lc.ensureCapacity(bucketOrd, size);
+    }
+
     @Override
     public long cardinality(long bucketOrd) {
         return lc.cardinality(bucketOrd);
@@ -85,24 +92,22 @@ final class HyperLogLogPlusPlusSparse extends AbstractHyperLogLogPlusPlus implem
 
     private static class LinearCounting extends AbstractLinearCounting implements Releasable {
 
-        private final int capacity;
         private final BigArrays bigArrays;
         private final LinearCountingIterator iterator;
         // We are actually using HyperLogLog's runLens array but interpreting it as a hash set for linear counting.
         // Number of elements stored.
-        private IntArray values;
+        private ObjectArray<IntArray> values;
         private IntArray sizes;
 
-        LinearCounting(int p, BigArrays bigArrays, int capacity, int initialSize) {
+        LinearCounting(int p, BigArrays bigArrays, long initialBuckets) {
             super(p);
             this.bigArrays = bigArrays;
-            this.capacity = capacity;
-            IntArray values = null;
+            ObjectArray<IntArray> values = null;
             IntArray sizes = null;
             boolean success = false;
             try {
-                values = bigArrays.newIntArray(initialSize * capacity);
-                sizes = bigArrays.newIntArray(initialSize);
+                values = bigArrays.newObjectArray(initialBuckets);
+                sizes = bigArrays.newIntArray(initialBuckets);
                 success = true;
             } finally {
                 if (success == false) {
@@ -111,7 +116,7 @@ final class HyperLogLogPlusPlusSparse extends AbstractHyperLogLogPlusPlus implem
             }
             this.values = values;
             this.sizes = sizes;
-            iterator = new LinearCountingIterator(this, capacity);
+            iterator = new LinearCountingIterator();
         }
 
         @Override
@@ -120,6 +125,18 @@ final class HyperLogLogPlusPlusSparse extends AbstractHyperLogLogPlusPlus implem
             return set(bucketOrd, encoded);
         }
 
+        protected void ensureCapacity(long bucketOrd, long size) {
+            values = bigArrays.grow(values, bucketOrd + 1);
+            sizes = bigArrays.grow(sizes, bucketOrd + 1);
+            IntArray value = values.get(bucketOrd);
+            if (value == null) {
+                value = bigArrays.newIntArray(size);
+            } else {
+                value = bigArrays.grow(value, size);
+            }
+            values.set(bucketOrd, value);
+        }
+
         @Override
         protected int size(long bucketOrd) {
             if (bucketOrd >= sizes.size()) {
@@ -132,67 +149,55 @@ final class HyperLogLogPlusPlusSparse extends AbstractHyperLogLogPlusPlus implem
 
         @Override
         protected HashesIterator values(long bucketOrd) {
-            iterator.reset(bucketOrd, size(bucketOrd));
+            iterator.reset(values.get(bucketOrd), size(bucketOrd));
             return iterator;
         }
 
-        private long index(long bucketOrd, int index) {
-            return (bucketOrd * capacity) + index;
-        }
-
-        private int get(long bucketOrd, int index) {
-            long globalIndex = index(bucketOrd, index);
-            if (values.size() < globalIndex) {
-                return 0;
-            }
-            return values.get(globalIndex);
-        }
-
         private int set(long bucketOrd, int value) {
-            int size = size(bucketOrd);
-            if (size == 0) {
-                sizes = bigArrays.grow(sizes, bucketOrd + 1);
-                values = bigArrays.grow(values, (bucketOrd + 1) * capacity);
-            }
-            values.set(index(bucketOrd, size), value);
+            // This assumes that ensureCapacity has been called before
+            assert values.get(bucketOrd) != null : "Added a value without calling ensureCapacity";
+            IntArray array = values.get(bucketOrd);
+            int size = sizes.get(bucketOrd);
+            array.set(size, value);
             return sizes.increment(bucketOrd, 1);
         }
 
         private int recomputedSize(long bucketOrd) {
-            for (int i = 0; i < capacity; ++i) {
-                final int v = get(bucketOrd, i);
+            IntArray array = values.get(bucketOrd);
+            if (array == null) {
+                return 0;
+            }
+            for (int i = 0; i < array.size(); ++i) {
+                final int v = array.get(i);
                 if (v == 0) {
                     return i;
                 }
             }
-            return capacity;
+            return Math.toIntExact(array.size());
         }
 
         @Override
         public void close() {
+            for (int i = 0; i < values.size(); i++) {
+                Releasables.close(values.get(i));
+            }
             Releasables.close(values, sizes);
         }
     }
 
     private static class LinearCountingIterator implements AbstractLinearCounting.HashesIterator {
 
-        private final LinearCounting lc;
-        private final int capacity;
-        long start;
-        long end;
-        private int value, size;
+        IntArray values;
+        int size, value;
         private long pos;
 
-        LinearCountingIterator(LinearCounting lc, int capacity) {
-            this.lc = lc;
-            this.capacity = capacity;
+        LinearCountingIterator() {
         }
 
-        void reset(long bucketOrd, int size) {
-            this.start = bucketOrd * capacity;
+        void reset(IntArray values, int size) {
+            this.values = values;
             this.size = size;
-            this.end = start + size;
-            this.pos = start;
+            this.pos = 0;
         }
 
         @Override
@@ -202,9 +207,9 @@ final class HyperLogLogPlusPlusSparse extends AbstractHyperLogLogPlusPlus implem
 
         @Override
         public boolean next() {
-            if (pos < end) {
-               value = lc.values.get(pos++);
-               return true;
+            if (pos < size) {
+                value = values.get(pos++);
+                return true;
             }
             return false;
         }

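The underlying layout change, in miniature: the old LinearCounting stored all buckets in a single flat IntArray addressed as bucketOrd * capacity + index, charging every bucket the full capacity up front; the new one keeps an ObjectArray<IntArray> with one array per bucket, allocated and grown only on demand. A plain-arrays analogy (all names hypothetical, not Elasticsearch API):

    import java.util.Arrays;

    class SparseLayoutSketch {
        // old layout: one flat array; bucket b's i-th slot lives at b * capacity + i
        static int oldIndex(int bucketOrd, int i, int capacity) {
            return bucketOrd * capacity + i;
        }

        // new layout: one array per bucket, created and resized only when used
        static void ensureCapacity(int[][] perBucket, int bucket, int size) {
            if (perBucket[bucket] == null) {
                perBucket[bucket] = new int[size];
            } else if (perBucket[bucket].length < size) {
                perBucket[bucket] = Arrays.copyOf(perBucket[bucket], size);
            }
        }
    }
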
+ 19 - 0
server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java

@@ -37,6 +37,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 public class BitArrayTests extends ESTestCase {
+
     public void testRandom() {
         try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) {
             int numBits = randomIntBetween(1000, 10000);
@@ -164,4 +165,22 @@ public class BitArrayTests extends ESTestCase {
             }
         }
     }
+
+    public void testCardinality() {
+        try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) {
+            int numBits = randomIntBetween(1000, 10000);
+            long cardinality = 0;
+            for (int step = 0; step < 3; step++) {
+                for (int i = 0; i < numBits; i++) {
+                    if (randomBoolean()) {
+                        if (bitArray.get(i) == false) {
+                            cardinality++;
+                        }
+                        bitArray.set(i);
+                    }
+                }
+                assertEquals(cardinality, bitArray.cardinality());
+            }
+        }
+    }
 }

+ 1 - 13
server/src/test/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java

@@ -28,7 +28,6 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
 import org.hamcrest.CoreMatchers;
-import org.hamcrest.Matchers;
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
@@ -40,16 +39,6 @@ import static org.mockito.Mockito.when;
 
 public class HyperLogLogPlusPlusSparseTests extends ESTestCase {
 
-    public void testBasic()  {
-        final int p = randomIntBetween(MIN_PRECISION, MAX_PRECISION);
-        HyperLogLogPlusPlusSparse sparse  = new HyperLogLogPlusPlusSparse(p, BigArrays.NON_RECYCLING_INSTANCE, 10, 1);
-        AbstractLinearCounting.HashesIterator iterator = sparse.getLinearCounting(randomIntBetween(1, 10));
-        assertEquals(0, iterator.size());
-        IllegalArgumentException ex =
-            expectThrows(IllegalArgumentException.class, () -> sparse.getHyperLogLog(randomIntBetween(1, 10)));
-        assertThat(ex.getMessage(), Matchers.containsString("Implementation does not support HLL structures"));
-    }
-
     public void testEquivalence() throws IOException {
         final int p = randomIntBetween(MIN_PRECISION, MAX_PRECISION);
         final HyperLogLogPlusPlus single = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 0);
@@ -120,7 +109,7 @@ public class HyperLogLogPlusPlusSparseTests extends ESTestCase {
         final int p = randomIntBetween(AbstractCardinalityAlgorithm.MIN_PRECISION, AbstractCardinalityAlgorithm.MAX_PRECISION);
         try {
             for (int i = 0; i < whenToBreak + 1; ++i) {
-                final HyperLogLogPlusPlusSparse subject = new HyperLogLogPlusPlusSparse(p, bigArrays, 1, 1);
+                final HyperLogLogPlusPlusSparse subject = new HyperLogLogPlusPlusSparse(p, bigArrays, 1);
                 subject.close();
             }
             fail("Must fail");
@@ -130,5 +119,4 @@ public class HyperLogLogPlusPlusSparseTests extends ESTestCase {
 
         assertThat(total.get(), CoreMatchers.equalTo(0L));
     }
-
 }