Browse Source

Add bulk processing capabilities to ES91Int4VectorsScorer (#131202)

It uses the same approach as the one taken in ES91OSQVectorsScorer
Ignacio Vera 3 months ago
parent
commit
da218c8afd

+ 72 - 6
benchmarks/src/main/java/org/elasticsearch/benchmark/vector/Int4ScorerBenchmark.java

@@ -8,12 +8,14 @@
  */
 package org.elasticsearch.benchmark.vector;
 
+import org.apache.lucene.index.VectorSimilarityFunction;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.util.VectorUtil;
+import org.apache.lucene.util.quantization.OptimizedScalarQuantizer;
 import org.elasticsearch.common.logging.LogConfigurator;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.simdvec.ES91Int4VectorsScorer;
@@ -52,20 +54,26 @@ public class Int4ScorerBenchmark {
         LogConfigurator.configureESLogging(); // native access requires logging to be initialized
     }
 
-    @Param({ "384", "702", "1024" })
+    @Param({ "384", "782", "1024" })
     int dims;
 
-    int numVectors = 200;
-    int numQueries = 10;
+    int numVectors = 20 * ES91Int4VectorsScorer.BULK_SIZE;
+    int numQueries = 5;
 
     byte[] scratch;
     byte[][] binaryVectors;
     byte[][] binaryQueries;
+    float[] scores = new float[ES91Int4VectorsScorer.BULK_SIZE];
+
+    float[] scratchFloats = new float[3];
 
     ES91Int4VectorsScorer scorer;
     Directory dir;
     IndexInput in;
 
+    OptimizedScalarQuantizer.QuantizationResult queryCorrections;
+    float centroidDp;
+
     @Setup
     public void setup() throws IOException {
         binaryVectors = new byte[numVectors][dims];
@@ -77,9 +85,19 @@ public class Int4ScorerBenchmark {
                     binaryVector[i] = (byte) ThreadLocalRandom.current().nextInt(16);
                 }
                 out.writeBytes(binaryVector, 0, binaryVector.length);
+                ThreadLocalRandom.current().nextBytes(binaryVector);
+                out.writeBytes(binaryVector, 0, 14); // corrections
             }
         }
 
+        queryCorrections = new OptimizedScalarQuantizer.QuantizationResult(
+            ThreadLocalRandom.current().nextFloat(),
+            ThreadLocalRandom.current().nextFloat(),
+            ThreadLocalRandom.current().nextFloat(),
+            Short.toUnsignedInt((short) ThreadLocalRandom.current().nextInt())
+        );
+        centroidDp = ThreadLocalRandom.current().nextFloat();
+
         in = dir.openInput("vectors", IOContext.DEFAULT);
         binaryQueries = new byte[numVectors][dims];
         for (byte[] binaryVector : binaryVectors) {
@@ -105,18 +123,66 @@ public class Int4ScorerBenchmark {
             in.seek(0);
             for (int i = 0; i < numVectors; i++) {
                 in.readBytes(scratch, 0, dims);
-                bh.consume(VectorUtil.int4DotProduct(binaryQueries[j], scratch));
+                int dp = VectorUtil.int4DotProduct(binaryQueries[j], scratch);
+                in.readFloats(scratchFloats, 0, 3);
+                float score = scorer.applyCorrections(
+                    queryCorrections.lowerInterval(),
+                    queryCorrections.upperInterval(),
+                    queryCorrections.quantizedComponentSum(),
+                    queryCorrections.additionalCorrection(),
+                    VectorSimilarityFunction.EUCLIDEAN,
+                    centroidDp, // randomly generated centroid dot product for this benchmark
+                    scratchFloats[0],
+                    scratchFloats[1],
+                    Short.toUnsignedInt(in.readShort()),
+                    scratchFloats[2],
+                    dp
+                );
+                bh.consume(score);
             }
         }
     }
 
     @Benchmark
     @Fork(jvmArgsPrepend = { "--add-modules=jdk.incubator.vector" })
-    public void scoreFromMemorySegmentOnlyVector(Blackhole bh) throws IOException {
+    public void scoreFromMemorySegment(Blackhole bh) throws IOException {
         for (int j = 0; j < numQueries; j++) {
             in.seek(0);
             for (int i = 0; i < numVectors; i++) {
-                bh.consume(scorer.int4DotProduct(binaryQueries[j]));
+                bh.consume(
+                    scorer.score(
+                        binaryQueries[j],
+                        queryCorrections.lowerInterval(),
+                        queryCorrections.upperInterval(),
+                        queryCorrections.quantizedComponentSum(),
+                        queryCorrections.additionalCorrection(),
+                        VectorSimilarityFunction.EUCLIDEAN,
+                        centroidDp
+                    )
+                );
+            }
+        }
+    }
+
+    @Benchmark
+    @Fork(jvmArgsPrepend = { "--add-modules=jdk.incubator.vector" })
+    public void scoreFromMemorySegmentBulk(Blackhole bh) throws IOException {
+        for (int j = 0; j < numQueries; j++) {
+            in.seek(0);
+            for (int i = 0; i < numVectors; i += ES91Int4VectorsScorer.BULK_SIZE) {
+                scorer.scoreBulk(
+                    binaryQueries[j],
+                    queryCorrections.lowerInterval(),
+                    queryCorrections.upperInterval(),
+                    queryCorrections.quantizedComponentSum(),
+                    queryCorrections.additionalCorrection(),
+                    VectorSimilarityFunction.EUCLIDEAN,
+                    centroidDp,
+                    scores
+                );
+                for (float score : scores) {
+                    bh.consume(score);
+                }
             }
         }
     }

+ 142 - 0
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ES91Int4VectorsScorer.java

@@ -8,10 +8,15 @@
  */
 package org.elasticsearch.simdvec;
 
+import org.apache.lucene.index.VectorSimilarityFunction;
 import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.VectorUtil;
 
 import java.io.IOException;
 
+import static org.apache.lucene.index.VectorSimilarityFunction.EUCLIDEAN;
+import static org.apache.lucene.index.VectorSimilarityFunction.MAXIMUM_INNER_PRODUCT;
+
 /** Scorer for quantized vectors stored as an {@link IndexInput}.
  * <p>
  * Similar to {@link org.apache.lucene.util.VectorUtil#int4DotProduct(byte[], byte[])} but
@@ -20,11 +25,19 @@ import java.io.IOException;
  * */
 public class ES91Int4VectorsScorer {
 
+    public static final int BULK_SIZE = 16;
+    protected static final float FOUR_BIT_SCALE = 1f / ((1 << 4) - 1);
+
     /** The wrapper {@link IndexInput}. */
     protected final IndexInput in;
     protected final int dimensions;
     protected byte[] scratch;
 
+    protected final float[] lowerIntervals = new float[BULK_SIZE];
+    protected final float[] upperIntervals = new float[BULK_SIZE];
+    protected final int[] targetComponentSums = new int[BULK_SIZE];
+    protected final float[] additionalCorrections = new float[BULK_SIZE];
+
     /** Sole constructor, called by sub-classes. */
     public ES91Int4VectorsScorer(IndexInput in, int dimensions) {
         this.in = in;
@@ -32,6 +45,10 @@ public class ES91Int4VectorsScorer {
         scratch = new byte[dimensions];
     }
 
+    /**
+     * Computes the quantized distance between the provided quantized query and the quantized vector
+     * that is read from the wrapped {@link IndexInput}.
+     */
     public long int4DotProduct(byte[] b) throws IOException {
         in.readBytes(scratch, 0, dimensions);
         int total = 0;
@@ -40,4 +57,129 @@ public class ES91Int4VectorsScorer {
         }
         return total;
     }
+
+    /**
+     * Computes the quantized distance between the provided quantized query and the quantized vectors
+     * that are read from the wrapped {@link IndexInput}. The number of quantized vectors to read is
+     * determined by {@code count} and the results are stored in the provided {@code scores} array.
+     */
+    public void int4DotProductBulk(byte[] b, int count, float[] scores) throws IOException {
+        for (int i = 0; i < count; i++) {
+            scores[i] = int4DotProduct(b);
+        }
+    }
+
+    /**
+     * Computes the score of the next quantized vector read from the wrapped input against the
+     * provided quantized query, applying the necessary corrections to the quantized distance.
+     */
+    public float score(
+        byte[] q,
+        float queryLowerInterval,
+        float queryUpperInterval,
+        int queryComponentSum,
+        float queryAdditionalCorrection,
+        VectorSimilarityFunction similarityFunction,
+        float centroidDp
+    ) throws IOException {
+        float score = int4DotProduct(q);
+        in.readFloats(lowerIntervals, 0, 3);
+        int addition = Short.toUnsignedInt(in.readShort());
+        return applyCorrections(
+            queryLowerInterval,
+            queryUpperInterval,
+            queryComponentSum,
+            queryAdditionalCorrection,
+            similarityFunction,
+            centroidDp,
+            lowerIntervals[0],
+            lowerIntervals[1],
+            addition,
+            lowerIntervals[2],
+            score
+        );
+    }
+
+    /**
+     * Computes the distance between the provided quantized query and the quantized vectors that are
+     * read from the wrapped {@link IndexInput}.
+     *
+     * <p>The number of vectors to score is defined by {@link #BULK_SIZE}. The expected format of the
+     * input is as follows: first the quantized vectors are read from the input, then all the lower
+     * intervals as floats, then all the upper intervals as floats, then all the target component sums
+     * as shorts, and finally all the additional corrections as floats.
+     *
+     * <p>The results are stored in the provided scores array.
+     */
+    public void scoreBulk(
+        byte[] q,
+        float queryLowerInterval,
+        float queryUpperInterval,
+        int queryComponentSum,
+        float queryAdditionalCorrection,
+        VectorSimilarityFunction similarityFunction,
+        float centroidDp,
+        float[] scores
+    ) throws IOException {
+        int4DotProductBulk(q, BULK_SIZE, scores);
+        in.readFloats(lowerIntervals, 0, BULK_SIZE);
+        in.readFloats(upperIntervals, 0, BULK_SIZE);
+        for (int i = 0; i < BULK_SIZE; i++) {
+            targetComponentSums[i] = Short.toUnsignedInt(in.readShort());
+        }
+        in.readFloats(additionalCorrections, 0, BULK_SIZE);
+        for (int i = 0; i < BULK_SIZE; i++) {
+            scores[i] = applyCorrections(
+                queryLowerInterval,
+                queryUpperInterval,
+                queryComponentSum,
+                queryAdditionalCorrection,
+                similarityFunction,
+                centroidDp,
+                lowerIntervals[i],
+                upperIntervals[i],
+                targetComponentSums[i],
+                additionalCorrections[i],
+                scores[i]
+            );
+        }
+    }
+
+    /**
+     * Computes the score by applying the necessary corrections to the provided quantized distance.
+     */
+    public float applyCorrections(
+        float queryLowerInterval,
+        float queryUpperInterval,
+        int queryComponentSum,
+        float queryAdditionalCorrection,
+        VectorSimilarityFunction similarityFunction,
+        float centroidDp,
+        float lowerInterval,
+        float upperInterval,
+        int targetComponentSum,
+        float additionalCorrection,
+        float qcDist
+    ) {
+        float ax = lowerInterval;
+        // Here we assume `lx` is simply bit vectors, so the scaling isn't necessary
+        float lx = upperInterval - ax;
+        float ay = queryLowerInterval;
+        float ly = (queryUpperInterval - ay) * FOUR_BIT_SCALE;
+        float y1 = queryComponentSum;
+        float score = ax * ay * dimensions + ay * lx * (float) targetComponentSum + ax * ly * y1 + lx * ly * qcDist;
+        // For euclidean, we need to invert the score and apply the additional correction, which is
+        // assumed to be the squared l2norm of the centroid centered vectors.
+        if (similarityFunction == EUCLIDEAN) {
+            score = queryAdditionalCorrection + additionalCorrection - 2 * score;
+            return Math.max(1 / (1f + score), 0);
+        } else {
+            // For cosine and max inner product, we need to apply the additional correction, which is
+            // assumed to be the non-centered dot-product between the vector and the centroid
+            score += queryAdditionalCorrection + additionalCorrection - centroidDp;
+            if (similarityFunction == MAXIMUM_INNER_PRODUCT) {
+                return VectorUtil.scaleMaxInnerProductScore(score);
+            }
+            return Math.max((1f + score) / 2f, 0);
+        }
+    }
 }

+ 240 - 29
libs/simdvec/src/main21/java/org/elasticsearch/simdvec/internal/vectorization/MemorySegmentES91Int4VectorsScorer.java

@@ -9,22 +9,30 @@
 package org.elasticsearch.simdvec.internal.vectorization;
 
 import jdk.incubator.vector.ByteVector;
+import jdk.incubator.vector.FloatVector;
 import jdk.incubator.vector.IntVector;
 import jdk.incubator.vector.ShortVector;
 import jdk.incubator.vector.Vector;
+import jdk.incubator.vector.VectorOperators;
+import jdk.incubator.vector.VectorShape;
 import jdk.incubator.vector.VectorSpecies;
 
+import org.apache.lucene.index.VectorSimilarityFunction;
 import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.VectorUtil;
 import org.elasticsearch.simdvec.ES91Int4VectorsScorer;
 
 import java.io.IOException;
 import java.lang.foreign.MemorySegment;
+import java.nio.ByteOrder;
 
 import static java.nio.ByteOrder.LITTLE_ENDIAN;
 import static jdk.incubator.vector.VectorOperators.ADD;
 import static jdk.incubator.vector.VectorOperators.B2I;
 import static jdk.incubator.vector.VectorOperators.B2S;
 import static jdk.incubator.vector.VectorOperators.S2I;
+import static org.apache.lucene.index.VectorSimilarityFunction.EUCLIDEAN;
+import static org.apache.lucene.index.VectorSimilarityFunction.MAXIMUM_INNER_PRODUCT;
 
 /** Panamized scorer for quantized vectors stored as an {@link IndexInput}.
  * <p>
@@ -43,6 +51,15 @@ public final class MemorySegmentES91Int4VectorsScorer extends ES91Int4VectorsSco
     private static final VectorSpecies<Integer> INT_SPECIES_256 = IntVector.SPECIES_256;
     private static final VectorSpecies<Integer> INT_SPECIES_512 = IntVector.SPECIES_512;
 
+    private static final VectorSpecies<Float> FLOAT_SPECIES;
+    private static final VectorSpecies<Short> SHORT_SPECIES;
+
+    static {
+        // default to platform supported bitsize
+        FLOAT_SPECIES = VectorSpecies.of(float.class, VectorShape.forBitSize(PanamaESVectorUtilSupport.VECTOR_BITSIZE));
+        SHORT_SPECIES = VectorSpecies.of(short.class, VectorShape.forBitSize(PanamaESVectorUtilSupport.VECTOR_BITSIZE));
+    }
+
     private final MemorySegment memorySegment;
 
     public MemorySegmentES91Int4VectorsScorer(IndexInput in, int dimensions, MemorySegment memorySegment) {
@@ -99,12 +116,11 @@ public final class MemorySegmentES91Int4VectorsScorer extends ES91Int4VectorsSco
     }
 
     private long dotProduct(byte[] q) throws IOException {
-        int i = 0;
-        int res = 0;
-
         // only vectorize if we'll at least enter the loop a single time, and we have at least 128-bit
         // vectors (256-bit on intel to dodge performance landmines)
         if (dimensions >= 16 && PanamaESVectorUtilSupport.HAS_FAST_INTEGER_VECTORS) {
+            int i = 0;
+            int res = 0;
             // compute vectorized dot product consistent with VPDPBUSD instruction
             if (PanamaESVectorUtilSupport.VECTOR_BITSIZE >= 512) {
                 i += BYTE_SPECIES_128.loopBound(dimensions);
@@ -113,16 +129,15 @@ public final class MemorySegmentES91Int4VectorsScorer extends ES91Int4VectorsSco
                 i += BYTE_SPECIES_64.loopBound(dimensions);
                 res += dotProductBody256(q, i);
             } else {
-                // tricky: we don't have SPECIES_32, so we workaround with "overlapping read"
-                i += BYTE_SPECIES_64.loopBound(dimensions - BYTE_SPECIES_64.length());
-                res += dotProductBody128(q, i);
+                throw new IllegalArgumentException("Unreacheable statement");
             }
+            // scalar tail
+            for (; i < q.length; i++) {
+                res += in.readByte() * q[i];
+            }
+            return res;
         }
-        // scalar tail
-        for (; i < q.length; i++) {
-            res += in.readByte() * q[i];
-        }
-        return res;
+        return super.int4DotProduct(q);
     }
 
     /** vectorized dot product body (512 bit vectors) */
@@ -166,26 +181,222 @@ public final class MemorySegmentES91Int4VectorsScorer extends ES91Int4VectorsSco
         return acc.reduceLanes(ADD);
     }
 
-    /** vectorized dot product body (128 bit vectors) */
-    private int dotProductBody128(byte[] q, int limit) throws IOException {
-        IntVector acc = IntVector.zero(INT_SPECIES_128);
-        long offset = in.getFilePointer();
-        // 4 bytes at a time (re-loading half the vector each time!)
-        for (int i = 0; i < limit; i += BYTE_SPECIES_64.length() >> 1) {
-            // load 8 bytes
-            ByteVector va8 = ByteVector.fromArray(BYTE_SPECIES_64, q, i);
-            ByteVector vb8 = ByteVector.fromMemorySegment(BYTE_SPECIES_64, memorySegment, offset + i, LITTLE_ENDIAN);
+    @Override
+    public void int4DotProductBulk(byte[] q, int count, float[] scores) throws IOException {
+        if (PanamaESVectorUtilSupport.VECTOR_BITSIZE >= 512 || PanamaESVectorUtilSupport.VECTOR_BITSIZE == 256) {
+            dotProductBulk(q, count, scores);
+            return;
+        }
+        if (dimensions >= 32 && PanamaESVectorUtilSupport.HAS_FAST_INTEGER_VECTORS) {
+            int4DotProductBody128Bulk(q, count, scores);
+            return;
+        }
+        super.int4DotProductBulk(q, count, scores);
+    }
 
-            // process first "half" only: 16-bit multiply
-            Vector<Short> va16 = va8.convert(B2S, 0);
-            Vector<Short> vb16 = vb8.convert(B2S, 0);
-            Vector<Short> prod16 = va16.mul(vb16);
+    private void int4DotProductBody128Bulk(byte[] q, int count, float[] scores) throws IOException {
+        int limit = BYTE_SPECIES_128.loopBound(dimensions);
+        for (int iter = 0; iter < count; iter++) {
+            int sum = 0;
+            long offset = in.getFilePointer();
+            for (int i = 0; i < limit; i += 1024) {
+                ShortVector acc0 = ShortVector.zero(SHORT_SPECIES_128);
+                ShortVector acc1 = ShortVector.zero(SHORT_SPECIES_128);
 
-            // 32-bit add
-            acc = acc.add(prod16.convertShape(S2I, INT_SPECIES_128, 0));
+                int innerLimit = Math.min(limit - i, 1024);
+                for (int j = 0; j < innerLimit; j += BYTE_SPECIES_128.length()) {
+                    ByteVector va8 = ByteVector.fromArray(BYTE_SPECIES_64, q, i + j);
+                    ByteVector vb8 = ByteVector.fromMemorySegment(BYTE_SPECIES_64, memorySegment, offset + i + j, LITTLE_ENDIAN);
+
+                    ByteVector prod8 = va8.mul(vb8);
+                    ShortVector prod16 = prod8.convertShape(B2S, ShortVector.SPECIES_128, 0).reinterpretAsShorts();
+                    acc0 = acc0.add(prod16.and((short) 255));
+
+                    va8 = ByteVector.fromArray(BYTE_SPECIES_64, q, i + j + 8);
+                    vb8 = ByteVector.fromMemorySegment(BYTE_SPECIES_64, memorySegment, offset + i + j + 8, LITTLE_ENDIAN);
+
+                    prod8 = va8.mul(vb8);
+                    prod16 = prod8.convertShape(B2S, SHORT_SPECIES_128, 0).reinterpretAsShorts();
+                    acc1 = acc1.add(prod16.and((short) 255));
+                }
+
+                IntVector intAcc0 = acc0.convertShape(S2I, INT_SPECIES_128, 0).reinterpretAsInts();
+                IntVector intAcc1 = acc0.convertShape(S2I, INT_SPECIES_128, 1).reinterpretAsInts();
+                IntVector intAcc2 = acc1.convertShape(S2I, INT_SPECIES_128, 0).reinterpretAsInts();
+                IntVector intAcc3 = acc1.convertShape(S2I, INT_SPECIES_128, 1).reinterpretAsInts();
+                sum += intAcc0.add(intAcc1).add(intAcc2).add(intAcc3).reduceLanes(ADD);
+            }
+            in.seek(offset + limit);
+            in.readBytes(scratch, limit, dimensions - limit);
+            for (int j = limit; j < dimensions; j++) {
+                sum += scratch[j] * q[j];
+            }
+            scores[iter] = sum;
         }
-        in.seek(offset + limit);
-        // reduce
-        return acc.reduceLanes(ADD);
+    }
+
+    private void dotProductBulk(byte[] q, int count, float[] scores) throws IOException {
+        // only vectorize if we'll at least enter the loop a single time, and we have at least 128-bit
+        // vectors (256-bit on intel to dodge performance landmines)
+        if (dimensions >= 16 && PanamaESVectorUtilSupport.HAS_FAST_INTEGER_VECTORS) {
+            // compute vectorized dot product consistent with VPDPBUSD instruction
+            if (PanamaESVectorUtilSupport.VECTOR_BITSIZE >= 512) {
+                dotProductBody512Bulk(q, count, scores);
+            } else if (PanamaESVectorUtilSupport.VECTOR_BITSIZE == 256) {
+                dotProductBody256Bulk(q, count, scores);
+            } else {
+                throw new IllegalArgumentException("Unreacheable statement");
+            }
+            return;
+        }
+        super.int4DotProductBulk(q, count, scores);
+    }
+
+    /** vectorized dot product body (512 bit vectors) */
+    private void dotProductBody512Bulk(byte[] q, int count, float[] scores) throws IOException {
+        int limit = BYTE_SPECIES_128.loopBound(dimensions);
+        for (int iter = 0; iter < count; iter++) {
+            IntVector acc = IntVector.zero(INT_SPECIES_512);
+            long offset = in.getFilePointer();
+            int i = 0;
+            for (; i < limit; i += BYTE_SPECIES_128.length()) {
+                ByteVector va8 = ByteVector.fromArray(BYTE_SPECIES_128, q, i);
+                ByteVector vb8 = ByteVector.fromMemorySegment(BYTE_SPECIES_128, memorySegment, offset + i, LITTLE_ENDIAN);
+
+                // 16-bit multiply: avoid AVX-512 heavy multiply on zmm
+                Vector<Short> va16 = va8.convertShape(B2S, SHORT_SPECIES_256, 0);
+                Vector<Short> vb16 = vb8.convertShape(B2S, SHORT_SPECIES_256, 0);
+                Vector<Short> prod16 = va16.mul(vb16);
+
+                // 32-bit add
+                Vector<Integer> prod32 = prod16.convertShape(S2I, INT_SPECIES_512, 0);
+                acc = acc.add(prod32);
+            }
+
+            in.seek(offset + limit); // advance the input stream
+            // reduce
+            long res = acc.reduceLanes(ADD);
+            for (; i < q.length; i++) {
+                res += in.readByte() * q[i];
+            }
+            scores[iter] = res;
+        }
+    }
+
+    /** vectorized dot product body (256 bit vectors) */
+    private void dotProductBody256Bulk(byte[] q, int count, float[] scores) throws IOException {
+        int limit = BYTE_SPECIES_128.loopBound(dimensions);
+        for (int iter = 0; iter < count; iter++) {
+            IntVector acc = IntVector.zero(INT_SPECIES_256);
+            long offset = in.getFilePointer();
+            int i = 0;
+            for (; i < limit; i += BYTE_SPECIES_64.length()) {
+                ByteVector va8 = ByteVector.fromArray(BYTE_SPECIES_64, q, i);
+                ByteVector vb8 = ByteVector.fromMemorySegment(BYTE_SPECIES_64, memorySegment, offset + i, LITTLE_ENDIAN);
+
+                // 32-bit multiply and add into accumulator
+                Vector<Integer> va32 = va8.convertShape(B2I, INT_SPECIES_256, 0);
+                Vector<Integer> vb32 = vb8.convertShape(B2I, INT_SPECIES_256, 0);
+                acc = acc.add(va32.mul(vb32));
+            }
+            in.seek(offset + limit);
+            // reduce
+            long res = acc.reduceLanes(ADD);
+            for (; i < q.length; i++) {
+                res += in.readByte() * q[i];
+            }
+            scores[iter] = res;
+        }
+    }
+
+    @Override
+    public void scoreBulk(
+        byte[] q,
+        float queryLowerInterval,
+        float queryUpperInterval,
+        int queryComponentSum,
+        float queryAdditionalCorrection,
+        VectorSimilarityFunction similarityFunction,
+        float centroidDp,
+        float[] scores
+    ) throws IOException {
+        int4DotProductBulk(q, BULK_SIZE, scores);
+        applyCorrectionsBulk(
+            queryLowerInterval,
+            queryUpperInterval,
+            queryComponentSum,
+            queryAdditionalCorrection,
+            similarityFunction,
+            centroidDp,
+            scores
+        );
+    }
+
+    private void applyCorrectionsBulk(
+        float queryLowerInterval,
+        float queryUpperInterval,
+        int queryComponentSum,
+        float queryAdditionalCorrection,
+        VectorSimilarityFunction similarityFunction,
+        float centroidDp,
+        float[] scores
+    ) throws IOException {
+        int limit = FLOAT_SPECIES.loopBound(BULK_SIZE);
+        int i = 0;
+        long offset = in.getFilePointer();
+        float ay = queryLowerInterval;
+        float ly = (queryUpperInterval - ay) * FOUR_BIT_SCALE;
+        float y1 = queryComponentSum;
+        for (; i < limit; i += FLOAT_SPECIES.length()) {
+            var ax = FloatVector.fromMemorySegment(FLOAT_SPECIES, memorySegment, offset + i * Float.BYTES, ByteOrder.LITTLE_ENDIAN);
+            var lx = FloatVector.fromMemorySegment(
+                FLOAT_SPECIES,
+                memorySegment,
+                offset + 4 * BULK_SIZE + i * Float.BYTES,
+                ByteOrder.LITTLE_ENDIAN
+            ).sub(ax);
+            var targetComponentSums = ShortVector.fromMemorySegment(
+                SHORT_SPECIES,
+                memorySegment,
+                offset + 8 * BULK_SIZE + i * Short.BYTES,
+                ByteOrder.LITTLE_ENDIAN
+            ).convert(VectorOperators.S2I, 0).reinterpretAsInts().and(0xffff).convert(VectorOperators.I2F, 0);
+            var additionalCorrections = FloatVector.fromMemorySegment(
+                FLOAT_SPECIES,
+                memorySegment,
+                offset + 10 * BULK_SIZE + i * Float.BYTES,
+                ByteOrder.LITTLE_ENDIAN
+            );
+            var qcDist = FloatVector.fromArray(FLOAT_SPECIES, scores, i);
+            // ax * ay * dimensions + ay * lx * (float) targetComponentSum + ax * ly * y1 + lx * ly *
+            // qcDist;
+            var res1 = ax.mul(ay).mul(dimensions);
+            var res2 = lx.mul(ay).mul(targetComponentSums);
+            var res3 = ax.mul(ly).mul(y1);
+            var res4 = lx.mul(ly).mul(qcDist);
+            var res = res1.add(res2).add(res3).add(res4);
+            // For euclidean, we need to invert the score and apply the additional correction, which is
+            // assumed to be the squared l2norm of the centroid centered vectors.
+            if (similarityFunction == EUCLIDEAN) {
+                res = res.mul(-2).add(additionalCorrections).add(queryAdditionalCorrection).add(1f);
+                res = FloatVector.broadcast(FLOAT_SPECIES, 1).div(res).max(0);
+                res.intoArray(scores, i);
+            } else {
+                // For cosine and max inner product, we need to apply the additional correction, which is
+                // assumed to be the non-centered dot-product between the vector and the centroid
+                res = res.add(queryAdditionalCorrection).add(additionalCorrections).sub(centroidDp);
+                if (similarityFunction == MAXIMUM_INNER_PRODUCT) {
+                    res.intoArray(scores, i);
+                    // not sure how to do it better
+                    for (int j = 0; j < FLOAT_SPECIES.length(); j++) {
+                        scores[i + j] = VectorUtil.scaleMaxInnerProductScore(scores[i + j]);
+                    }
+                } else {
+                    res = res.add(1f).mul(0.5f).max(0);
+                    res.intoArray(scores, i);
+                }
+            }
+        }
+        in.seek(offset + 14L * BULK_SIZE);
     }
 }

+ 154 - 0
libs/simdvec/src/test/java/org/elasticsearch/simdvec/internal/vectorization/ES91Int4VectorScorerTests.java

@@ -9,13 +9,18 @@
 
 package org.elasticsearch.simdvec.internal.vectorization;
 
+import org.apache.lucene.index.VectorSimilarityFunction;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.util.VectorUtil;
+import org.elasticsearch.index.codec.vectors.OptimizedScalarQuantizer;
 import org.elasticsearch.simdvec.ES91Int4VectorsScorer;
+import org.elasticsearch.simdvec.ES91OSQVectorsScorer;
+
+import static org.hamcrest.Matchers.lessThan;
 
 public class ES91Int4VectorScorerTests extends BaseVectorizationTests {
 
@@ -57,4 +62,153 @@ public class ES91Int4VectorScorerTests extends BaseVectorizationTests {
             }
         }
     }
+
+    public void testInt4Score() throws Exception {
+        // The int4 scorer packs two 4-bit values per byte, so only even dimension counts are valid.
+        final int dims = 2 * random().nextInt(1, 1000);
+        final int vectorCount = random().nextInt(1, 100);
+        final byte[] quantized = new byte[dims];
+        final byte[] correctionBytes = new byte[14];
+        try (Directory dir = new MMapDirectory(createTempDir())) {
+            // Write vectorCount records of [dims quantized bytes][14 correction bytes].
+            try (IndexOutput out = dir.createOutput("tests.bin", IOContext.DEFAULT)) {
+                for (int v = 0; v < vectorCount; v++) {
+                    for (int d = 0; d < dims; d++) {
+                        quantized[d] = (byte) random().nextInt(16); // values fit in 4 bits
+                    }
+                    out.writeBytes(quantized, 0, dims);
+                    random().nextBytes(correctionBytes);
+                    out.writeBytes(correctionBytes, 0, correctionBytes.length);
+                }
+            }
+            final byte[] query = new byte[dims];
+            for (int d = 0; d < dims; d++) {
+                query[d] = (byte) random().nextInt(16); // 4-bit quantized query
+            }
+            final OptimizedScalarQuantizer.QuantizationResult queryCorrections = new OptimizedScalarQuantizer.QuantizationResult(
+                random().nextFloat(),
+                random().nextFloat(),
+                random().nextFloat(),
+                Short.toUnsignedInt((short) random().nextInt())
+            );
+            final float centroidDp = random().nextFloat();
+            final VectorSimilarityFunction similarity = randomFrom(VectorSimilarityFunction.values());
+            try (IndexInput in = dir.openInput("tests.bin", IOContext.DEFAULT)) {
+                // The slice is sized to exactly the written bytes: any implementation that reads
+                // beyond the allowed padding trips an index-out-of-bounds and fails the test.
+                final long totalBytes = (long) (dims + 14) * vectorCount;
+                final IndexInput slice = in.slice("test", 0, totalBytes);
+                final ES91Int4VectorsScorer defaultScorer = defaultProvider().newES91Int4VectorsScorer(in, dims);
+                final ES91Int4VectorsScorer panamaScorer = maybePanamaProvider().newES91Int4VectorsScorer(slice, dims);
+                for (int v = 0; v < vectorCount; v++) {
+                    final float expected = defaultScorer.score(
+                        query,
+                        queryCorrections.lowerInterval(),
+                        queryCorrections.upperInterval(),
+                        queryCorrections.quantizedComponentSum(),
+                        queryCorrections.additionalCorrection(),
+                        similarity,
+                        centroidDp
+                    );
+                    final float actual = panamaScorer.score(
+                        query,
+                        queryCorrections.lowerInterval(),
+                        queryCorrections.upperInterval(),
+                        queryCorrections.quantizedComponentSum(),
+                        queryCorrections.additionalCorrection(),
+                        similarity,
+                        centroidDp
+                    );
+                    assertEquals(expected, actual, 0.001f);
+                    // Both scorers must consume exactly one record per call.
+                    assertEquals(in.getFilePointer(), slice.getFilePointer());
+                }
+                assertEquals(totalBytes, in.getFilePointer());
+            }
+        }
+    }
+
+    public void testInt4ScoreBulk() throws Exception {
+        // The int4 scorer packs two 4-bit values per byte, so only even dimension counts are valid.
+        final int dimensions = random().nextInt(1, 1000) * 2;
+        // Bulk scoring consumes whole batches, so the vector count is a multiple of BULK_SIZE.
+        final int numVectors = random().nextInt(1, 10) * ES91Int4VectorsScorer.BULK_SIZE;
+        final byte[] vector = new byte[ES91Int4VectorsScorer.BULK_SIZE * dimensions];
+        final byte[] corrections = new byte[ES91Int4VectorsScorer.BULK_SIZE * 14];
+        try (Directory dir = new MMapDirectory(createTempDir())) {
+            // Write batches of [BULK_SIZE * dimensions quantized bytes][BULK_SIZE * 14 correction bytes].
+            try (IndexOutput out = dir.createOutput("tests.bin", IOContext.DEFAULT)) {
+                for (int i = 0; i < numVectors; i += ES91Int4VectorsScorer.BULK_SIZE) {
+                    for (int j = 0; j < ES91Int4VectorsScorer.BULK_SIZE * dimensions; j++) {
+                        vector[j] = (byte) random().nextInt(16); // 4-bit quantization
+                    }
+                    out.writeBytes(vector, 0, vector.length);
+                    random().nextBytes(corrections);
+                    out.writeBytes(corrections, 0, corrections.length);
+                }
+            }
+            final byte[] query = new byte[dimensions];
+            for (int j = 0; j < dimensions; j++) {
+                query[j] = (byte) random().nextInt(16); // 4-bit quantization
+            }
+            OptimizedScalarQuantizer.QuantizationResult queryCorrections = new OptimizedScalarQuantizer.QuantizationResult(
+                random().nextFloat(),
+                random().nextFloat(),
+                random().nextFloat(),
+                Short.toUnsignedInt((short) random().nextInt())
+            );
+            float centroidDp = random().nextFloat();
+            VectorSimilarityFunction similarityFunction = randomFrom(VectorSimilarityFunction.values());
+            try (IndexInput in = dir.openInput("tests.bin", IOContext.DEFAULT)) {
+                // Work on a slice that has just the right number of bytes to make the test fail with an
+                // index-out-of-bounds in case the implementation reads more than the allowed number of
+                // padding bytes.
+                final IndexInput slice = in.slice("test", 0, (long) (dimensions + 14) * numVectors);
+                final ES91Int4VectorsScorer defaultScorer = defaultProvider().newES91Int4VectorsScorer(in, dimensions);
+                final ES91Int4VectorsScorer panamaScorer = maybePanamaProvider().newES91Int4VectorsScorer(slice, dimensions);
+                float[] scoresDefault = new float[ES91Int4VectorsScorer.BULK_SIZE];
+                float[] scoresPanama = new float[ES91Int4VectorsScorer.BULK_SIZE];
+                for (int i = 0; i < numVectors; i += ES91Int4VectorsScorer.BULK_SIZE) {
+                    defaultScorer.scoreBulk(
+                        query,
+                        queryCorrections.lowerInterval(),
+                        queryCorrections.upperInterval(),
+                        queryCorrections.quantizedComponentSum(),
+                        queryCorrections.additionalCorrection(),
+                        similarityFunction,
+                        centroidDp,
+                        scoresDefault
+                    );
+                    panamaScorer.scoreBulk(
+                        query,
+                        queryCorrections.lowerInterval(),
+                        queryCorrections.upperInterval(),
+                        queryCorrections.quantizedComponentSum(),
+                        queryCorrections.additionalCorrection(),
+                        similarityFunction,
+                        centroidDp,
+                        scoresPanama
+                    );
+                    // Compare per-lane scores. Use ES91Int4VectorsScorer.BULK_SIZE (not the OSQ
+                    // scorer's constant) so the loop bound always matches the arrays sized above.
+                    for (int j = 0; j < ES91Int4VectorsScorer.BULK_SIZE; j++) {
+                        if (scoresDefault[j] == scoresPanama[j]) {
+                            continue;
+                        }
+                        if (scoresDefault[j] > (1000 * Byte.MAX_VALUE)) {
+                            // Large magnitudes: absolute deltas grow with the value, so check the
+                            // relative error against both sides instead.
+                            float diff = Math.abs(scoresDefault[j] - scoresPanama[j]);
+                            assertThat(
+                                "defaultScores: " + scoresDefault[j] + " bulkScores: " + scoresPanama[j],
+                                diff / scoresDefault[j],
+                                lessThan(1e-5f)
+                            );
+                            assertThat(
+                                "defaultScores: " + scoresDefault[j] + " bulkScores: " + scoresPanama[j],
+                                diff / scoresPanama[j],
+                                lessThan(1e-5f)
+                            );
+                        } else {
+                            assertEquals(scoresDefault[j], scoresPanama[j], 1e-2f);
+                        }
+                    }
+                    // Both scorers must consume exactly one batch per call.
+                    assertEquals(in.getFilePointer(), slice.getFilePointer());
+                }
+                assertEquals((long) (dimensions + 14) * numVectors, in.getFilePointer());
+            }
+        }
+    }
 }