
Exposing Lucene 6.x MinHash filter. (#20206)

Exposing Lucene 6.x MinHash token filter

Generates MinHash tokens from an incoming token stream; these tokens can be
used to estimate document similarity.

Closes #20149
Alexander Lin committed 9 years ago
commit f825e8f4cb
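
For context, MinHash signatures approximate set similarity: two documents whose token sets overlap heavily will, with high probability, keep the same minimum hash values in the same buckets. A minimal, hypothetical Java sketch of that comparison (not part of this commit; the method name and the array representation of a signature are illustrative only):

    // Hypothetical helper: compares two equal-length MinHash signatures slot by
    // slot; the fraction of matching slots estimates the Jaccard similarity of
    // the underlying token sets.
    static double estimateSimilarity(String[] signatureA, String[] signatureB) {
        int matches = 0;
        for (int i = 0; i < signatureA.length; i++) {
            if (signatureA[i].equals(signatureB[i])) {
                matches++;
            }
        }
        return (double) matches / signatureA.length;
    }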

+ 57 - 0
core/src/main/java/org/elasticsearch/index/analysis/MinHashTokenFilterFactory.java

@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.minhash.MinHashFilterFactory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@link TokenFilterFactory} adapter for Lucene's {@link MinHashFilterFactory}.
+ */
+public class MinHashTokenFilterFactory extends AbstractTokenFilterFactory {
+
+    private final MinHashFilterFactory minHashFilterFactory;
+
+    public MinHashTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+        super(indexSettings, name, settings);
+        minHashFilterFactory = new MinHashFilterFactory(convertSettings(settings));
+    }
+
+    @Override
+    public TokenStream create(TokenStream tokenStream) {
+        return minHashFilterFactory.create(tokenStream);
+    }
+
+    private Map<String, String> convertSettings(Settings settings) {
+        Map<String, String> settingMap = new HashMap<>();
+        settingMap.put("hashCount", settings.get("hash_count"));
+        settingMap.put("bucketCount", settings.get("bucket_count"));
+        settingMap.put("hashSetSize", settings.get("hash_set_size"));
+        settingMap.put("withRotation", settings.get("with_rotation"));
+        return settingMap;
+    }
+}
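
To make the delegation concrete, here is a minimal, self-contained sketch (not part of the commit; the class name and sample text are illustrative) that builds Lucene's MinHashFilterFactory directly from the same argument map that convertSettings() produces, then counts the emitted tokens:

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.minhash.MinHashFilterFactory;

    import java.io.IOException;
    import java.io.StringReader;
    import java.util.HashMap;
    import java.util.Map;

    public class MinHashSketch {
        public static void main(String[] args) throws IOException {
            // Same keys that convertSettings() maps the ES settings onto.
            Map<String, String> factoryArgs = new HashMap<>();
            factoryArgs.put("hashCount", "1");       // hash_count
            factoryArgs.put("bucketCount", "512");   // bucket_count
            factoryArgs.put("hashSetSize", "1");     // hash_set_size
            factoryArgs.put("withRotation", "true"); // with_rotation

            Tokenizer tokenizer = new WhitespaceTokenizer();
            tokenizer.setReader(new StringReader("the quick brown fox"));

            TokenStream stream = new MinHashFilterFactory(factoryArgs).create(tokenizer);
            stream.reset();
            int tokenCount = 0;
            while (stream.incrementToken()) {
                tokenCount++; // each emitted token is a min hash, not an input word
            }
            stream.end();
            stream.close();
            // With rotation enabled every bucket is filled, so this prints 512
            // even though the input only has four words.
            System.out.println(tokenCount);
        }
    }

The factory above does exactly this wiring, but sources the four values from the index analysis settings instead of hard-coded strings.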

+ 2 - 0
core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java

@@ -90,6 +90,7 @@ import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider;
 import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
 import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
 import org.elasticsearch.index.analysis.MappingCharFilterFactory;
+import org.elasticsearch.index.analysis.MinHashTokenFilterFactory;
 import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
 import org.elasticsearch.index.analysis.NGramTokenizerFactory;
 import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
@@ -214,6 +215,7 @@ public final class AnalysisModule {
         tokenFilters.register("edgeNGram", EdgeNGramTokenFilterFactory::new);
         tokenFilters.register("edge_ngram", EdgeNGramTokenFilterFactory::new);
         tokenFilters.register("shingle", ShingleTokenFilterFactory::new);
+        tokenFilters.register("min_hash", MinHashTokenFilterFactory::new);
         tokenFilters.register("unique", UniqueTokenFilterFactory::new);
         tokenFilters.register("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new));
         tokenFilters.register("trim", TrimTokenFilterFactory::new);

+ 70 - 0
core/src/test/java/org/elasticsearch/index/analysis/MinHashFilterFactoryTests.java

@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTokenStreamTestCase;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+public class MinHashFilterFactoryTests extends ESTokenStreamTestCase {
+    public void testDefault() throws IOException {
+        int default_hash_count = 1;
+        int default_bucket_size = 512;
+        int default_hash_set_size = 1;
+        Settings settings = Settings.builder()
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+            .build();
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("min_hash");
+        String source = "the quick brown fox";
+        Tokenizer tokenizer = new WhitespaceTokenizer();
+        tokenizer.setReader(new StringReader(source));
+
+        // with_rotation is true by default, and hash_set_size is 1, so even though the source doesn't
+        // have enough tokens to fill all the buckets, we still expect 512 tokens.
+        assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer),
+            default_hash_count * default_bucket_size * default_hash_set_size);
+    }
+
+    public void testSettings() throws IOException {
+        Settings settings = Settings.builder()
+            .put("index.analysis.filter.test_min_hash.type", "min_hash")
+            .put("index.analysis.filter.test_min_hash.hash_count", "1")
+            .put("index.analysis.filter.test_min_hash.bucket_count", "2")
+            .put("index.analysis.filter.test_min_hash.hash_set_size", "1")
+            .put("index.analysis.filter.test_min_hash.with_rotation", false)
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+            .build();
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("test_min_hash");
+        String source = "sushi";
+        Tokenizer tokenizer = new WhitespaceTokenizer();
+        tokenizer.setReader(new StringReader(source));
+
+        // despite the fact that bucket_count is 2 and hash_set_size is 1,
+        // because with_rotation is false, we only expect 1 token here.
+        assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer), 1);
+    }
+}

+ 3 - 1
docs/reference/analysis/tokenfilters.asciidoc

@@ -87,4 +87,6 @@ include::tokenfilters/apostrophe-tokenfilter.asciidoc[]
 
 include::tokenfilters/decimal-digit-tokenfilter.asciidoc[]
 
-include::tokenfilters/fingerprint-tokenfilter.asciidoc[]
+include::tokenfilters/fingerprint-tokenfilter.asciidoc[]
+
+include::tokenfilters/minhash-tokenfilter.asciidoc[]

+ 22 - 0
docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc

@@ -0,0 +1,22 @@
+[[analysis-minhash-tokenfilter]]
+=== Minhash Token Filter
+
+A token filter of type `min_hash` hashes each token of the token stream and divides
+the resulting hashes into buckets, keeping the lowest-valued hashes per
+bucket. It then returns these hashes as tokens.
+
+The following settings can be used to configure a `min_hash` token filter.
+
+[cols="<,<", options="header",]
+|=======================================================================
+|Setting |Description
+|`hash_count` |The number of hashes to hash the token stream with. Defaults to `1`.
+
+|`bucket_count` |The number of buckets to divide the minhashes into. Defaults to `512`.
+
+|`hash_set_size` |The number of minhashes to keep per bucket. Defaults to `1`.
+
+|`with_rotation` |Whether to fill empty buckets with the value of the first non-empty
bucket to its circular right. Only takes effect if `hash_set_size` is equal to one.
Defaults to `true` if `bucket_count` is greater than one, else `false`.
+|=======================================================================

+ 8 - 7
test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java

@@ -53,6 +53,7 @@ import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory;
 import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
 import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
 import org.elasticsearch.index.analysis.MappingCharFilterFactory;
+import org.elasticsearch.index.analysis.MinHashTokenFilterFactory;
 import org.elasticsearch.index.analysis.MultiTermAwareComponent;
 import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
 import org.elasticsearch.index.analysis.NGramTokenizerFactory;
@@ -93,7 +94,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
-/** 
+/**
  * Alerts us if new analyzers are added to lucene, so we don't miss them.
  * <p>
  * If we don't want to expose one for a specific reason, just map it to Void.
@@ -115,11 +116,11 @@ public class AnalysisFactoryTestCase extends ESTestCase {
         .put("thai",          ThaiTokenizerFactory.class)
         .put("uax29urlemail", UAX29URLEmailTokenizerFactory.class)
         .put("whitespace",    WhitespaceTokenizerFactory.class)
-        
+
         // this one "seems to mess up offsets". probably shouldn't be a tokenizer...
         .put("wikipedia",     Void.class)
         .immutableMap();
-    
+
     static final Map<String,Class<?>> KNOWN_TOKENFILTERS = new MapBuilder<String,Class<?>>()
         // exposed in ES
         .put("apostrophe",                ApostropheFilterFactory.class)
@@ -184,6 +185,7 @@ public class AnalysisFactoryTestCase extends ESTestCase {
         .put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class)
         .put("serbiannormalization",      SerbianNormalizationFilterFactory.class)
         .put("shingle",                   ShingleTokenFilterFactory.class)
+        .put("minhash",                   MinHashTokenFilterFactory.class)
         .put("snowballporter",            SnowballTokenFilterFactory.class)
         .put("soraninormalization",       SoraniNormalizationFilterFactory.class)
         .put("soranistem",                StemmerTokenFilterFactory.class)
@@ -199,7 +201,7 @@ public class AnalysisFactoryTestCase extends ESTestCase {
         .put("type",                      KeepTypesFilterFactory.class)
         .put("uppercase",                 UpperCaseTokenFilterFactory.class)
         .put("worddelimiter",             WordDelimiterTokenFilterFactory.class)
-                
+
         // TODO: these tokenfilters are not yet exposed: useful?
 
         // suggest stop
@@ -228,16 +230,15 @@ public class AnalysisFactoryTestCase extends ESTestCase {
         .put("fingerprint",               Void.class)
         // for tee-sinks
         .put("daterecognizer",            Void.class)
-        .put("minhash",                   Void.class)
 
         .immutableMap();
-    
+
     static final Map<String,Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String,Class<?>>()
         // exposed in ES
         .put("htmlstrip",      HtmlStripCharFilterFactory.class)
         .put("mapping",        MappingCharFilterFactory.class)
         .put("patternreplace", PatternReplaceCharFilterFactory.class)
-                
+
         // TODO: these charfilters are not yet exposed: useful?
         // handling of zwnj for persian
         .put("persian",        Void.class)