/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.demo;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.demo.knn.DemoEmbeddings;
import org.apache.lucene.demo.knn.KnnVectorDict;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.KnnFloatVectorQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

/** Simple command-line based search demo. */
public class SearchFiles {

  private SearchFiles() {}

  /** Runs the demo: parses the command-line options, opens the index, and executes queries. */
  public static void main(String[] args) throws Exception {
    String usage =
        "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage] [-knn_vector knnHits]\n\nSee http://lucene.apache.org/core/9_0_0/demo/ for details.";
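    // For example (index location and query text are illustrative, not required values):
    //   java org.apache.lucene.demo.SearchFiles -index index -query "apache lucene" -paging 5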
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
      System.out.println(usage);
      System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    int knnVectors = 0;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
      switch (args[i]) {
        case "-index":
          index = args[++i];
          break;
        case "-field":
          field = args[++i];
          break;
        case "-queries":
          queries = args[++i];
          break;
        case "-query":
          queryString = args[++i];
          break;
        case "-repeat":
          repeat = Integer.parseInt(args[++i]);
          break;
        case "-raw":
          raw = true;
          break;
        case "-paging":
          hitsPerPage = Integer.parseInt(args[++i]);
          if (hitsPerPage <= 0) {
            System.err.println("There must be at least 1 hit per page.");
            System.exit(1);
          }
          break;
        case "-knn_vector":
          knnVectors = Integer.parseInt(args[++i]);
          break;
        default:
          System.err.println("Unknown argument: " + args[i]);
          System.exit(1);
      }
    }

    DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();
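    // The kNN demo relies on a token-to-vector dictionary stored alongside the index by
    // IndexFiles; it is only opened when -knn_vector is requested.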
    KnnVectorDict vectorDict = null;
    if (knnVectors > 0) {
      vectorDict = new KnnVectorDict(reader.directory(), IndexFiles.KNN_DICT);
    }
    BufferedReader in;
    if (queries != null) {
      in = Files.newBufferedReader(Paths.get(queries), StandardCharsets.UTF_8);
    } else {
      in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    }
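    // Parse queries against the chosen default field with the same analyzer used at index time.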
    QueryParser parser = new QueryParser(field, analyzer);
    while (true) {
      if (queries == null && queryString == null) { // prompt the user
        System.out.println("Enter query: ");
      }

      String line = queryString != null ? queryString : in.readLine();

      if (line == null) { // end of input
        break;
      }

      line = line.trim();
      if (line.length() == 0) {
        break;
      }

      Query query = parser.parse(line);
      if (knnVectors > 0) {
        query = addSemanticQuery(query, vectorDict, knnVectors);
      }
      System.out.println("Searching for: " + query.toString(field));

      if (repeat > 0) { // repeat & time as benchmark
        Date start = new Date();
        for (int i = 0; i < repeat; i++) {
          searcher.search(query, 100);
        }
        Date end = new Date();
        System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
      }

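      // Interactive paging is only offered when queries come from the console,
      // not from -queries or -query.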
      doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

      if (queryString != null) {
        break;
      }
    }
    if (vectorDict != null) {
      vectorDict.close();
    }
    reader.close();
  }

  /**
   * This demonstrates a typical paging search scenario, where the search engine presents pages of
   * size n to the user. The user can then go to the next page if interested in the next hits.
   *
   * <p>When the query is executed for the first time, only enough results are collected to fill 5
   * result pages. If the user wants to page beyond this limit, the query is executed another time
   * and all hits are collected.
   */
  public static void doPagingSearch(
      BufferedReader in,
      IndexSearcher searcher,
      Query query,
      int hitsPerPage,
      boolean raw,
      boolean interactive)
      throws IOException {

    // Collect enough docs to show 5 pages
    TopDocs results = searcher.search(query, 5 * hitsPerPage);
    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = Math.toIntExact(results.totalHits.value);
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);

    while (true) {
      if (end > hits.length) {
        System.out.println(
            "Only results 1 - "
                + hits.length
                + " of "
                + numTotalHits
                + " total matching documents collected.");
        System.out.println("Collect more (y/n) ?");
        String line = in.readLine();
        if (line == null || line.length() == 0 || line.charAt(0) == 'n') {
          break;
        }

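        // Re-run the query, this time collecting every hit so the user can page past the
        // initially collected five pages.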
        hits = searcher.search(query, numTotalHits).scoreDocs;
      }

      end = Math.min(hits.length, start + hitsPerPage);

      StoredFields storedFields = searcher.storedFields();
      for (int i = start; i < end; i++) {
        if (raw) { // output raw format
          System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
          continue;
        }

        Document doc = storedFields.document(hits[i].doc);
        String path = doc.get("path");
        if (path != null) {
          System.out.println((i + 1) + ". " + path);
          String title = doc.get("title");
          if (title != null) {
222            System.out.println("   Title: " + doc.get("title"));
          }
        } else {
          System.out.println((i + 1) + ". " + "No path for this document");
        }
      }

      if (!interactive || end == 0) {
        break;
      }

      if (numTotalHits >= end) {
        boolean quit = false;
        while (true) {
          System.out.print("Press ");
          if (start - hitsPerPage >= 0) {
            System.out.print("(p)revious page, ");
          }
          if (start + hitsPerPage < numTotalHits) {
            System.out.print("(n)ext page, ");
          }
          System.out.println("(q)uit or enter number to jump to a page.");

          String line = in.readLine();
          if (line == null || line.length() == 0 || line.charAt(0) == 'q') {
            quit = true;
            break;
          }
          if (line.charAt(0) == 'p') {
            start = Math.max(0, start - hitsPerPage);
            break;
          } else if (line.charAt(0) == 'n') {
            if (start + hitsPerPage < numTotalHits) {
              start += hitsPerPage;
            }
            break;
          } else {
            int page = Integer.parseInt(line);
            if ((page - 1) * hitsPerPage < numTotalHits) {
              start = (page - 1) * hitsPerPage;
              break;
            } else {
              System.out.println("No such page");
            }
          }
        }
        if (quit) break;
        end = Math.min(numTotalHits, start + hitsPerPage);
      }
    }
  }

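  /**
   * Wraps the parsed keyword query with a semantic (kNN) clause. The terms the keyword query
   * targets in the "contents" field are concatenated, embedded with {@link DemoEmbeddings}, and
   * searched against the "contents-vector" field; the keyword and vector queries are then combined
   * so that either can match.
   */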
  private static Query addSemanticQuery(Query query, KnnVectorDict vectorDict, int k)
      throws IOException {
    StringBuilder semanticQueryText = new StringBuilder();
    QueryFieldTermExtractor termExtractor = new QueryFieldTermExtractor("contents");
    query.visit(termExtractor);
    for (String term : termExtractor.terms) {
      semanticQueryText.append(term).append(' ');
    }
    if (semanticQueryText.length() > 0) {
      KnnFloatVectorQuery knnQuery =
          new KnnFloatVectorQuery(
              "contents-vector",
              new DemoEmbeddings(vectorDict).computeEmbedding(semanticQueryText.toString()),
              k);
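      // Combine the keyword query and the vector query as SHOULD clauses so that a document
      // matching either one can score.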
      BooleanQuery.Builder builder = new BooleanQuery.Builder();
      builder.add(query, BooleanClause.Occur.SHOULD);
      builder.add(knnQuery, BooleanClause.Occur.SHOULD);
      return builder.build();
    }
    return query;
  }

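  /** Collects the terms a query matches against a single field, ignoring MUST_NOT clauses. */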
  private static class QueryFieldTermExtractor extends QueryVisitor {
    private final String field;
    private final List<String> terms = new ArrayList<>();

    QueryFieldTermExtractor(String field) {
      this.field = field;
    }

    @Override
    public boolean acceptField(String field) {
      return field.equals(this.field);
    }

    @Override
    public void consumeTerms(Query query, Term... terms) {
      for (Term term : terms) {
        this.terms.add(term.text());
      }
    }

    @Override
    public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) {
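      // Do not descend into MUST_NOT clauses: excluded terms should not feed the semantic query.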
      if (occur == BooleanClause.Occur.MUST_NOT) {
        return QueryVisitor.EMPTY_VISITOR;
      }
      return this;
    }
  }
}