lucene入門
阿新 • 發佈:2020-09-16
一、專案結構
二、pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>A01practice</artifactId>
    <version>1.0-SNAPSHOT</version>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.9</source>
                    <target>1.9</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <dependencies>
        <!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-core -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-core</artifactId>
            <version>8.1.0</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-analyzers-common -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-analyzers-common</artifactId>
            <version>8.1.0</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-queryparser -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-queryparser</artifactId>
            <version>8.1.0</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/commons-io/commons-io -->
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
            <version>2.8.0</version>
        </dependency>
        <!-- Third-party jar, installed manually into the local repository -->
        <dependency>
            <groupId>org.wltea.ik-analyzer</groupId>
            <artifactId>ik-analyzer</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
三、程式碼
package com.wuxi.lucene;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.*;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.junit.Test;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.File;

/**
 * Introductory Lucene (8.x) examples: building an index from a directory of
 * files, term / range / parsed queries, IK tokenization, and document
 * delete / update operations.
 *
 * <p>All writers, readers and token streams are opened with
 * try-with-resources: the original code leaked them (and left the index
 * write lock held) whenever an exception occurred before {@code close()}.
 */
public class LuceneFirst {

    // Index and source-file locations, defined once instead of being
    // repeated in every method.
    private static final String INDEX_DIR = "F:\\java\\lucene\\resource\\index";
    private static final String FILE_DIR = "F:\\java\\lucene\\resource\\file";

    /**
     * Creates the index: one Lucene document per file under {@link #FILE_DIR}.
     *
     * @throws Exception on any I/O or indexing failure
     */
    @Test
    public void createIndex() throws Exception {
        // 1. Directory: where the index lives. On disk here; a RAMDirectory
        //    could be used instead to keep the index in memory.
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        // 2. IndexWriter over that directory, analyzing text with IKAnalyzer.
        IndexWriterConfig config = new IndexWriterConfig(new IKAnalyzer());
        // try-with-resources releases the writer (and the index write lock)
        // even if reading one of the source files throws mid-loop.
        try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
            // 3. Read every file on disk and build one document per file.
            File dir = new File(FILE_DIR);
            File[] files = dir.listFiles();
            if (files == null) {
                // listFiles() returns null when the path does not exist or is
                // not a directory; the original code NPE'd here instead.
                throw new IllegalStateException("Not a readable directory: " + FILE_DIR);
            }
            for (File f : files) {
                String fileName = f.getName();
                String filePath = f.getPath();
                String fileContent = FileUtils.readFileToString(f, "utf-8");
                long fileSize = FileUtils.sizeOf(f);

                // 4. Build fields. Arguments: field name, value, store flag.
                Field fieldName = new TextField("name", fileName, Field.Store.YES);         // stored + analyzed
                Field fieldPath = new StoredField("path", filePath);                        // stored only
                Field fieldContent = new TextField("content", fileContent, Field.Store.NO); // analyzed, not stored
                Field fieldSizeValue = new LongPoint("size", fileSize);                     // indexed for range queries
                Field fieldSizeStore = new StoredField("size", fileSize);                   // stored copy of the size

                Document document = new Document();
                document.add(fieldName);
                document.add(fieldPath);
                document.add(fieldContent);
                document.add(fieldSizeValue);
                document.add(fieldSizeStore);

                // 5. Write the document to the index.
                indexWriter.addDocument(document);
            }
        } // 6. IndexWriter closed automatically.
    }

    /**
     * Term query: exact match on the analyzed token "中文" in the content field.
     *
     * @throws Exception on any I/O or search failure
     */
    @Test
    public void searchIndex() throws Exception {
        // 1. Directory pointing at the index location.
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        // 2. Reader is closed automatically, even when the search throws.
        try (DirectoryReader indexReader = DirectoryReader.open(directory)) {
            // 3. Searcher wraps the reader.
            IndexSearcher indexSearcher = new IndexSearcher(indexReader);
            // 4. TermQuery: exact (un-analyzed) token match.
            Query query = new TermQuery(new Term("content", "中文"));
            // 5. Execute; second argument caps the number of returned hits.
            TopDocs topDocs = indexSearcher.search(query, 10);
            printResults(indexSearcher, topDocs);
        }
    }

    /**
     * Prints the hit count and the stored fields of every hit. Shared by all
     * search tests (the original duplicated this loop three times).
     */
    private void printResults(IndexSearcher indexSearcher, TopDocs topDocs) throws Exception {
        System.out.println("查詢總記錄數:" + topDocs.totalHits);
        for (ScoreDoc doc : topDocs.scoreDocs) {
            // Resolve the internal doc id back to the stored document.
            Document document = indexSearcher.doc(doc.doc);
            System.out.println(document.get("name"));
            System.out.println(document.get("path"));
            System.out.println(document.get("size"));
            // "content" is Field.Store.NO, so document.get("content") would be null.
            System.out.println("----------------寂寞的分割線");
        }
    }

    /**
     * Demonstrates IK tokenization of mixed Chinese/English text.
     *
     * @throws Exception on any analysis failure
     */
    @Test
    public void testTokenStream() throws Exception {
        // 1. IKAnalyzer instead of StandardAnalyzer for proper Chinese segmentation.
        //    Both the analyzer and the stream are AutoCloseable.
        try (IKAnalyzer analyzer = new IKAnalyzer();
             // 2. Obtain a TokenStream from the analyzer.
             TokenStream tokenStream = analyzer.tokenStream("",
                     "Lucene是apache軟體基金會4 jakarta專案組的一個子專案,是一個開放原始碼的全文檢索引擎工具包,但它不是一個完整的全文檢索引擎,而是一個全文檢索引擎的架構,提供了完整的查詢引擎和索引引擎,部分文字分析引擎(英文與德文兩種西方語言)。")) {
            // 3. Attribute acts as a cursor onto the current token's text.
            CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
            // 4. reset() is mandatory before incrementToken(); omitting it throws.
            tokenStream.reset();
            // 5. Iterate over the tokens.
            while (tokenStream.incrementToken()) {
                System.out.println(charTermAttribute.toString());
            }
            // The TokenStream contract requires end() after the last token.
            tokenStream.end();
        } // 6. Stream and analyzer closed automatically.
    }

    /**
     * Deletes every document (and every index segment) in the index.
     *
     * @throws Exception on any I/O failure
     */
    @Test
    public void deleteAllDocument() throws Exception {
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        IndexWriterConfig config = new IndexWriterConfig(new IKAnalyzer());
        try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
            indexWriter.deleteAll();
        }
    }

    /**
     * Deletes only the documents whose "name" field contains the token "aaa".
     *
     * @throws Exception on any I/O failure
     */
    @Test
    public void deleteDocumentByQuery() throws Exception {
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        IndexWriterConfig config = new IndexWriterConfig(new IKAnalyzer());
        try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
            indexWriter.deleteDocuments(new Term("name", "aaa"));
        }
    }

    /**
     * Updates (delete-then-add) every document matching name:"aaa" with a new
     * document.
     *
     * @throws Exception on any I/O failure
     */
    @Test
    public void updateDocument() throws Exception {
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        IndexWriterConfig config = new IndexWriterConfig(new IKAnalyzer());
        try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
            Document document = new Document();
            document.add(new TextField("name", "你好世界", Field.Store.YES));
            document.add(new TextField("name1", "你好世界1", Field.Store.YES));
            document.add(new TextField("name2", "你好世界2", Field.Store.YES));
            indexWriter.updateDocument(new Term("name", "aaa"), document);
        }
    }

    /**
     * Range query over the LongPoint "size" field (0..100 bytes inclusive).
     *
     * @throws Exception on any I/O or search failure
     */
    @Test
    public void testRangeQuery() throws Exception {
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        try (DirectoryReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = new IndexSearcher(indexReader);
            // Uppercase L suffix: the original's "0l" reads as the digit 1.
            Query query = LongPoint.newRangeQuery("size", 0L, 100L);
            TopDocs topDocs = indexSearcher.search(query, 10);
            printResults(indexSearcher, topDocs);
        }
    }

    /**
     * QueryParser search: the query string is analyzed with IKAnalyzer, so
     * "這是一段中文" matches documents containing any of its tokens in "name".
     *
     * @throws Exception on any parse, I/O or search failure
     */
    @Test
    public void testQueryParser() throws Exception {
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        try (DirectoryReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = new IndexSearcher(indexReader);
            QueryParser queryParser = new QueryParser("name", new IKAnalyzer());
            Query query = queryParser.parse("這是一段中文");
            TopDocs topDocs = indexSearcher.search(query, 10);
            printResults(indexSearcher, topDocs);
        }
    }
}
四、IKAnalyzer.cfg.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer 擴充套件配置</comment>
    <!-- User-defined extension dictionary -->
    <entry key="ext_dict">hotword.dic;</entry>
    <!-- User-defined extension stop-word dictionary -->
    <entry key="ext_stopwords">stopword.dic;</entry>
</properties>