Exception in thread "main" java.lang.ClassCastException: class [B cannot be cast to class [C ([B and [C are in module java.base of loader 'bootstrap')

z8dt9xmd asked on 2021-07-03 in Java
Follow (0) | Answers (1) | Views (9734)

I am trying to run a Java project from cmd and I am getting this error:


    Exception in thread "main" java.lang.ClassCastException: class [B cannot be cast to class [C ([B and [C are in module java.base of loader 'bootstrap')
        at jodd.util.UnsafeUtil.getChars(UnsafeUtil.java:67)
        at jodd.json.JsonParser.parse(JsonParser.java:201)
        at IndexTester.main(IndexTester.java:78)

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.util.Map;

    import jodd.json.JsonParser;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.SlowCompositeReaderWrapper;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.queryparser.classic.ParseException;
    import org.apache.lucene.queryparser.classic.QueryParser;
    import org.apache.lucene.queryparser.simple.SimpleQueryParser;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class IndexTester {

        public static void main(String[] args) throws IOException, ParseException {

            if (args.length != 3) {
                System.err.println("Incorrect number of arguments! Usage:");
                System.err.println("");
                System.err.println("java IndexTester should_clear_index path_to_data path_to_index ");
                System.err.println("\tif should_clear_index is \"1\", the index will be rebuilt. Otherwise, it will try and use an existing index.");
                System.err.println("\tpath_to_index should point to an empty directory somewhere.");
                System.exit(-1);
            }

            String shouldClearIndex = args[0];
            String inputPath = args[1]; // where to find the file containing the JSON to index
            String idxDirPath = args[2]; // where to put/find the Lucene index we want to search

            File inputFile = new File(inputPath);

            // set up analyzer:
            StandardAnalyzer analyzer = new StandardAnalyzer();

            // set up the index
            File idxDir = new File(idxDirPath);

            Directory dir = FSDirectory.open(idxDir.toPath());

            if (shouldClearIndex.compareTo("1") == 0) {
                System.out.println("Rebuilding index...");

                IndexWriterConfig idxConfig = new IndexWriterConfig(analyzer);
                idxConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
                IndexWriter idxWriter = new IndexWriter(dir, idxConfig);

                // Now, populate the index:
                int idx = 0;
                JsonParser jParser = new JsonParser();

                for (String line : Files.readAllLines(inputFile.toPath(), StandardCharsets.UTF_8)) {
                    // On large amounts of data, this can take a while
                    if (idx % 10000 == 0) {
                        System.out.println(idx);
                    }
                    idx++;

                    // each line of the input file is a serialized JSON object
                    Map j = jParser.parse(line);
                    // simple types (strings, numbers, etc.) are handled like so:
                    String title = (String)j.get("title");
                    // complex types (lists or dicts) get turned into instances of
                    // java.util.Map and java.util.List.
                    String ab = (String)j.get("abstract");

                    // Look at the docs for TextField to see about other types- Lucene can index numbers, dates, etc.
                    Field tiField = new Field("title", title, TextField.TYPE_STORED); 
                    // The TYPE_STORED directive tells Lucene to actually store the original token in the index. This is handy 
                    // for all sorts of reasons!

                    // set up any additional fields here

                    Document thisDoc = new Document();
                    thisDoc.add(tiField);

                    // add our document to the index
                    idxWriter.addDocument(thisDoc);

                }

                System.out.println("Done!");

                System.out.println(idx + " documents indexed.");

                idxWriter.close();

            }

            do {
                // Open up the index for querying:
                DirectoryReader reader = DirectoryReader.open(dir);

                // Tell me about the index (comment in/out as needed- this may be useful for debugging):
    //          LeafReader slowC = SlowCompositeReaderWrapper.wrap(reader); 
    //          Terms idxTerms = slowC.terms("title"); // change to a different field as needed
    //          TermsEnum tEnum = idxTerms.iterator(null);
    //          System.out.println("Terms in the index for the title field:");
    //          while (tEnum.next() != null) {
    //              String s = tEnum.term().utf8ToString();         
    //              System.out.println(s + "\t" + tEnum.docFreq());
    //          }

                // Now search
                IndexSearcher searcher = new IndexSearcher(reader);

                // Things to note re: QueryParser:
                // 1.   The first argument is the "default" field to search- 
                //      if nothing else is specified, in the query, this is what
                //      will be searched.
                // 2.   You always want to make sure to use the same Analyzer for your
                //      query as you did when you built the index!
                //
                // Other query parser classes will behave similarly, but may have different argument ordering.

                QueryParser qParser = new QueryParser("title", analyzer); 

                System.out.print("Query: ");
                String queryText = System.console().readLine();

                if (queryText.compareTo("") != 0) {

                    Query q = qParser.parse(queryText);
                    TopDocs results = searcher.search(q,  10);
                    System.out.println("Got " + results.totalHits + " hits!");
                    for (ScoreDoc d : results.scoreDocs) {
                        System.out.println(d.doc + "\t" + d.score);
                        Document res = reader.document(d.doc);
                        System.out.println(res.getField("title").stringValue());
                    }
                }

            } while (true); // keep querying until user hits ctrl-C

        }

    }

This is my code, and this is my .txt file: https://openeclass.uom.gr/modules/document/file.php/dai148/%ce%94%ce%b9%ce%ac%ce%bb%ce%b5%ce%be%ce%b7%2006%20-%20%ce%94%ce%b9%ce%b1%ce%b2%ce%b1%ce%b8%ce%bc%ce%b9%cf%83%ce%bc%ce%ad%ce%bd%ce%b7%20%ce%91%ce%bd%ce%ac%ce%ba%cf%84%ce%b7%cf%83%ce%b7%2c%20%ce%9c%ce%bf%ce%bd%cf%84%ce%ad%ce%bb%ce%bf%20%ce%94%ce%b9%ce%b1%ce%bd%cf%85%cf%83%ce%bc%ce%b1%cf%84%ce%b9%ce%ba%ce%bf%cf%8d%20%ce%a7%cf%8e%cf%81%ce%bf%cf%85/使用%20lucene/data.txt.zip
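For reference, the indexer reads a "title" and an "abstract" key out of every parsed line, so each line of data.txt is expected to be one self-contained JSON object, roughly like the following (the values here are made up purely for illustration):

    {"title": "Some article title", "abstract": "A short abstract describing the article."}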

pkmbmrz7 1#

Please switch to the latest Jodd JSON v6.
The problem is most likely caused by UnsafeUtil.getChars. You can do the following:

    jParser.parse(line.toCharArray());

i.e., skip using UnsafeUtil.getChars().
Newer versions of Jodd no longer use the Unsafe class.
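For concreteness, here is a minimal sketch of how that one-line change slots into the indexing loop from the question (same imports as the original file). It assumes the jodd-json version already on the classpath exposes the parse(char[]) overload that the suggested call relies on; alternatively, upgrade the dependency to Jodd JSON v6 and keep parse(line) unchanged:

    JsonParser jParser = new JsonParser();

    for (String line : Files.readAllLines(inputFile.toPath(), StandardCharsets.UTF_8)) {
        // Passing a char[] makes JsonParser skip UnsafeUtil.getChars(), which fails on
        // Java 9+ because compact strings are backed by byte[] rather than char[]
        // (hence "[B cannot be cast to [C").
        Map j = jParser.parse(line.toCharArray());

        String title = (String) j.get("title");
        // ... build and add the Lucene Document exactly as in the original code
    }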
