IndexDatabase.java (forked from oracle/opengrok) · 2222 lines (1980 loc) · 85.6 KB
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* See LICENSE.txt included in this distribution for the specific
* language governing permissions and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at LICENSE.txt.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Portions Copyright (c) 2017, 2020, Chris Fraire <[email protected]>.
*/
package org.opengrok.indexer.index;
import java.io.BufferedInputStream;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.zip.GZIPOutputStream;
import jakarta.ws.rs.client.ClientBuilder;
import jakarta.ws.rs.client.Entity;
import jakarta.ws.rs.core.Response;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.util.BytesRef;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.VisibleForTesting;
import org.opengrok.indexer.analysis.AbstractAnalyzer;
import org.opengrok.indexer.analysis.AnalyzerFactory;
import org.opengrok.indexer.analysis.AnalyzerGuru;
import org.opengrok.indexer.analysis.Ctags;
import org.opengrok.indexer.analysis.Definitions;
import org.opengrok.indexer.analysis.NullableNumLinesLOC;
import org.opengrok.indexer.analysis.NumLinesLOC;
import org.opengrok.indexer.configuration.PathAccepter;
import org.opengrok.indexer.configuration.Project;
import org.opengrok.indexer.configuration.RuntimeEnvironment;
import org.opengrok.indexer.history.FileCollector;
import org.opengrok.indexer.history.HistoryGuru;
import org.opengrok.indexer.history.Repository;
import org.opengrok.indexer.history.RepositoryInfo;
import org.opengrok.indexer.history.RepositoryWithHistoryTraversal;
import org.opengrok.indexer.logger.LoggerFactory;
import org.opengrok.indexer.search.QueryBuilder;
import org.opengrok.indexer.util.ForbiddenSymlinkException;
import org.opengrok.indexer.util.IOUtils;
import org.opengrok.indexer.util.ObjectPool;
import org.opengrok.indexer.util.Progress;
import org.opengrok.indexer.util.Statistics;
import org.opengrok.indexer.util.TandemPath;
import org.opengrok.indexer.web.Util;
import static org.opengrok.indexer.index.IndexerUtil.getWebAppHeaders;
import static org.opengrok.indexer.web.ApiUtils.waitForAsyncApi;
/**
* This class is used to create / update the index databases. Currently, we use
* one index database per project.
*
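* <p>Illustrative usage, a minimal sketch mirroring what {@code updateAll()} does
* internally for each project (not a prescribed public entry point):
* <pre>{@code
*     IndexDatabase db = new IndexDatabase(project);
*     db.addIndexChangedListener(listener);
*     db.update();
* }</pre>
*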
* @author Trond Norbye
* @author Lubos Kosco, update for Lucene 4.x, 5.x
*/
public class IndexDatabase {
private static final Logger LOGGER = LoggerFactory.getLogger(IndexDatabase.class);
private static final Comparator<File> FILENAME_COMPARATOR = Comparator.comparing(File::getName);
private static final Set<String> CHECK_FIELDS;
private static final Set<String> REVERT_COUNTS_FIELDS;
private static final Object INSTANCE_LOCK = new Object();
/**
* Key is canonical path; Value is the first accepted, absolute path. Map
* is ordered by canonical length (ASC) and then canonical value (ASC).
* The map is accessed by a single thread running indexDown().
*/
private final Map<String, IndexedSymlink> indexedSymlinks = new TreeMap<>(
Comparator.comparingInt(String::length).thenComparing(o -> o));
private final Project project;
private FSDirectory indexDirectory;
private IndexReader reader;
private IndexWriter writer;
private IndexAnalysisSettings3 settings;
private PendingFileCompleter completer;
private NumLinesLOCAggregator countsAggregator;
private TermsEnum uidIter;
private PostingsEnum postsIter;
private PathAccepter pathAccepter;
private AnalyzerGuru analyzerGuru;
private File xrefDir;
private boolean interrupted;
private CopyOnWriteArrayList<IndexChangedListener> listeners;
private File dirtyFile;
private final Object lock = new Object();
private boolean dirty; // Whether the index was modified either by adding or removing a document.
private boolean running;
private boolean isCountingDeltas;
private boolean isWithDirectoryCounts;
private List<String> directories;
private LockFactory lockFactory;
private final BytesRef emptyBR = new BytesRef("");
// Directory where we store indexes
public static final String INDEX_DIR = "index";
public static final String XREF_DIR = "xref";
public static final String SUGGESTER_DIR = "suggester";
private final IndexDownArgsFactory indexDownArgsFactory;
/**
* Create a new instance of the Index Database. Use this constructor if you
* don't use any projects
*
* @throws java.io.IOException if an error occurs while creating directories
*/
public IndexDatabase() throws IOException {
this(null);
}
/**
* Create a new instance of an Index Database for a given project.
*
* @param project the project to create the database for
* @param factory {@link IndexDownArgsFactory} instance
* @throws java.io.IOException if an error occurs while creating directories
*/
public IndexDatabase(Project project, IndexDownArgsFactory factory) throws IOException {
indexDownArgsFactory = factory;
this.project = project;
lockFactory = NoLockFactory.INSTANCE;
initialize();
}
@VisibleForTesting
IndexDatabase(Project project) throws IOException {
this(project, new IndexDownArgsFactory());
}
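// CHECK_FIELDS and REVERT_COUNTS_FIELDS (initialized below) restrict which stored fields are
// loaded when reading existing documents: CHECK_FIELDS is used when re-checking per-file
// analysis settings (see checkSettings()), while REVERT_COUNTS_FIELDS carries the fields
// needed to revert #Lines/LOC aggregates when a document is removed (see removeFileDocUid()).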
static {
CHECK_FIELDS = new HashSet<>();
CHECK_FIELDS.add(QueryBuilder.TYPE);
REVERT_COUNTS_FIELDS = new HashSet<>();
REVERT_COUNTS_FIELDS.add(QueryBuilder.D);
REVERT_COUNTS_FIELDS.add(QueryBuilder.PATH);
REVERT_COUNTS_FIELDS.add(QueryBuilder.NUML);
REVERT_COUNTS_FIELDS.add(QueryBuilder.LOC);
}
/**
* Update the index database for all the projects.
*
* @param listener where to signal the changes to the database
* @throws IOException if an error occurs
*/
static CountDownLatch updateAll(IndexChangedListener listener) throws IOException {
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
List<IndexDatabase> dbs = new ArrayList<>();
if (env.hasProjects()) {
for (Project project : env.getProjectList()) {
dbs.add(new IndexDatabase(project));
}
} else {
dbs.add(new IndexDatabase());
}
IndexerParallelizer parallelizer = RuntimeEnvironment.getInstance().getIndexerParallelizer();
CountDownLatch latch = new CountDownLatch(dbs.size());
for (IndexDatabase d : dbs) {
final IndexDatabase db = d;
if (listener != null) {
db.addIndexChangedListener(listener);
}
parallelizer.getFixedExecutor().submit(() -> {
try {
db.update();
} catch (Throwable e) {
LOGGER.log(Level.SEVERE,
String.format("Problem updating index database in directory %s: ",
db.indexDirectory.getDirectory()), e);
} finally {
latch.countDown();
}
});
}
return latch;
}
/**
* Update the index database for a number of sub-directories.
*
* @param listener where to signal the changes to the database
* @param paths list of paths to be indexed
*/
public static void update(IndexChangedListener listener, List<String> paths) {
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
IndexerParallelizer parallelizer = env.getIndexerParallelizer();
List<IndexDatabase> dbs = new ArrayList<>();
for (String path : paths) {
Project project = Project.getProject(path);
if (project == null && env.hasProjects()) {
LOGGER.log(Level.WARNING, "Could not find a project for \"{0}\"", path);
} else {
IndexDatabase db;
try {
if (project == null) {
db = new IndexDatabase();
} else {
db = new IndexDatabase(project);
}
int idx = dbs.indexOf(db);
if (idx != -1) {
db = dbs.get(idx);
}
if (db.addDirectory(path)) {
if (idx == -1) {
dbs.add(db);
}
} else {
LOGGER.log(Level.WARNING, "Directory does not exist \"{0}\" .", path);
}
} catch (IOException e) {
LOGGER.log(Level.WARNING, "An error occurred while updating index", e);
}
}
for (final IndexDatabase db : dbs) {
db.addIndexChangedListener(listener);
parallelizer.getFixedExecutor().submit(() -> {
try {
db.update();
} catch (Throwable e) {
LOGGER.log(Level.SEVERE, "An error occurred while updating index", e);
}
});
}
}
}
@SuppressWarnings("PMD.CollapsibleIfStatements")
private void initialize() throws IOException {
synchronized (INSTANCE_LOCK) {
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
File indexDir = new File(env.getDataRootFile(), INDEX_DIR);
if (project != null) {
indexDir = new File(indexDir, project.getPath());
}
if (!indexDir.exists() && !indexDir.mkdirs()) {
// to avoid race conditions, just recheck..
if (!indexDir.exists()) {
throw new FileNotFoundException("Failed to create root directory [" + indexDir.getAbsolutePath() + "]");
}
}
lockFactory = pickLockFactory(env);
indexDirectory = FSDirectory.open(indexDir.toPath(), lockFactory);
pathAccepter = env.getPathAccepter();
analyzerGuru = new AnalyzerGuru();
xrefDir = new File(env.getDataRootFile(), XREF_DIR);
listeners = new CopyOnWriteArrayList<>();
dirtyFile = new File(indexDir, "dirty");
dirty = dirtyFile.exists();
directories = new ArrayList<>();
if (dirty) {
LOGGER.log(Level.WARNING, "Index in ''{0}'' is dirty, the last indexing was likely interrupted." +
" It might be worthwhile to reindex from scratch.", indexDir);
}
}
}
/**
* By default the indexer will traverse all directories in the project. If
* you add directories with this function, update() will only process the
* specified directories.
*
* @param dir the directory to scan
* @return <code>true</code> if the directory was added, false otherwise
*/
@SuppressWarnings("PMD.UseStringBufferForStringAppends")
public boolean addDirectory(String dir) {
String directory = dir;
if (directory.startsWith("\\")) {
directory = directory.replace('\\', '/');
} else if (directory.charAt(0) != '/') {
directory = "/" + directory;
}
File file = new File(RuntimeEnvironment.getInstance().getSourceRootFile(), directory);
if (file.exists()) {
directories.add(directory);
return true;
}
return false;
}
private void showFileCount(String dir, IndexDownArgs args) {
if (RuntimeEnvironment.getInstance().isPrintProgress()) {
LOGGER.log(Level.INFO, String.format("Need to process: %d files for %s", args.curCount, dir));
}
}
private void markProjectIndexed(Project project) {
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
// Successfully indexed the project. The message is sent even if
// the project's isIndexed() is true because it triggers RepositoryInfo
// refresh.
if (project == null) {
return;
}
// Also need to store the correct value in configuration
// when indexer writes it to a file.
project.setIndexed(true);
if (env.getConfigURI() == null) {
return;
}
Response response;
try {
response = ClientBuilder.newBuilder().connectTimeout(env.getConnectTimeout(), TimeUnit.SECONDS).build()
.target(env.getConfigURI())
.path("api")
.path("v1")
.path("projects")
.path(Util.uriEncode(project.getName()))
.path("indexed")
.request()
.headers(getWebAppHeaders())
.put(Entity.text(""));
} catch (RuntimeException e) {
LOGGER.log(Level.WARNING, String.format("Could not notify the webapp that project %s was indexed",
project), e);
return;
}
if (response.getStatus() == Response.Status.ACCEPTED.getStatusCode()) {
try {
response = waitForAsyncApi(response);
} catch (InterruptedException e) {
LOGGER.log(Level.WARNING, "interrupted while waiting for API response", e);
}
}
if (response.getStatusInfo().getFamily() != Response.Status.Family.SUCCESSFUL) {
LOGGER.log(Level.WARNING, "Could not notify the webapp that project {0} was indexed: {1}",
new Object[] {project, response});
}
}
private static List<Repository> getRepositoriesForProject(Project project) {
List<Repository> repositoryList = new ArrayList<>();
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
List<RepositoryInfo> repositoryInfoList = env.getProjectRepositoriesMap().get(project);
if (repositoryInfoList != null) {
for (RepositoryInfo repositoryInfo : repositoryInfoList) {
Repository repository = HistoryGuru.getInstance().getRepository(new File(repositoryInfo.getDirectoryName()));
if (repository != null) {
repositoryList.add(repository);
}
}
}
return repositoryList;
}
/**
* @return whether the repositories of the given project are ready for history-based reindex
*/
private boolean isReadyForHistoryBasedReindex() {
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
// So far the history based reindex does not work without projects.
if (!env.hasProjects()) {
LOGGER.log(Level.FINEST, "projects are disabled, will be indexed by directory traversal.");
return false;
}
if (project == null) {
LOGGER.log(Level.FINEST, "no project, will be indexed by directory traversal.");
return false;
}
// History needs to be enabled for the history cache to work (see the comment below).
if (!project.isHistoryEnabled()) {
LOGGER.log(Level.FINEST, "history is disabled, will be indexed by directory traversal.");
return false;
}
// History cache is necessary to get the last indexed revision for given repository.
if (!env.isHistoryCache()) {
LOGGER.log(Level.FINEST, "history cache is disabled, will be indexed by directory traversal.");
return false;
}
// Per project tunable can override the global tunable, therefore env.isHistoryBasedReindex() is not checked.
if (!project.isHistoryBasedReindex()) {
LOGGER.log(Level.FINEST, "history-based reindex is disabled, will be indexed by directory traversal.");
return false;
}
/*
* Check that the index is present for this project.
* In case of the initial indexing, the traversal of all changesets would most likely be counterproductive,
* assuming traversal of directory tree is cheaper than getting the files from SCM history
* in such case.
*/
try {
if (getNumFiles() == 0) {
LOGGER.log(Level.FINEST, "zero number of documents for project {0}, " +
"will be indexed by directory traversal.", project);
return false;
}
} catch (IOException e) {
LOGGER.log(Level.FINEST, "failed to get number of documents for project {0}, " +
"will be indexed by directory traversal.", project);
return false;
}
// If there was no change to any of the repositories of the project, a FileCollector instance will still be
// returned, however the list of files therein will be empty, which is a legitimate situation (no change to
// the project). Only when getFileCollector() returns null (hinting that something went wrong)
// should the file-based traversal be done.
if (env.getFileCollector(project.getName()) == null) {
LOGGER.log(Level.FINEST, "no file collector for project {0}, will be indexed by directory traversal.",
project);
return false;
}
List<Repository> repositories = getRepositoriesForProject(project);
// Projects without repositories have to be indexed using indexDown().
if (repositories.isEmpty()) {
LOGGER.log(Level.FINEST, "project {0} has no repositories, will be indexed by directory traversal.",
project);
return false;
}
for (Repository repository : repositories) {
if (!isReadyForHistoryBasedReindex(repository)) {
return false;
}
}
// Here it is assumed there are no files untracked by the repositories of this project.
return true;
}
/**
* @param repository Repository instance
* @return true if the repository can be used for history based reindex
*/
@VisibleForTesting
boolean isReadyForHistoryBasedReindex(Repository repository) {
if (!repository.isHistoryEnabled()) {
LOGGER.log(Level.FINE, "history is disabled for {0}, " +
"the associated project {1} will be indexed using directory traversal",
new Object[]{repository, project});
return false;
}
if (!repository.isHistoryBasedReindex()) {
LOGGER.log(Level.FINE, "history based reindex is disabled for {0}, " +
"the associated project {1} will be indexed using directory traversal",
new Object[]{repository, project});
return false;
}
if (!(repository instanceof RepositoryWithHistoryTraversal)) {
LOGGER.log(Level.FINE, "project {0} has a repository {1} that does not support history traversal, " +
"the project will be indexed using directory traversal.",
new Object[]{project, repository});
return false;
}
return true;
}
/**
* Update the content of this index database.
*
* @throws IOException if an error occurs
*/
public void update() throws IOException {
synchronized (lock) {
if (running) {
throw new IOException("Indexer already running!");
}
running = true;
interrupted = false;
}
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
reader = null;
writer = null;
settings = null;
uidIter = null;
postsIter = null;
indexedSymlinks.clear();
IOException finishingException = null;
try {
Analyzer analyzer = AnalyzerGuru.getAnalyzer();
IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
iwc.setRAMBufferSizeMB(env.getRamBufferSize());
writer = new IndexWriter(indexDirectory, iwc);
writer.commit(); // to make sure index exists on the disk
completer = new PendingFileCompleter();
if (directories.isEmpty()) {
if (project == null) {
directories.add("");
} else {
directories.add(project.getPath());
}
}
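// For each directory: open the existing index reader, set up #Lines/LOC aggregation,
// collect the files to add or remove (history based or via directory traversal),
// index the collected work items in parallel, and finally store the aggregated counts.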
for (String dir : directories) {
File sourceRoot;
if ("".equals(dir)) {
sourceRoot = env.getSourceRootFile();
} else {
sourceRoot = new File(env.getSourceRootFile(), dir);
}
dir = Util.fixPathIfWindows(dir);
String startUid = Util.path2uid(dir, "");
reader = DirectoryReader.open(indexDirectory); // open existing index
countsAggregator = new NumLinesLOCAggregator();
settings = readAnalysisSettings();
if (settings == null) {
settings = new IndexAnalysisSettings3();
}
Terms terms = null;
if (reader.numDocs() > 0) {
terms = MultiTerms.getTerms(reader, QueryBuilder.U);
NumLinesLOCAccessor countsAccessor = new NumLinesLOCAccessor();
if (countsAccessor.hasStored(reader)) {
isWithDirectoryCounts = true;
isCountingDeltas = true;
} else {
boolean foundCounts = countsAccessor.register(countsAggregator, reader);
isWithDirectoryCounts = false;
isCountingDeltas = foundCounts;
if (!isCountingDeltas) {
LOGGER.info("Forcing reindexing to fully compute directory counts");
}
}
} else {
isWithDirectoryCounts = false;
isCountingDeltas = false;
}
try {
if (terms != null) {
uidIter = terms.iterator();
// The seekCeil() is pretty important because it makes uidIter.term() become non-null.
// Various indexer methods rely on this when working with the uidIter iterator - rather
// than calling uidIter.next() first thing, they check uidIter.term().
TermsEnum.SeekStatus stat = uidIter.seekCeil(new BytesRef(startUid));
if (stat == TermsEnum.SeekStatus.END) {
uidIter = null;
LOGGER.log(Level.WARNING,
"Couldn''t find a start term for {0}, empty u field?",
startUid);
}
}
// The actual indexing happens in indexParallel(). Here we merely collect the files
// that need to be indexed and the files that should be removed.
IndexDownArgs args = indexDownArgsFactory.getIndexDownArgs();
boolean usedHistory = getIndexDownArgs(dir, sourceRoot, args);
// Traverse the trailing terms. This needs to be done before indexParallel() because
// in some cases it can add items to the args parameter.
processTrailingTerms(startUid, usedHistory, args);
args.curCount = 0;
Statistics elapsed = new Statistics();
LOGGER.log(Level.INFO, "Starting indexing of directory {0}", dir);
indexParallel(dir, args);
elapsed.report(LOGGER, String.format("Done indexing of directory %s", dir),
"indexer.db.directory.index");
/*
* As a signifier that #Lines/LOC are comprehensively
* stored so that later calculation is in deltas mode, we
* need at least one D-document saved. For a repo with only
* non-code files, however, no true #Lines/LOC will have
* been saved. Subsequent re-indexing will do more work
* than necessary (until a source code file is placed). We
* can record zeroes for a fake file under the root to get
* a D-document even for this special repo situation.
*
* Metrics are aggregated for directories up to the root,
* so it suffices to put the fake directly under the root.
*/
if (!isWithDirectoryCounts) {
final String ROOT_FAKE_FILE = "/.OpenGrok_fake_file";
countsAggregator.register(new NumLinesLOC(ROOT_FAKE_FILE, 0, 0));
}
NumLinesLOCAccessor countsAccessor = new NumLinesLOCAccessor();
countsAccessor.store(writer, reader, countsAggregator,
isWithDirectoryCounts && isCountingDeltas);
markProjectIndexed(project);
} finally {
reader.close();
}
}
// The RuntimeException thrown from the block above can prevent the writing from completing.
// This is deliberate.
try {
finishWriting();
} catch (IOException e) {
finishingException = e;
}
} catch (RuntimeException ex) {
LOGGER.log(Level.SEVERE,
"Failed with unexpected RuntimeException", ex);
throw ex;
} finally {
completer = null;
try {
if (writer != null) {
writer.close();
}
} catch (IOException e) {
if (finishingException == null) {
finishingException = e;
}
LOGGER.log(Level.WARNING,
"An error occurred while closing writer", e);
} finally {
writer = null;
synchronized (lock) {
running = false;
}
}
}
if (finishingException != null) {
throw finishingException;
}
if (!isInterrupted() && isDirty()) {
unsetDirty();
env.setIndexTimestamp();
}
}
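/**
* Walk the terms that remain after the main file collection. For a history based reindex
* these correspond to files that did not change; they are re-indexed only when the stored
* analysis settings no longer match. Otherwise the trailing terms correspond to files that
* were removed from the source tree and their data is deleted.
*
* @param startUid uid prefix of the directory being indexed
* @param usedHistory whether history was used to gather the {@code args}
* @param args {@link IndexDownArgs} instance to which work items may be added
* @throws IOException on error
*/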
private void processTrailingTerms(String startUid, boolean usedHistory, IndexDownArgs args) throws IOException {
while (uidIter != null && uidIter.term() != null
&& uidIter.term().utf8ToString().startsWith(startUid)) {
if (usedHistory) {
// Allow for forced reindex. For history based reindex the trailing terms
// correspond to the files that have not changed. Such files might need to be re-indexed
// if the index format changed.
String termPath = Util.uid2url(uidIter.term().utf8ToString());
File termFile = new File(RuntimeEnvironment.getInstance().getSourceRootFile(), termPath);
boolean matchOK = (isWithDirectoryCounts || isCountingDeltas) &&
checkSettings(termFile, termPath);
if (!matchOK) {
removeFile(false);
args.curCount++;
args.works.add(new IndexFileWork(termFile, termPath));
}
} else {
// Remove data for the trailing terms that getIndexDownArgs()
// did not traverse. These correspond to the files that have been
// removed and have higher ordering than any present files.
removeFile(true);
}
BytesRef next = uidIter.next();
if (next == null) {
uidIter = null;
}
}
}
/**
* @param dir directory path
* @param sourceRoot source root File object
* @param args {@link IndexDownArgs} instance (output)
* @return true if history was used to gather the {@code IndexDownArgs}
* @throws IOException on error
*/
@VisibleForTesting
boolean getIndexDownArgs(String dir, File sourceRoot, IndexDownArgs args) throws IOException {
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
boolean historyBased = isReadyForHistoryBasedReindex();
if (LOGGER.isLoggable(Level.INFO)) {
LOGGER.log(Level.INFO, String.format("Starting file collection using %s traversal for directory '%s'",
historyBased ? "history" : "file-system", dir));
}
Statistics elapsed = new Statistics();
if (historyBased) {
indexDownUsingHistory(env.getSourceRootFile(), args);
} else {
indexDown(sourceRoot, dir, args);
}
elapsed.report(LOGGER, String.format("Done file collection for directory '%s'", dir),
"indexer.db.collection");
showFileCount(dir, args);
return historyBased;
}
/**
* Executes the first, serial stage of indexing by going through the set of files assembled from history.
* @param sourceRoot path to the source root (same as {@link RuntimeEnvironment#getSourceRootPath()})
* @param args {@link IndexDownArgs} instance where the resulting files to be indexed will be stored
* @throws IOException on error
*/
@VisibleForTesting
void indexDownUsingHistory(File sourceRoot, IndexDownArgs args) throws IOException {
FileCollector fileCollector = RuntimeEnvironment.getInstance().getFileCollector(project.getName());
for (String path : fileCollector.getFiles()) {
File file = new File(sourceRoot, path);
processFileIncremental(args, file, path);
}
}
/**
* Reduce segment counts of all index databases.
*
* @throws IOException if an error occurs
*/
static void reduceSegmentCountAll() throws IOException {
List<IndexDatabase> dbs = new ArrayList<>();
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
IndexerParallelizer parallelizer = env.getIndexerParallelizer();
if (env.hasProjects()) {
for (Project project : env.getProjectList()) {
dbs.add(new IndexDatabase(project));
}
} else {
dbs.add(new IndexDatabase());
}
CountDownLatch latch = new CountDownLatch(dbs.size());
for (IndexDatabase d : dbs) {
final IndexDatabase db = d;
parallelizer.getFixedExecutor().submit(() -> {
try {
db.reduceSegmentCount();
} catch (Throwable e) {
LOGGER.log(Level.SEVERE,
"Problem reducing segment count of Lucene index database: ", e);
} finally {
latch.countDown();
}
});
}
try {
LOGGER.info("Waiting for the Lucene segment count reduction to finish");
latch.await();
} catch (InterruptedException exp) {
LOGGER.log(Level.WARNING, "Received interrupt while waiting" +
" for index segment count reduction to finish", exp);
}
}
/**
* Reduce number of segments in the index database.
* @throws IOException I/O exception
*/
public void reduceSegmentCount() throws IOException {
synchronized (lock) {
if (running) {
LOGGER.warning("Segment count reduction terminated... Someone else is running the operation!");
return;
}
running = true;
}
IndexWriter wrt = null;
IOException writerException = null;
try {
Statistics elapsed = new Statistics();
String projectDetail = this.project != null ? " for project " + project.getName() : "";
LOGGER.log(Level.INFO, "Reducing number of segments in the index{0}", projectDetail);
Analyzer analyzer = new StandardAnalyzer();
IndexWriterConfig conf = new IndexWriterConfig(analyzer);
conf.setOpenMode(OpenMode.CREATE_OR_APPEND);
wrt = new IndexWriter(indexDirectory, conf);
wrt.forceMerge(1);
elapsed.report(LOGGER, String.format("Done reducing number of segments in index%s", projectDetail),
"indexer.db.reduceSegments");
} catch (IOException e) {
writerException = e;
LOGGER.log(Level.SEVERE, "ERROR: failed to reduce the number of index segments", e);
} finally {
if (wrt != null) {
try {
wrt.close();
} catch (IOException e) {
if (writerException == null) {
writerException = e;
}
LOGGER.log(Level.WARNING,
"An error occurred while closing writer", e);
}
}
synchronized (lock) {
running = false;
}
}
if (writerException != null) {
throw writerException;
}
}
private boolean isDirty() {
synchronized (lock) {
return dirty;
}
}
private void setDirty() {
synchronized (lock) {
try {
if (!dirty) {
if (!dirtyFile.createNewFile() && !dirtyFile.exists()) {
LOGGER.log(Level.FINE,
"Failed to create \"dirty-file\": {0}",
dirtyFile.getAbsolutePath());
}
dirty = true;
}
} catch (IOException e) {
LOGGER.log(Level.FINE, "When creating dirty file: ", e);
}
}
}
private void unsetDirty() {
synchronized (lock) {
if (dirtyFile.exists() && !dirtyFile.delete()) {
LOGGER.log(Level.FINE, "Failed to remove \"dirty-file\": {0}", dirtyFile.getAbsolutePath());
}
dirty = false;
}
}
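/**
* @param path path to the file under source root
* @param compress whether xref files are stored compressed (gzip)
* @return the xref file corresponding to the given source file path
*/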
private File whatXrefFile(String path, boolean compress) {
String xrefPath = compress ? TandemPath.join(path, ".gz") : path;
return new File(xrefDir, xrefPath);
}
/**
* Queue the removal of the xref file for the given path.
* @param path path to file under source root
*/
private void removeXrefFile(String path) {
RuntimeEnvironment env = RuntimeEnvironment.getInstance();
File xrefFile = whatXrefFile(path, env.isCompressXref());
PendingFileDeletion pending = new PendingFileDeletion(xrefFile.getAbsolutePath());
completer.add(pending);
}
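/**
* Remove the history cache entry for the given file path (relative to source root).
* @param path path to the file under source root
*/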
private void removeHistoryFile(String path) {
HistoryGuru.getInstance().clearCacheFile(path);
}
/**
* Remove a stale file from the index database and potentially also from history cache,
* and queue the removal of the associated xref file.
*
* @param removeHistory if false, do not remove history cache for this file
* @throws java.io.IOException if an error occurs
*/
private void removeFile(boolean removeHistory) throws IOException {
String path = Util.uid2url(uidIter.term().utf8ToString());
for (IndexChangedListener listener : listeners) {
listener.fileRemove(path);
}
removeFileDocUid(path);
removeXrefFile(path);
if (removeHistory) {
removeHistoryFile(path);
}
setDirty();
for (IndexChangedListener listener : listeners) {
listener.fileRemoved(path);
}
}
private void removeFileDocUid(String path) throws IOException {
// Determine if a reversal of counts is necessary, and execute if so.
if (isCountingDeltas) {
postsIter = uidIter.postings(postsIter);
while (postsIter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
// Read a limited-fields version of the document.
Document doc = reader.document(postsIter.docID(), REVERT_COUNTS_FIELDS);
if (doc != null) {
decrementLOCforDoc(path, doc);
break;