@@ -20,7 +20,7 @@ use std::collections::HashMap;
 use futures::channel::oneshot;
 use futures::future::join_all;
 use futures::{StreamExt, TryStreamExt};
-use tokio::sync::oneshot::{channel, Receiver};
+use tokio::sync::oneshot::{Receiver, channel};

 use super::delete_filter::{DeleteFilter, EqDelFuture};
 use crate::arrow::delete_file_loader::BasicDeleteFileLoader;
@@ -70,30 +70,30 @@ impl CachingDeleteFileLoader {
 /// Returned future completes once all loading has finished.
 ///
 /// * Create a single stream of all delete file tasks irrespective of type,
-///   so that we can respect the combined concurrency limit
+///   so that we can respect the combined concurrency limit
 /// * We then process each in two phases: load and parse.
 /// * for positional deletes the load phase instantiates an ArrowRecordBatchStream to
-///   stream the file contents out
+///   stream the file contents out
 /// * for eq deletes, we first check if the EQ delete is already loaded or being loaded by
-///   another concurrently processing data file scan task. If it is, we return a future
-///   for the pre-existing task from the load phase. If not, we create such a future
-///   and store it in the state to prevent other data file tasks from starting to load
-///   the same equality delete file, and return a record batch stream from the load phase
-///   as per the other delete file types - only this time it is accompanied by a one-shot
-///   channel sender that we will eventually use to resolve the shared future that we stored
-///   in the state.
+///   another concurrently processing data file scan task. If it is, we return a future
+///   for the pre-existing task from the load phase. If not, we create such a future
+///   and store it in the state to prevent other data file tasks from starting to load
+///   the same equality delete file, and return a record batch stream from the load phase
+///   as per the other delete file types - only this time it is accompanied by a one-shot
+///   channel sender that we will eventually use to resolve the shared future that we stored
+///   in the state.
 /// * When this gets updated to add support for delete vectors, the load phase will return
-///   a PuffinReader for them.
+///   a PuffinReader for them.
 /// * The parse phase parses each record batch stream according to its associated data type.
-///   The result of this is a map of data file paths to delete vectors for the positional
-///   delete tasks (and in future for the delete vector tasks). For equality delete
-///   file tasks, this results in an unbound Predicate.
+///   The result of this is a map of data file paths to delete vectors for the positional
+///   delete tasks (and in future for the delete vector tasks). For equality delete
+///   file tasks, this results in an unbound Predicate.
 /// * The unbound Predicates resulting from equality deletes are sent to their associated oneshot
-///   channel to store them in the right place in the delete file managers state.
+///   channel to store them in the right place in the delete file managers state.
 /// * The results of all of these futures are awaited on in parallel with the specified
-///   level of concurrency and collected into a vec. We then combine all the delete
-///   vector maps that resulted from any positional delete or delete vector files into a
-///   single map and persist it in the state.
+///   level of concurrency and collected into a vec. We then combine all the delete
+///   vector maps that resulted from any positional delete or delete vector files into a
+///   single map and persist it in the state.
 ///
 ///
 /// Conceptually, the data flow is like this:
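For orientation, here is a rough, self-contained sketch of the flow the doc comment above describes. It is not the iceberg-rust implementation: `DeleteTask`, `ParsedDelete`, `EqFutures` and the string-valued "predicate" are hypothetical stand-ins for the crate's real scan-task, `DeleteFilter`/`EqDelFuture` and `Predicate` types. It shows one stream over all delete-file tasks driven with a single combined concurrency limit, equality deletes de-duplicated through a shared future that a `tokio::sync::oneshot` sender resolves, and positional-delete results folded into one map.

```rust
// Assumed deps (not part of the diff): tokio = { version = "1", features = ["full"] }, futures = "0.3"
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use futures::future::{BoxFuture, FutureExt, Shared};
use futures::{StreamExt, stream};
use tokio::sync::oneshot;

/// Hypothetical stand-in for the per-delete-file scan tasks.
enum DeleteTask {
    Positional { path: String },
    Equality { path: String },
}

/// What the parse phase yields: per-data-file delete vectors for positional
/// deletes, or a marker that an equality predicate was sent down its channel.
enum ParsedDelete {
    DeleteVectors(HashMap<String, Vec<u64>>),
    EqPredicateSent,
}

/// Equality-delete predicates that are loaded or in flight, keyed by file path.
/// `Shared` lets every waiting scan task await the same in-flight load.
type EqFutures = Arc<Mutex<HashMap<String, Shared<BoxFuture<'static, Option<String>>>>>>;

async fn load_and_parse(task: DeleteTask, eq_futures: EqFutures) -> ParsedDelete {
    match task {
        DeleteTask::Positional { path } => {
            // The load phase would open an Arrow record batch stream here; the
            // parse phase folds its batches into per-data-file delete vectors.
            let mut dv = HashMap::new();
            dv.insert(format!("data-file-covered-by-{path}"), vec![1u64, 5, 9]);
            ParsedDelete::DeleteVectors(dv)
        }
        DeleteTask::Equality { path } => {
            // If another task already started loading this file, the shared
            // future stored in the map will resolve for us; do nothing here.
            let sender = {
                let mut guard = eq_futures.lock().unwrap();
                if guard.contains_key(&path) {
                    None
                } else {
                    let (tx, rx) = oneshot::channel::<String>();
                    guard.insert(path.clone(), rx.map(|r| r.ok()).boxed().shared());
                    Some(tx)
                }
            };
            if let Some(tx) = sender {
                // Parse phase: build the (unbound) predicate and resolve the
                // shared future through the one-shot sender.
                let _ = tx.send(format!("predicate parsed from {path}"));
            }
            ParsedDelete::EqPredicateSent
        }
    }
}

#[tokio::main]
async fn main() {
    let eq_futures: EqFutures = Arc::new(Mutex::new(HashMap::new()));
    let tasks = vec![
        DeleteTask::Positional { path: "pos-del-1.parquet".into() },
        DeleteTask::Equality { path: "eq-del-1.parquet".into() },
        // Duplicate: rides on the shared future instead of loading again.
        DeleteTask::Equality { path: "eq-del-1.parquet".into() },
    ];

    // One stream over all delete-file tasks, regardless of type, so a single
    // `buffer_unordered` bound acts as the combined concurrency limit.
    let results: Vec<ParsedDelete> = stream::iter(tasks)
        .map(|task| load_and_parse(task, eq_futures.clone()))
        .buffer_unordered(4)
        .collect()
        .await;

    // Combine every positional-delete map into a single map.
    let mut combined: HashMap<String, Vec<u64>> = HashMap::new();
    for result in results {
        if let ParsedDelete::DeleteVectors(dv) = result {
            combined.extend(dv);
        }
    }
    println!("delete vectors: {combined:?}");

    // Anyone holding the shared future can now await the resolved predicate.
    let eq = eq_futures.lock().unwrap().get("eq-del-1.parquet").cloned().unwrap();
    println!("eq predicate: {:?}", eq.await);
}
```

`buffer_unordered` provides the single combined concurrency limit mentioned in the first bullet, while the mutex-guarded map of `Shared` oneshot receivers is what lets a second scan task wait on an equality delete that another task is already loading.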