
Commit d01eb4d

Add a method that consumes the builder when creating a tokenizer
1 parent e571f74 commit d01eb4d

1 file changed (+24 -11 lines)

charabia/src/tokenizer.rs

@@ -1,3 +1,4 @@
+use std::borrow::Cow;
 use std::collections::HashMap;
 
 use aho_corasick::{AhoCorasick, MatchKind};
@@ -98,32 +99,32 @@ impl Tokenize<'_> for &str {
 ///
 /// See [`TokenizerBuilder`] to know how to build a [`Tokenizer`].
 pub struct Tokenizer<'tb> {
-    segmenter_option: &'tb SegmenterOption<'tb>,
-    normalizer_option: &'tb NormalizerOption<'tb>,
+    segmenter_option: Cow<'tb, SegmenterOption<'tb>>,
+    normalizer_option: Cow<'tb, NormalizerOption<'tb>>,
 }
 
 impl<'tb> Tokenizer<'tb> {
     /// Creates an Iterator over [`Token`]s.
     ///
     /// The provided text is segmented creating tokens,
     /// then tokens are normalized and classified depending on the list of normalizers and classifiers in [`normalizer::NORMALIZERS`].
-    pub fn tokenize<'o>(&self, original: &'o str) -> NormalizedTokenIter<'o, 'tb> {
-        original.segment_with_option(self.segmenter_option).normalize(self.normalizer_option)
+    pub fn tokenize<'t, 'o>(&'t self, original: &'o str) -> NormalizedTokenIter<'o, 't> {
+        original.segment_with_option(&self.segmenter_option).normalize(&self.normalizer_option)
     }
 
     /// Same as [`tokenize`] but attaches each [`Token`] to its corresponding portion of the original text.
-    pub fn reconstruct<'o>(&self, original: &'o str) -> ReconstructedTokenIter<'o, 'tb> {
+    pub fn reconstruct<'t, 'o>(&'t self, original: &'o str) -> ReconstructedTokenIter<'o, 't> {
         ReconstructedTokenIter { original, token_iter: self.tokenize(original) }
     }
 
     /// Segments the provided text creating an Iterator over [`Token`].
-    pub fn segment<'o>(&self, original: &'o str) -> SegmentedTokenIter<'o, 'tb> {
-        original.segment_with_option(self.segmenter_option)
+    pub fn segment<'t, 'o>(&'t self, original: &'o str) -> SegmentedTokenIter<'o, 't> {
+        original.segment_with_option(&self.segmenter_option)
     }
 
     /// Segments the provided text creating an Iterator over `&str`.
-    pub fn segment_str<'o>(&self, original: &'o str) -> SegmentedStrIter<'o, 'tb> {
-        original.segment_str_with_option(self.segmenter_option)
+    pub fn segment_str<'t, 'o>(&'t self, original: &'o str) -> SegmentedStrIter<'o, 't> {
+        original.segment_str_with_option(&self.segmenter_option)
     }
 }
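
The core of this hunk is the switch from plain references to `Cow`: a `Tokenizer` can now either borrow its options from a live `TokenizerBuilder` or own them outright. A minimal, self-contained sketch of the same pattern (illustrative types only, not charabia's):

use std::borrow::Cow;

// `Cow` requires `ToOwned`, which the blanket impl provides for `Clone` types.
#[derive(Clone)]
struct Options {
    lowercase: bool,
}

struct Holder<'a> {
    options: Cow<'a, Options>,
}

// Borrowing path: the `Holder` cannot outlive `opts`.
fn borrowed(opts: &Options) -> Holder<'_> {
    Holder { options: Cow::Borrowed(opts) }
}

// Owning path: the `Holder` is self-contained and can outlive its source.
fn owned(opts: Options) -> Holder<'static> {
    Holder { options: Cow::Owned(opts) }
}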

@@ -337,8 +338,20 @@ impl<'tb, A: AsRef<[u8]>> TokenizerBuilder<'tb, A> {
         }
 
         Tokenizer {
-            normalizer_option: &self.normalizer_option,
-            segmenter_option: &self.segmenter_option,
+            normalizer_option: Cow::Borrowed(&self.normalizer_option),
+            segmenter_option: Cow::Borrowed(&self.segmenter_option),
+        }
+    }
+
+    /// Build the configured `Tokenizer`, consuming `self`.
+    ///
+    /// This method allows the builder to be dropped without having to drop the `Tokenizer` itself.
+    pub fn into_tokenizer(mut self) -> Tokenizer<'tb> {
+        drop(self.build());
+
+        Tokenizer {
+            normalizer_option: Cow::Owned(self.normalizer_option),
+            segmenter_option: Cow::Owned(self.segmenter_option),
         }
     }
 }
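
For context, a minimal sketch of what the new method buys callers. This assumes charabia's public API around this commit (`TokenizerBuilder::default()`, `Tokenizer::tokenize`, `Token::lemma()`); the `drop(self.build())` in the diff presumably lets `build` (which takes `&mut self`) finalize the options stored on `self` before they are moved out:

use charabia::TokenizerBuilder;

fn main() {
    // With `build()`, the Tokenizer borrows the builder's options
    // (Cow::Borrowed), so the builder must outlive the Tokenizer.
    //
    // With `into_tokenizer()`, the options are moved into the Tokenizer
    // (Cow::Owned), so the builder can be a temporary, as here.
    let tokenizer = TokenizerBuilder::default().into_tokenizer();

    for token in tokenizer.tokenize("The quick brown fox") {
        println!("{}", token.lemma());
    }
}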
