12
12
import operator
13
13
import token as tokenlib
14
14
import tokenize
15
+ from collections .abc import Iterable
15
16
from io import BytesIO
16
17
from tokenize import TokenInfo
17
- from typing import Any
18
-
19
- try :
20
- from uncertainties import ufloat
21
-
22
- HAS_UNCERTAINTIES = True
23
- except ImportError :
24
- HAS_UNCERTAINTIES = False
25
- ufloat = None
18
+ from typing import Any , Callable , Generator , Generic , Iterator , TypeVar
26
19
20
+ from .compat import HAS_UNCERTAINTIES , ufloat
27
21
from .errors import DefinitionSyntaxError
28
22
29
- # For controlling order of operations
30
- _OP_PRIORITY = {
31
- "+/-" : 4 ,
32
- "**" : 3 ,
33
- "^" : 3 ,
34
- "unary" : 2 ,
35
- "*" : 1 ,
36
- "" : 1 , # operator for implicit ops
37
- "//" : 1 ,
38
- "/" : 1 ,
39
- "%" : 1 ,
40
- "+" : 0 ,
41
- "-" : 0 ,
42
- }
23
S = TypeVar("S")


# Bind ``_ufloat`` once at import time: the real constructor when the
# uncertainties package is available, otherwise a stub that fails loudly
# the first time a quantity with an uncertainty is actually parsed.
if not HAS_UNCERTAINTIES:

    def _ufloat(*args: Any, **kwargs: Any):
        raise TypeError(
            "Please install the uncertainties package to be able to parse quantities with uncertainty."
        )

else:
    _ufloat = ufloat  # type: ignore
49
33
50
34
51
35
def _power (left : Any , right : Any ) -> Any :
@@ -63,46 +47,93 @@ def _power(left: Any, right: Any) -> Any:
63
47
return operator .pow (left , right )
64
48
65
49
66
- # https://stackoverflow.com/a/1517965/1291237
67
- class tokens_with_lookahead :
68
- def __init__ (self , iter ):
50
# Callable signatures shared by every operator implementation below.
UnaryOpT = Callable[[Any], Any]
BinaryOpT = Callable[[Any, Any], Any]

# Unary operator implementations, keyed by operator token.
_UNARY_OPERATOR_MAP: dict[str, UnaryOpT] = {
    "+": lambda x: x,
    "-": lambda x: x * -1,
}

# Binary operator implementations, keyed by operator token.
_BINARY_OPERATOR_MAP: dict[str, BinaryOpT] = {
    "+/-": _ufloat,
    "**": _power,
    "*": operator.mul,
    "": operator.mul,  # operator for implicit ops
    "/": operator.truediv,
    "+": operator.add,
    "-": operator.sub,
    "%": operator.mod,
    "//": operator.floordiv,
}
71
+
72
+ # For controlling order of operations
73
+ _OP_PRIORITY = {
74
+ "+/-" : 4 ,
75
+ "**" : 3 ,
76
+ "^" : 3 ,
77
+ "unary" : 2 ,
78
+ "*" : 1 ,
79
+ "" : 1 , # operator for implicit ops
80
+ "//" : 1 ,
81
+ "/" : 1 ,
82
+ "%" : 1 ,
83
+ "+" : 0 ,
84
+ "-" : 0 ,
85
+ }
86
+
87
+
88
class IteratorLookAhead(Generic[S]):
    """Iterator wrapper that lets items be inspected before consumption.

    Adapted: https://stackoverflow.com/a/1517965/1291237
    """

    def __init__(self, iter: Iterator[S]):
        self.iter = iter
        # Items already pulled from the underlying iterator by lookahead()
        # but not yet handed out by __next__().
        self.buffer: list[S] = []

    def __iter__(self):
        return self

    def __next__(self) -> S:
        # Drain previously buffered items first so lookahead() and plain
        # iteration observe the same sequence.
        if not self.buffer:
            return next(self.iter)
        return self.buffer.pop(0)

    def lookahead(self, n: int) -> S:
        """Return an item n entries ahead in the iteration."""
        while len(self.buffer) <= n:
            try:
                self.buffer.append(next(self.iter))
            except StopIteration:
                raise ValueError("Cannot look ahead, out of range")
        return self.buffer[n]
89
115
90
116
91
- def _plain_tokenizer (input_string ):
117
def plain_tokenizer(input_string: str) -> Generator[TokenInfo, None, None]:
    """Standard python tokenizer"""
    readline = BytesIO(input_string.encode("utf-8")).readline
    for tokinfo in tokenize.tokenize(readline):
        # Drop the leading ENCODING token; callers only want real tokens.
        if tokinfo.type == tokenlib.ENCODING:
            continue
        yield tokinfo
95
122
96
123
97
- def uncertainty_tokenizer (input_string ):
98
- def _number_or_nan (token ):
124
+ def uncertainty_tokenizer (input_string : str ) -> Generator [TokenInfo , None , None ]:
125
+ """Tokenizer capable of parsing uncertainties as v+/-u and v±u"""
126
+
127
+ def _number_or_nan (token : TokenInfo ) -> bool :
99
128
if token .type == tokenlib .NUMBER or (
100
129
token .type == tokenlib .NAME and token .string == "nan"
101
130
):
102
131
return True
103
132
return False
104
133
105
- def _get_possible_e (toklist , e_index ):
134
+ def _get_possible_e (
135
+ toklist : IteratorLookAhead [TokenInfo ], e_index : int
136
+ ) -> TokenInfo | None :
106
137
possible_e_token = toklist .lookahead (e_index )
107
138
if (
108
139
possible_e_token .string [0 ] == "e"
@@ -143,7 +174,7 @@ def _get_possible_e(toklist, e_index):
143
174
possible_e = None
144
175
return possible_e
145
176
146
- def _apply_e_notation (mantissa , exponent ) :
177
+ def _apply_e_notation (mantissa : TokenInfo , exponent : TokenInfo ) -> TokenInfo :
147
178
if mantissa .string == "nan" :
148
179
return mantissa
149
180
if float (mantissa .string ) == 0.0 :
@@ -156,7 +187,12 @@ def _apply_e_notation(mantissa, exponent):
156
187
line = exponent .line ,
157
188
)
158
189
159
- def _finalize_e (nominal_value , std_dev , toklist , possible_e ):
190
+ def _finalize_e (
191
+ nominal_value : TokenInfo ,
192
+ std_dev : TokenInfo ,
193
+ toklist : IteratorLookAhead [TokenInfo ],
194
+ possible_e : TokenInfo ,
195
+ ) -> tuple [TokenInfo , TokenInfo ]:
160
196
nominal_value = _apply_e_notation (nominal_value , possible_e )
161
197
std_dev = _apply_e_notation (std_dev , possible_e )
162
198
next (toklist ) # consume 'e' and positive exponent value
@@ -178,8 +214,9 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
178
214
# wading through all that vomit, just eliminate the problem
179
215
# in the input by rewriting ± as +/-.
180
216
input_string = input_string .replace ("±" , "+/-" )
181
- toklist = tokens_with_lookahead ( _plain_tokenizer (input_string ))
217
+ toklist = IteratorLookAhead ( plain_tokenizer (input_string ))
182
218
for tokinfo in toklist :
219
+ assert tokinfo is not None
183
220
line = tokinfo .line
184
221
start = tokinfo .start
185
222
if (
@@ -194,7 +231,7 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
194
231
end = toklist .lookahead (1 ).end ,
195
232
line = line ,
196
233
)
197
- for i in range (- 1 , 1 ):
234
+ for _ in range (- 1 , 1 ):
198
235
next (toklist )
199
236
yield plus_minus_op
200
237
elif (
@@ -280,31 +317,7 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
280
317
# Module-wide default tokenizer: parsing of uncertainties (v+/-u, v±u) is
# only available when the uncertainties package could be imported.
tokenizer = uncertainty_tokenizer if HAS_UNCERTAINTIES else plain_tokenizer
308
321
309
322
310
323
class EvalTreeNode :
@@ -344,12 +357,7 @@ def to_string(self) -> str:
344
357
345
358
def evaluate (
346
359
self ,
347
- define_op : typing .Callable [
348
- [
349
- Any ,
350
- ],
351
- Any ,
352
- ],
360
+ define_op : UnaryOpT ,
353
361
bin_op : dict [str , BinaryOpT ] | None = None ,
354
362
un_op : dict [str , UnaryOpT ] | None = None ,
355
363
):
@@ -395,9 +403,6 @@ def evaluate(
395
403
return define_op (self .left )
396
404
397
405
398
- from collections .abc import Iterable
399
-
400
-
401
406
def _build_eval_tree (
402
407
tokens : list [TokenInfo ],
403
408
op_priority : dict [str , int ],
0 commit comments