1
1
from __future__ import annotations
2
2
3
+ from textual .css .parse import substitute_references
4
+
3
5
__all__ = ["MarkupError" , "escape" , "to_content" ]
4
6
5
7
import re
17
19
Union ,
18
20
)
19
21
22
+ from textual ._context import active_app
23
+ from textual .color import Color
20
24
from textual .css .tokenize import (
21
25
COLOR ,
22
26
PERCENT ,
23
27
TOKEN ,
24
28
VARIABLE_REF ,
25
29
Expect ,
26
30
TokenizerState ,
31
+ tokenize_values ,
27
32
)
28
33
from textual .style import Style
29
34
@@ -56,8 +61,7 @@ class MarkupError(Exception):
56
61
expect_markup_expression = Expect (
57
62
"markup" ,
58
63
end_tag = r"(?<!\\)\]" ,
59
- word = r"\w+" ,
60
- period = r"\." ,
64
+ word = r"[\w\.]+" ,
61
65
round_start = r"\(" ,
62
66
round_end = r"\)" ,
63
67
square_start = r"\[" ,
@@ -74,7 +78,7 @@ class MarkupError(Exception):
74
78
class MarkupTokenizer (TokenizerState ):
75
79
"""Tokenizes Textual markup."""
76
80
77
- EXPECT = expect_markup .expect_eof (True )
81
+ EXPECT = expect_markup .expect_eof ()
78
82
STATE_MAP = {
79
83
"open_tag" : expect_markup_tag ,
80
84
"open_closing_tag" : expect_markup_tag ,
@@ -93,6 +97,142 @@ class MarkupTokenizer(TokenizerState):
93
97
}
94
98
95
99
100
# Token patterns for tokenizing an inline style string (e.g. "bold red on $primary").
# NOTE(review): keyword order appears to define pattern priority in `Expect` —
# do not reorder without confirming against the tokenizer implementation.
expect_style = Expect(
    "style token",
    end_tag=r"(?<!\\)\]",  # unescaped closing bracket terminates the style
    key=r"[@a-zA-Z_-][a-zA-Z0-9_-]*=",  # key= prefix of a key/value meta pair
    percent=PERCENT,
    color=COLOR,
    token=TOKEN,
    variable_ref=VARIABLE_REF,  # $variable references, substituted later
    whitespace=r"\s+",
    double_string=r"\".*?\"",  # non-greedy; quotes included in the token value
    single_string=r"'.*?'",
)
112
+
113
+
114
class StyleTokenizer(TokenizerState):
    """Tokenizer state machine for inline style strings."""

    EXPECT = expect_style.expect_eof()
    # After a "key" token, switch to expression tokens (EOF allowed there).
    STATE_MAP = {"key": expect_markup_expression.expect_eof()}
    # Each opening bracket pushes the expression state, so nesting is tracked.
    STATE_PUSH = {
        opener: expect_markup_expression
        for opener in ("round_start", "square_start", "curly_start")
    }
124
+
125
+
126
# Canonical style flag names, in their conventional order.
_STYLE_NAMES = ("bold", "dim", "italic", "underline", "reverse", "strike")

# Set of recognized style flag words.
STYLES = set(_STYLE_NAMES)

# Single-letter shorthand -> full style name; each abbreviation is the
# first letter of the style, and insertion order matches _STYLE_NAMES.
STYLE_ABBREVIATIONS = {name[0]: name for name in _STYLE_NAMES}
135
+
136
+
137
def parse_style(style: str, variables: dict[str, str] | None = None) -> Style:
    """Parse an inline style string (e.g. ``"bold red on $panel"``) into a Style.

    Fix: removed leftover debug ``print`` calls that wrote every token to
    stdout on each parse.

    Args:
        style: The style text to parse.
        variables: Optional mapping of variable names to values used to
            resolve ``$variable`` references. If ``None``, variables are taken
            from the active app's stylesheet (or none outside an app context).

    Returns:
        A `Style` combining colors, text-style flags, and any ``key=value``
        meta entries found in the string.
    """
    styles: dict[str, bool | None] = {}
    color: Color | None = None
    background: Color | None = None
    is_background: bool = False
    # Pending truth value for the next style flag; "not" flips it to False.
    style_state: bool = True

    tokenizer = StyleTokenizer()
    meta: dict[str, str] = {}

    if variables is None:
        # No explicit variables: use the running app's stylesheet variables,
        # or no variables at all when there is no active app.
        try:
            app = active_app.get()
        except LookupError:
            reference_tokens = {}
        else:
            reference_tokens = app.stylesheet._variable_tokens
    else:
        reference_tokens = tokenize_values(variables)

    iter_tokens = iter(
        substitute_references(
            tokenizer(style, ("inline style", "")),
            reference_tokens,
        )
    )

    for token in iter_tokens:
        token_name = token.name
        token_value = token.value
        if token_name == "key":
            # A key=value meta pair; the value is either a quoted string or a
            # run of expression tokens (possibly bracketed).
            key = token_value.rstrip("=")
            parenthesis: list[str] = []
            value_text: list[str] = []
            first_token = next(iter_tokens)
            if first_token.name in {"double_string", "single_string"}:
                # Strip the surrounding quotes from the stored value.
                meta[key] = first_token.value[1:-1]
            else:
                # Accumulate tokens until unbracketed whitespace, or until the
                # bracket that balances the expression closes.
                for token in iter_tokens:
                    if token.name == "whitespace" and not parenthesis:
                        break
                    value_text.append(token.value)
                    if token.name in {"round_start", "square_start", "curly_start"}:
                        parenthesis.append(token.value)
                    elif token.name in {"round_end", "square_end", "curly_end"}:
                        parenthesis.pop()
                        if not parenthesis:
                            break
                # Return the tokenizer to the top-level style state.
                tokenizer.expect(StyleTokenizer.EXPECT)

                value = "".join(value_text)
                meta[key] = value

        elif token_name == "color":
            if is_background:
                background = Color.parse(token.value)
            else:
                color = Color.parse(token.value)

        elif token_name == "token":
            if token_value == "on":
                # Colors after "on" apply to the background.
                is_background = True
            elif token_value == "auto":
                if is_background:
                    background = Color.automatic()
                else:
                    color = Color.automatic()
            elif token_value == "not":
                # Negate the next style flag, e.g. "not bold".
                style_state = False
            elif token_value in STYLES:
                styles[token_value] = style_state
                style_state = True
            elif token_value in STYLE_ABBREVIATIONS:
                styles[STYLE_ABBREVIATIONS[token_value]] = style_state
                style_state = True
            else:
                # Any other word is treated as a named color.
                if is_background:
                    background = Color.parse(token_value)
                else:
                    color = Color.parse(token_value)

        elif token_name == "percent":
            # A percentage scales the alpha of the most recent color.
            percent = int(token_value.rstrip("%")) / 100.0
            if is_background:
                if background is not None:
                    background = background.multiply_alpha(percent)
            else:
                if color is not None:
                    color = color.multiply_alpha(percent)

    parsed_style = Style(background, color, link=meta.get("link", None), **styles)
    if meta:
        parsed_style += Style.from_meta(meta)
    return parsed_style
234
+
235
+
96
236
RE_TAGS = re .compile (
97
237
r"""((\\*)\[([\$a-z#/@][^[]*?)])""" ,
98
238
re .VERBOSE ,
@@ -329,16 +469,16 @@ def to_content(markup: str, style: str | Style = "") -> Content:
329
469
position = 0
330
470
tag_text : list [str ]
331
471
for token in iter_tokens :
332
- print ( repr ( token ))
472
+
333
473
token_name = token .name
334
474
if token_name == "text" :
335
475
text .append (token .value )
336
476
position += len (token .value )
337
477
elif token_name == "open_tag" :
338
478
tag_text = []
339
- print ( "open" )
479
+
340
480
for token in iter_tokens :
341
- print ( " " , repr ( token ))
481
+
342
482
if token .name == "end_tag" :
343
483
break
344
484
tag_text .append (token .value )
@@ -347,9 +487,9 @@ def to_content(markup: str, style: str | Style = "") -> Content:
347
487
348
488
elif token_name == "open_closing_tag" :
349
489
tag_text = []
350
- print ( "closing" )
490
+
351
491
for token in iter_tokens :
352
- print ( " " , repr ( token ))
492
+
353
493
if token .name == "end_tag" :
354
494
break
355
495
tag_text .append (token .value )
@@ -363,16 +503,16 @@ def to_content(markup: str, style: str | Style = "") -> Content:
363
503
364
504
else :
365
505
open_position , tag = style_stack .pop ()
366
- spans .append (Span (open_position , position , tag ))
506
+ spans .append (Span (open_position , position , Style . parse ( tag ) ))
367
507
368
508
content_text = "" .join (text )
369
509
text_length = len (content_text )
370
510
while style_stack :
371
511
position , tag = style_stack .pop ()
372
- spans .append (Span (position , text_length , tag ))
512
+ spans .append (Span (position , text_length , Style . parse ( tag ) ))
373
513
374
514
content = Content (content_text , spans )
375
- print ( repr ( content ))
515
+
376
516
return content
377
517
378
518
0 commit comments