@@ -1,15 +1,17 @@
 from __future__ import annotations
 
+import asyncio
 import operator
 import re
 from enum import Enum
 from functools import reduce
 from typing import (
     TYPE_CHECKING,
     Any,
-    AsyncGenerator,
     Dict,
     FrozenSet,
+    Generator,
+    Iterable,
     NamedTuple,
     Optional,
     Set,
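
The import hunk tracks the refactor below: AsyncGenerator is dropped because token generation becomes synchronous, Generator and Iterable type the new signatures, and asyncio is needed for the executor offload in collect_full and collect_range. The two annotations differ in arity, as this standalone sketch (not part of the patch) shows:

    from typing import AsyncGenerator, Generator

    # AsyncGenerator takes two type parameters: [YieldType, SendType]
    async def agen() -> AsyncGenerator[int, None]:
        yield 1

    # Generator takes three: [YieldType, SendType, ReturnType]
    def gen() -> Generator[int, None, None]:
        yield 1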
@@ -96,7 +98,7 @@ def __init__(self, parent: RobotLanguageServerProtocol) -> None:
         parent.semantic_tokens.token_types += [e for e in RobotSemTokenTypes]
         parent.semantic_tokens.collect_full.add(self.collect_full)
         parent.semantic_tokens.collect_range.add(self.collect_range)
-        parent.semantic_tokens.collect_full_delta.add(self.collect_full_delta)
+        # parent.semantic_tokens.collect_full_delta.add(self.collect_full_delta)
 
     @classmethod
     def generate_mapping(cls) -> Dict[str, Tuple[Enum, Optional[Set[Enum]]]]:
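
The collect_full_delta registration is commented out rather than deleted, and the handler itself stays defined (its signature is still visible, unchanged, at the end of this diff). Presumably the delta path is parked until it is adapted to the threaded collection below; without a registered delta handler, clients fall back to plain textDocument/semanticTokens/full requests.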
@@ -164,9 +166,9 @@ def mapping(cls) -> Dict[str, Tuple[Enum, Optional[Set[Enum]]]]:
         )
 
     @classmethod
-    async def generate_sem_sub_tokens(
+    def generate_sem_sub_tokens(
         cls, token: Token, col_offset: Optional[int] = None, length: Optional[int] = None
-    ) -> AsyncGenerator[SemTokenInfo, None]:
+    ) -> Generator[SemTokenInfo, None, None]:
         from robot.parsing.lexer.tokens import Token as RobotToken
 
         sem_info = cls.mapping().get(token.type, None) if token.type is not None else None
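
Dropping async here is what makes the executor offload in the last hunk possible: the worker thread has no running event loop, so everything called from collect_threading must be synchronous. generate_sem_sub_tokens and generate_sem_tokens below therefore become plain generators; their bodies are otherwise unchanged.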
@@ -199,55 +201,55 @@ async def generate_sem_sub_tokens(
             yield SemTokenInfo.from_token(token, sem_info[0], sem_info[1], col_offset, length)
 
     @classmethod
-    async def generate_sem_tokens(cls, token: Token) -> AsyncGenerator[SemTokenInfo, None]:
+    def generate_sem_tokens(cls, token: Token) -> Generator[SemTokenInfo, None, None]:
         from robot.parsing.lexer.tokens import Token as RobotToken
 
         if token.type in RobotToken.ALLOW_VARIABLES:
             last_sub_token = token
             try:
                 for sub_token in token.tokenize_variables():
                     last_sub_token = sub_token
-                    async for e in cls.generate_sem_sub_tokens(sub_token):
+                    for e in cls.generate_sem_sub_tokens(sub_token):
                         yield e
             except BaseException:
                 pass
             if last_sub_token == token:
-                async for e in cls.generate_sem_sub_tokens(last_sub_token):
+                for e in cls.generate_sem_sub_tokens(last_sub_token):
                     yield e
             elif last_sub_token is not None and last_sub_token.end_col_offset < token.end_col_offset:
-                async for e in cls.generate_sem_sub_tokens(
+                for e in cls.generate_sem_sub_tokens(
                     token,
                     last_sub_token.end_col_offset,
                     token.end_col_offset - last_sub_token.end_col_offset - last_sub_token.col_offset,
                 ):
                     yield e
 
         else:
-            async for e in cls.generate_sem_sub_tokens(token):
+            for e in cls.generate_sem_sub_tokens(token):
                 yield e
 
-    async def collect(
-        self, document: TextDocument, range: Optional[Range]
+    def collect_threading(
+        self, tokens: Iterable[Token], range: Optional[Range]
     ) -> Union[SemanticTokens, SemanticTokensPartialResult, None]:
 
         data = []
         last_line = 0
         last_col = 0
 
-        tokens = await self.parent.documents_cache.get_tokens(document)
-
         start = True
+
         for robot_token in tokens:
             if range is not None:
-                if start and not token_in_range(robot_token, range):
-                    continue
+                if start:
+                    if not token_in_range(robot_token, range):
+                        continue
+                    else:
+                        start = False
                 else:
-                    start = False
-
-                if not start and not token_in_range(robot_token, range):
-                    break
+                    if not token_in_range(robot_token, range):
+                        break
 
-            async for token in self.generate_sem_tokens(robot_token):
+            for token in self.generate_sem_tokens(robot_token):
                 current_line = token.lineno - 1
 
                 data.append(current_line - last_line)
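
Two things change in this hunk besides the async removal. The range filter is restructured so token_in_range runs once per token: because tokens arrive in document order, the loop skips tokens until it enters the requested range, then breaks at the first token past it (the old code could test the same token twice). The trailing data.append(current_line - last_line) is the start of the LSP wire format, in which every semantic token is five integers relative to its predecessor. A standalone sketch of that encoding (field names assumed):

    # Each token becomes five integers:
    #   deltaLine, deltaStartChar, length, tokenType, tokenModifiers
    def encode(tokens):
        data = []
        last_line = last_col = 0
        for line, col, length, type_index, modifier_bits in tokens:
            delta_line = line - last_line
            # the column is relative only while staying on the same line
            delta_col = col - last_col if delta_line == 0 else col
            data.extend([delta_line, delta_col, length, type_index, modifier_bits])
            last_line, last_col = line, col
        return data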
@@ -281,13 +283,18 @@ async def collect(
     async def collect_full(
         self, sender: Any, document: TextDocument, **kwargs: Any
     ) -> Union[SemanticTokens, SemanticTokensPartialResult, None]:
-        return await self.collect(document, None)
+
+        return await asyncio.get_event_loop().run_in_executor(
+            None, self.collect_threading, await self.parent.documents_cache.get_tokens(document), None
+        )
 
     @language_id("robotframework")
     async def collect_range(
         self, sender: Any, document: TextDocument, range: Range, **kwargs: Any
     ) -> Union[SemanticTokens, SemanticTokensPartialResult, None]:
-        return await self.collect(document, range)
+        return await asyncio.get_event_loop().run_in_executor(
+            None, self.collect_threading, await self.parent.documents_cache.get_tokens(document), range
+        )
 
     @language_id("robotframework")
     async def collect_full_delta(
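
This is the core of the change: collect_full and collect_range first await the cached tokens on the event loop, then hand the pure-CPU walk over them to the loop's default ThreadPoolExecutor, so tokenizing a large file no longer blocks other requests. A minimal standalone sketch of the pattern (names hypothetical):

    import asyncio

    def crunch(items: list) -> int:
        # synchronous, CPU-bound work; must not touch the event loop
        return sum(len(str(i)) for i in items)

    async def handler(items: list) -> int:
        loop = asyncio.get_event_loop()
        # None selects the loop's default ThreadPoolExecutor
        return await loop.run_in_executor(None, crunch, items)

    print(asyncio.run(handler([1, "two", 3.0])))

Note that run_in_executor only forwards positional arguments, which is why the token iterable and the range are passed positionally in the patch; keyword arguments would need functools.partial.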