Coverage for C:\leo.repo\leo-editor\leo\commands\convertCommands.py: 100%
Shortcuts on this page
r m x toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
Shortcuts on this page
r m x toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# -*- coding: utf-8 -*-
2#@+leo-ver=5-thin
3#@+node:ekr.20160316095222.1: * @file ../commands/convertCommands.py
4#@@first
5"""Leo's file-conversion commands."""
7import re
8from typing import Any, Dict, List, Optional, Tuple
9from leo.core import leoGlobals as g
10from leo.core import leoBeautify
11from leo.commands.baseCommands import BaseEditCommandsClass
def cmd(name):
    """Return a command decorator bound to the c.convertCommands instance."""
    ini_path = ['c', 'convertCommands']
    return g.new_cmd_decorator(name, ini_path)
17#@+<< class To_Python >>
18#@+node:ekr.20150514063305.123: ** << class To_Python >>
19class To_Python: # pragma: no cover
20 """The base class for x-to-python commands."""
21 #@+others
22 #@+node:ekr.20150514063305.125: *3* To_Python.ctor
    def __init__(self, c):
        """Ctor for To_Python class."""
        self.c = c
        # Copy the presently-selected position so later traversal is stable.
        self.p = self.c.p.copy()
        # Honor any @tabwidth directive in effect at self.p; default to 4.
        aList = g.get_directives_dict_list(self.p)
        self.tab_width = g.scanAtTabwidthDirectives(aList) or 4
29 #@+node:ekr.20150514063305.126: *3* To_Python.go
    def go(self):
        """
        Convert the body text of every node in self.p's tree, in place,
        as a single undoable group. Subclasses supply convertCodeList.
        """
        import time
        t1 = time.time()
        c = self.c
        u, undoType = c.undoer, 'typescript-to-python'
        pp = leoBeautify.CPrettyPrinter(c)
        u.beforeChangeGroup(c.p, undoType)
        changed = False
        n_files, n_nodes = 0, 0
        # Headline prefixes: 'special' are echoed as progress; 'files' are counted.
        special = ('class ', 'module ', '@file ', '@@file ')
        files = ('@file ', '@@file ')
        for p in self.p.self_and_subtree(copy=False):
            if p.b:
                n_nodes += 1
                if any(p.h.startswith(z) for z in special):
                    g.es_print(p.h)
                if any(p.h.startswith(z) for z in files):
                    n_files += 1
                bunch = u.beforeChangeNodeContents(p)
                # Re-indent the body, then run the subclass's conversion
                # on a list of characters (most helpers expect a list).
                s = pp.indent(p, giveWarnings=False)
                aList = list(s)
                self.convertCodeList(aList)
                s = ''.join(aList)
                if s != p.b:
                    p.b = s
                    p.setDirty()
                    u.afterChangeNodeContents(p, undoType, bunch)
                    changed = True
        # Call this only once, at end.
        if changed:
            u.afterChangeGroup(c.p, undoType, reportFlag=False)
        t2 = time.time()
        g.es_print(f"done! {n_files} files, {n_nodes} nodes, {t2 - t1:2.2f} sec")
63 #@+node:ekr.20150514063305.127: *3* To_Python.convertCodeList
    def convertCodeList(self, aList):
        """The main search/replace method. Subclasses must override this."""
        g.trace('must be defined in subclasses.')
67 #@+node:ekr.20150514063305.128: *3* To_Python.Utils
68 #@+node:ekr.20150514063305.129: *4* match...
69 #@+node:ekr.20150514063305.130: *5* match
70 def match(self, s, i, pat):
71 """
72 Return True if s[i:] matches the pat string.
74 We can't use g.match because s is usually a list.
75 """
76 assert pat
77 j = 0
78 while i + j < len(s) and j < len(pat):
79 if s[i + j] == pat[j]:
80 j += 1
81 if j == len(pat):
82 return True
83 else:
84 return False
85 return False
86 #@+node:ekr.20150514063305.131: *5* match_word
87 def match_word(self, s, i, pat):
88 """
89 Return True if s[i:] word matches the pat string.
91 We can't use g.match_word because s is usually a list
92 and g.match_word uses s.find.
93 """
94 if self.match(s, i, pat):
95 j = i + len(pat)
96 if j >= len(s):
97 return True
98 if not pat[-1].isalnum():
99 # Bug fix 10/16/2012: The pattern terminates the word.
100 return True
101 ch = s[j]
102 return not ch.isalnum() and ch != '_'
103 return False
104 #@+node:ekr.20150514063305.132: *4* insert_not
105 def insert_not(self, aList):
106 """Change "!" to "not" except before an equal sign."""
107 i = 0
108 while i < len(aList):
109 if self.is_string_or_comment(aList, i):
110 i = self.skip_string_or_comment(aList, i)
111 elif aList[i] == '!' and not self.match(aList, i + 1, '='):
112 aList[i : i + 1] = list('not ')
113 i += 4
114 else:
115 i += 1
116 #@+node:ekr.20150514063305.133: *4* is...
117 #@+node:ekr.20150514063305.134: *5* is_section_def/ref
118 def is_section_def(self, p):
119 return self.is_section_ref(p.h)
121 def is_section_ref(self, s):
122 n1 = s.find("<<", 0)
123 n2 = s.find(">>", 0)
124 return -1 < n1 < n2 and s[n1 + 2 : n2].strip()
125 #@+node:ekr.20150514063305.135: *5* is_string_or_comment
126 def is_string_or_comment(self, s, i):
127 # Does range checking.
128 m = self.match
129 return m(s, i, "'") or m(s, i, '"') or m(s, i, "//") or m(s, i, "/*")
130 #@+node:ekr.20150514063305.136: *5* is_ws and is_ws_or_nl
131 def is_ws(self, ch):
132 return ch in ' \t'
134 def is_ws_or_nl(self, ch):
135 return ch in ' \t\n'
136 #@+node:ekr.20150514063305.137: *4* prevNonWsChar and prevNonWsOrNlChar
137 def prevNonWsChar(self, s, i):
138 i -= 1
139 while i >= 0 and self.is_ws(s[i]):
140 i -= 1
141 return i
143 def prevNonWsOrNlChar(self, s, i):
144 i -= 1
145 while i >= 0 and self.is_ws_or_nl(s[i]):
146 i -= 1
147 return i
148 #@+node:ekr.20150514063305.138: *4* remove...
149 #@+node:ekr.20150514063305.139: *5* removeBlankLines
150 def removeBlankLines(self, aList):
151 i = 0
152 while i < len(aList):
153 j = i
154 while j < len(aList) and aList[j] in " \t":
155 j += 1
156 if j == len(aList) or aList[j] == '\n':
157 del aList[i : j + 1]
158 else:
159 i = self.skip_past_line(aList, i)
160 #@+node:ekr.20150514063305.140: *5* removeExcessWs
161 def removeExcessWs(self, aList):
162 i = 0
163 i = self.removeExcessWsFromLine(aList, i)
164 while i < len(aList):
165 if self.is_string_or_comment(aList, i):
166 i = self.skip_string_or_comment(aList, i)
167 elif self.match(aList, i, '\n'):
168 i += 1
169 i = self.removeExcessWsFromLine(aList, i)
170 else: i += 1
171 #@+node:ekr.20150514063305.141: *5* removeExessWsFromLine
    def removeExcessWsFromLine(self, aList, i):
        """
        Collapse interior whitespace runs of the line starting at aList[i]
        to single blanks, keeping the leading indentation.
        Return the index where scanning stopped (newline, string/comment, or end).
        """
        assert(i == 0 or aList[i - 1] == '\n')  # i must be a line start.
        i = self.skip_ws(aList, i)  # Retain the leading whitespace.
        while i < len(aList):
            if self.is_string_or_comment(aList, i):
                break  # safe: the caller's outer loop skips strings/comments.
            elif self.match(aList, i, '\n'):
                break  # End of this line.
            elif self.match(aList, i, ' ') or self.match(aList, i, '\t'):
                # Replace all whitespace by one blank.
                j = self.skip_ws(aList, i)
                assert j > i
                aList[i:j] = [' ']
                i += 1  # make sure we don't go past a newline!
            else: i += 1
        return i
188 #@+node:ekr.20150514063305.142: *5* removeMatchingBrackets
    def removeMatchingBrackets(self, aList, i):
        """
        Delete the bracket at aList[i] and its matching closing bracket.

        Return the index just past where the closer stood (adjusted for the
        two deletions). If no proper closer is found, delete nothing and
        return the scan result.
        """
        j = self.skip_to_matching_bracket(aList, i)
        if i < j < len(aList):
            c = aList[j]
            if c == ')' or c == ']' or c == '}':
                # Delete the closer first so index i remains valid.
                del aList[j : j + 1]
                del aList[i : i + 1]
                return j - 1
            return j + 1
        return j
199 #@+node:ekr.20150514063305.143: *5* removeSemicolonsAtEndOfLines
200 def removeSemicolonsAtEndOfLines(self, aList):
201 i = 0
202 while i < len(aList):
203 if self.is_string_or_comment(aList, i):
204 i = self.skip_string_or_comment(aList, i)
205 elif aList[i] == ';':
206 j = self.skip_ws(aList, i + 1)
207 if (
208 j >= len(aList) or
209 self.match(aList, j, '\n') or
210 self.match(aList, j, '#') or
211 self.match(aList, j, "//")
212 ):
213 del aList[i]
214 else: i += 1
215 else: i += 1
216 #@+node:ekr.20150514063305.144: *5* removeTrailingWs
217 def removeTrailingWs(self, aList):
218 i = 0
219 while i < len(aList):
220 if self.is_ws(aList[i]):
221 j = i
222 i = self.skip_ws(aList, i)
223 assert j < i
224 if i >= len(aList) or aList[i] == '\n':
225 # print "removing trailing ws:", `i-j`
226 del aList[j:i]
227 i = j
228 else: i += 1
229 #@+node:ekr.20150514063305.145: *4* replace... & safe_replace
230 #@+node:ekr.20150514063305.146: *5* replace
231 def replace(self, aList, findString, changeString):
232 """
233 Replaces all occurances of findString by changeString.
234 changeString may be the empty string, but not None.
235 """
236 if not findString:
237 return
238 changeList = list(changeString)
239 i = 0
240 while i < len(aList):
241 if self.match(aList, i, findString):
242 aList[i : i + len(findString)] = changeList
243 i += len(changeList)
244 else:
245 i += 1
246 #@+node:ekr.20150514063305.147: *5* replaceComments
    def replaceComments(self, aList):
        """Convert C/C++ comments ('//' and '/*...*/') to Python '#' comments, in place."""
        i = 0
        while i < len(aList):
            # Loop invariant: j > progress at end.
            progress = i
            if self.match(aList, i, "//"):
                # Line comment: '//' becomes '#'; rest of the line is untouched.
                aList[i : i + 2] = ['#']
                j = self.skip_past_line(aList, i)
            elif self.match(aList, i, "/*"):
                # Block comment: re-emit its lines as '#' lines at the comment's indent.
                j = self.skip_c_block_comment(aList, i)
                k = i
                # Back up over the leading whitespace on the comment's first line.
                while k - 1 >= 0 and aList[k - 1] in ' \t':
                    k -= 1
                assert k == 0 or aList[k - 1] not in ' \t'
                lws = ''.join(aList[k:i])
                comment_body = ''.join(aList[i + 2 : j - 2])
                comment_lines = g.splitLines(lws + comment_body)
                comment_lines = self.munge_block_comment(comment_lines)
                comment = '\n'.join(comment_lines)  # A list of lines.
                comment_list = list(comment)  # A list of characters.
                aList[k:j] = comment_list
                j = k + len(comment_list)
                progress = j - 1  # Disable the check below.
            elif self.match(aList, i, '"') or self.match(aList, i, "'"):
                # Don't convert comment-like text inside strings.
                j = self.skip_string(aList, i)
            else:
                j = i + 1
            # Defensive programming.
            if j == progress:
                j += 1
            assert j > progress
            i = j
279 #@+node:ekr.20150514063305.148: *6* munge_block_comment
280 def munge_block_comment(self, comment_lines):
282 n = len(comment_lines)
283 assert n > 0
284 s = comment_lines[0]
285 junk, w = g.skip_leading_ws_with_indent(s, 0, tab_width=4)
286 if n == 1:
287 return [f"{' ' * (w - 1)}# {s.strip()}"]
288 junk, w = g.skip_leading_ws_with_indent(s, 0, tab_width=4)
289 result = []
290 for i, s in enumerate(comment_lines):
291 if s.strip():
292 result.append(f"{' ' * w}# {s.strip()}")
293 elif i == n - 1:
294 pass # Omit the line entirely.
295 else:
296 result.append('') # Add a blank line
297 return result
298 #@+node:ekr.20150514063305.149: *5* replaceSectionDefs
    def replaceSectionDefs(self, aList):
        """Replaces < < x > > = by @c (at the start of lines).

        NOTE(review): this method looks broken and is probably dead code:
        is_section_def expects a position (it reads p.h) but is called here
        with a single character, and its result (a str or False) is compared
        with an int, which raises TypeError in Python 3. Confirm the method
        is unused before relying on it.
        """
        if not aList:
            return
        i = 0
        j = self.is_section_def(aList[i])  # NOTE(review): aList[i] is one char, not a position.
        if j > 0:
            aList[i:j] = list("@c ")
        while i < len(aList):
            if self.is_string_or_comment(aList, i):
                i = self.skip_string_or_comment(aList, i)
            elif self.match(aList, i, "\n"):
                i += 1
                j = self.is_section_def(aList[i])  # NOTE(review): may IndexError at end of list.
                if j > i:
                    aList[i:j] = list("@c ")
            else: i += 1
316 #@+node:ekr.20150514063305.150: *5* safe_replace
317 def safe_replace(self, aList, findString, changeString):
318 """
319 Replaces occurances of findString by changeString,
320 but only outside of C comments and strings.
321 changeString may be the empty string, but not None.
322 """
323 if not findString:
324 return
325 changeList = list(changeString)
326 i = 0
327 if findString[0].isalpha(): # use self.match_word
328 while i < len(aList):
329 if self.is_string_or_comment(aList, i):
330 i = self.skip_string_or_comment(aList, i)
331 elif self.match_word(aList, i, findString):
332 aList[i : i + len(findString)] = changeList
333 i += len(changeList)
334 else:
335 i += 1
336 else: #use self.match
337 while i < len(aList):
338 if self.match(aList, i, findString):
339 aList[i : i + len(findString)] = changeList
340 i += len(changeList)
341 else:
342 i += 1
343 #@+node:ekr.20150514063305.151: *4* skip
344 #@+node:ekr.20150514063305.152: *5* skip_c_block_comment
345 def skip_c_block_comment(self, s, i):
346 assert self.match(s, i, "/*")
347 i += 2
348 while i < len(s):
349 if self.match(s, i, "*/"):
350 return i + 2
351 i += 1
352 return i
353 #@+node:ekr.20150514063305.153: *5* skip_line
354 def skip_line(self, s, i):
355 while i < len(s) and s[i] != '\n':
356 i += 1
357 return i
358 #@+node:ekr.20150514063305.154: *5* skip_past_line
359 def skip_past_line(self, s, i):
360 while i < len(s) and s[i] != '\n':
361 i += 1
362 if i < len(s) and s[i] == '\n':
363 i += 1
364 return i
365 #@+node:ekr.20150514063305.155: *5* skip_past_word
366 def skip_past_word(self, s, i):
367 assert s[i].isalpha() or s[i] == '~'
368 # Kludge: this helps recognize dtors.
369 if s[i] == '~':
370 i += 1
371 while i < len(s):
372 ch = s[i]
373 if ch.isalnum() or ch == '_':
374 i += 1
375 else:
376 break
377 return i
378 #@+node:ekr.20150514063305.156: *5* skip_string
379 def skip_string(self, s, i):
380 delim = s[i] # handle either single or double-quoted strings
381 assert delim == '"' or delim == "'"
382 i += 1
383 while i < len(s):
384 if s[i] == delim:
385 return i + 1
386 if s[i] == '\\':
387 i += 2
388 else:
389 i += 1
390 return i
391 #@+node:ekr.20150514063305.157: *5* skip_string_or_comment
    def skip_string_or_comment(self, s, i):
        """
        Skip the string or comment starting at s[i]; return the index past it.

        Callers must ensure is_string_or_comment(s, i) is True; the assert
        enforces that contract.
        """
        if self.match(s, i, "'") or self.match(s, i, '"'):
            j = self.skip_string(s, i)
        elif self.match(s, i, "//"):
            j = self.skip_past_line(s, i)
        elif self.match(s, i, "/*"):
            j = self.skip_c_block_comment(s, i)
        else:
            assert False  # Caller violated the precondition.
        return j
402 #@+node:ekr.20150514063305.158: *5* skip_to_matching_bracket
403 def skip_to_matching_bracket(self, s, i):
404 ch = s[i]
405 if ch == '(':
406 delim = ')'
407 elif ch == '{':
408 delim = '}'
409 elif ch == '[':
410 delim = ']'
411 else:
412 assert False
413 i += 1
414 while i < len(s):
415 ch = s[i]
416 if self.is_string_or_comment(s, i):
417 i = self.skip_string_or_comment(s, i)
418 elif ch == delim:
419 return i
420 elif ch == '(' or ch == '[' or ch == '{':
421 i = self.skip_to_matching_bracket(s, i)
422 i += 1 # skip the closing bracket.
423 else: i += 1
424 return i
425 #@+node:ekr.20150514063305.159: *5* skip_ws and skip_ws_and_nl
426 def skip_ws(self, aList, i):
427 while i < len(aList):
428 c = aList[i]
429 if c == ' ' or c == '\t':
430 i += 1
431 else: break
432 return i
434 def skip_ws_and_nl(self, aList, i):
435 while i < len(aList):
436 c = aList[i]
437 if c == ' ' or c == '\t' or c == '\n':
438 i += 1
439 else: break
440 return i
441 #@-others
442#@-<< class To_Python >>
444#@+others
445#@+node:ekr.20210830070921.1: ** function: convert_at_test_nodes
def convert_at_test_nodes(c, converter, root, copy_tree=False):  # pragma: no cover
    """
    Use converter.convert_node() to convert all the @test nodes in the
    root's tree to children of a new last top-level node.

    c: the commander.
    converter: an object providing convert_node(c, p, target) and,
               when copy_tree is True, copy_children(c, p, target).
    root: the tree to scan; prints a message and returns if falsy.
    copy_tree: if True, also copy each @test node's children.
    """
    if not root:
        print('no root')
        return
    # Create the target node after the last top-level node.
    last = c.lastTopLevel()
    target = last.insertAfter()
    target.h = 'Converted nodes'
    count = 0
    for p in root.subtree():
        if p.h.startswith('@test'):
            converter.convert_node(c, p, target)
            if copy_tree and p.hasChildren():
                converter.copy_children(c, p, target.lastChild())
            count += 1
    target.expand()
    c.redraw(target)
    print(f"converted {count} @test nodes")
467#@+node:ekr.20160316111303.1: ** class ConvertCommandsClass
468class ConvertCommandsClass(BaseEditCommandsClass):
469 """Leo's file-conversion commands"""
    def __init__(self, c):
        """Ctor for ConvertCommandsClass."""
        # pylint: disable=super-init-not-called
        self.c = c
476 #@+others
477 #@+node:ekr.20220105151235.1: *3* ccc.add-mypy-annotations
    @cmd('add-mypy-annotations')
    def addMypyAnnotations(self, event):  # pragma: no cover
        """
        The add-mypy-annotations command adds mypy annotations to function and
        method definitions based on naming conventions.

        To use, select an @<file> node for a python external file and execute
        add-mypy-annotations. The command rewrites the @<file> tree, adding
        mypy annotations for untyped function/method arguments.

        The command attempts no type analysis. It uses "Any" as the type of
        functions and methods that do not specify a return type. As a special
        case, the type of __init__ methods is "None".

        @data add-mypy-annotations in leoSettings.leo contains a list of
        key/value pairs. Keys are argument names (as used in Leo); values are
        mypy type names.

        This command adds annotations for kwargs that have a constant initial
        value.
        """
        # Delegate to the helper class, then restore focus to the body pane.
        self.Add_Mypy_Annotations(self.c).add_annotations()
        self.c.bodyWantsFocus()
501 #@+node:ekr.20220105152521.1: *4* class Add_Mypy_Annotations
    class Add_Mypy_Annotations:
        """A class that implements the add-mypy-annotations command."""

        changed_lines = 0  # Number of nodes whose body was rewritten.
        tag = 'add-mypy-annotations'
        types_d: Dict[str, str] = {}  # Keys are argument names. Values are mypy types.

        def __init__(self, c):
            self.c = c

        #@+others
        #@+node:ekr.20220105154019.1: *5* ama.init_types_d
        def init_types_d(self):  # pragma: no cover
            """Init the annotations dict from @data add-mypy-annotations."""
            c, d, tag = self.c, self.types_d, self.tag
            data = c.config.getData(tag)
            if not data:
                print(f"@data {tag} not found")
                return
            for s in data:
                try:
                    # Each line has the form: argument-name, mypy-type.
                    key, val = s.split(',', 1)
                    if key in d:
                        print(f"{tag}: ignoring duplicate key: {s!r}")
                    else:
                        d[key] = val.strip()
                except ValueError:
                    print(f"{tag}: ignoring invalid key/value pair: {s!r}")
        #@+node:ekr.20220105154158.1: *5* ama.add_annotations
        def add_annotations(self):  # pragma: no cover
            """Top-level driver: annotate the selected @<file> python tree."""
            c, p, tag = self.c, self.c.p, self.tag
            # Checks.
            if not p.isAnyAtFileNode():
                g.es_print(f"{tag}: not an @file node: {p.h}")
                return
            if not p.h.endswith('.py'):
                g.es_print(f"{tag}: not a python file: {p.h}")
                return
            # Init.
            self.init_types_d()
            if not self.types_d:
                print(f"{self.tag}: no types given")
                return
            try:
                # Convert p and (recursively) all its descendants.
                self.convert_node(p)
                # Redraw.
                c.expandAllSubheads(p)
                c.treeWantsFocusNow()
            except Exception:
                g.es_exception()
        #@+node:ekr.20220105155837.4: *5* ama.convert_node
        def convert_node(self, p):  # pragma: no cover
            """Convert p.b, then (recursively) all of p's descendants."""
            # Convert p.b into child.b
            self.convert_body(p)
            # Recursively create all descendants.
            for child in p.children():
                self.convert_node(child)
        #@+node:ekr.20220105173331.1: *5* ama.convert_body
        def convert_body(self, p):
            """Convert p.b in place."""
            c = self.c
            if not p.b.strip():
                return  # pragma: no cover
            # Rewrite every def header via do_def.
            s = self.def_pat.sub(self.do_def, p.b)
            if p.b != s:
                self.changed_lines += 1
                if not g.unitTesting:
                    print(f"changed {p.h}")  # pragma: no cover
                p.setDirty()
                c.setChanged()
                p.b = s
        #@+node:ekr.20220105174453.1: *5* ama.do_def
        # Groups: (leading ws)(name)(args)(return annotation)(tail after colon).
        def_pat = re.compile(r'^([ \t]*)def[ \t]+(\w+)\s*\((.*?)\)(.*?):(.*?)\n', re.MULTILINE + re.DOTALL)

        def do_def(self, m):
            """re.sub callback: rewrite one matched def header with annotations."""
            lws, name, args, return_val, tail = m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)
            args = self.do_args(args)
            if not return_val.strip():
                # No explicit return annotation: __init__ returns None, else Any.
                val_s = 'None' if name == '__init__' else 'Any'
                return_val = f" -> {val_s}"
            if not tail.strip():
                tail = ''
            return f"{lws}def {name}({args}){return_val}:{tail}\n"
        #@+node:ekr.20220105174453.2: *5* ama.do_args
        arg_pat = re.compile(r'(\s*[\*\w]+\s*)([:,=])?')
        comment_pat = re.compile(r'(\s*#.*?\n)')

        def do_args(self, args):
            """Add type annotations to an argument list (given and returned as a string)."""
            multiline = '\n' in args.strip()
            comma = ',\n' if multiline else ', '
            lws = ' ' * 4 if multiline else ''
            result: List[str] = []
            i = 0
            while i < len(args):
                rest = args[i:]
                if not rest.strip():
                    break
                # Handle comments following arguments.
                if multiline and result:
                    m = self.comment_pat.match(rest)
                    if m:
                        # Re-attach the comment to the previous argument's line.
                        comment = m.group(0)
                        i += len(comment)
                        last = result.pop()
                        result.append(f"{last.rstrip()} {comment.strip()}\n")
                        continue
                m = self.arg_pat.match(rest)
                if not m:  # pragma: no cover
                    g.trace('==== bad args', i, repr(rest))
                    return args
                name1, tail = m.group(1), m.group(2)
                name = name1.strip()
                i += len(name1)
                if name == 'self':
                    # Don't annotate self.
                    result.append(f"{lws}{name}{comma}")
                    if i < len(args) and args[i] == ',':
                        i += 1
                elif tail == ':':
                    # Keep an existing annotation as-is.
                    arg, i = self.find_arg(args, i)
                    result.append(f"{lws}{name}: {arg}{comma}")
                elif tail == '=':
                    # Infer the annotation from the initializer's kind.
                    arg, i = self.find_arg(args, i)
                    kind = self.kind(arg)
                    result.append(f"{lws}{name}: {kind}={arg}{comma}")
                elif tail == ',':
                    kind = self.types_d.get(name.strip(), 'Any')
                    result.append(f"{lws}{name}: {kind}{comma}")
                    i += 1
                else:
                    kind = self.types_d.get(name.strip(), 'Any')
                    result.append(f"{lws}{name}: {kind}{comma}")
            s = ''.join(result)
            if multiline:
                s = '\n' + s
            if not multiline and s.endswith(', '):
                s = s[:-2]  # Drop the trailing separator.
            return s
        #@+node:ekr.20220105190332.1: *5* ama.find_arg
        def find_arg(self, s, i):
            """
            Scan over type annotations or initializers.

            Return (arg, j), the index of the character following the argument starting at s[i].
            """
            assert s[i] in ':=', (i, s[i], s)
            i += 1
            while i < len(s) and s[i] == ' ':
                i += 1
            i1 = i
            level = 0  # Assume balanced parens, brackets and strings.
            while i < len(s):
                ch = s[i]
                i += 1
                if ch in '[{(':
                    level += 1
                elif ch in ']}':
                    level -= 1
                elif ch in '\'"':
                    i = g.skip_python_string(s, i - 1)
                elif ch == ',' and level == 0:
                    # Skip the comma, but don't include it in the result.
                    break
            assert level == 0, (level, i == len(s), s)
            result = s[i1:i].strip()
            if result.endswith(','):
                result = result[:-1].strip()
            return result, i
        #@+node:ekr.20220105222028.1: *5* ama.kind
        # Patterns classifying constant initial values.
        bool_pat = re.compile(r'(True|False)')
        float_pat = re.compile(r'[0-9]*\.[0-9]*')
        int_pat = re.compile(r'[0-9]+')
        none_pat = re.compile(r'None')
        string_pat = re.compile(r'[\'"].*[\'"]')

        def kind(self, s):
            """Return the kind of the initial value s."""
            if self.bool_pat.match(s):
                return 'bool'
            if self.float_pat.match(s):
                return 'float'
            if self.int_pat.match(s):
                return 'int'
            if self.none_pat.match(s):
                return 'Any'  # None gives no type information.
            if self.string_pat.match(s):
                return 'str'
            return 'Any'  # pragma: no cover
        #@-others
695 #@+node:ekr.20160316091843.1: *3* ccc.c-to-python
    @cmd('c-to-python')
    def cToPy(self, event):  # pragma: no cover
        """
        The c-to-python command converts c or c++ text to python text.
        The conversion is not perfect, but it eliminates a lot of tedious
        text manipulation.
        """
        # Delegate to the C_To_Python helper, then restore focus to the body pane.
        self.C_To_Python(self.c).go()
        self.c.bodyWantsFocus()
705 #@+node:ekr.20150514063305.160: *4* class C_To_Python (To_Python)
706 class C_To_Python(To_Python): # pragma: no cover
707 #@+others
708 #@+node:ekr.20150514063305.161: *5* ctor & helpers (C_To_Python)
        def __init__(self, c):
            """Ctor for C_To_Python class."""
            super().__init__(c)
            #
            # Internal state...
            self.class_name = ''
            # The class name for the present function. Used to modify ivars.
            self.ivars = []
            # List of ivars to be converted to self.ivar
            self.get_user_types()  # Load user settings from @data nodes.
719 #@+node:ekr.20150514063305.162: *6* get_user_types (C_To_Python)
720 def get_user_types(self):
721 c = self.c
722 self.class_list = c.config.getData('c-to-python-class-list') or []
723 self.type_list = (
724 c.config.getData('c-to-python-type-list') or
725 ["char", "void", "short", "long", "int", "double", "float"]
726 )
727 aList = c.config.getData('c-to-python-ivars-dict')
728 if aList:
729 self.ivars_dict = self.parse_ivars_data(aList)
730 else:
731 self.ivars_dict = {}
732 #@+node:ekr.20150514063305.163: *6* parse_ivars_data
733 def parse_ivars_data(self, aList):
734 d: Dict[str, List[str]] = {}
735 key = None
736 aList = [z.strip() for z in aList if z.strip()]
737 for s in aList:
738 if s.endswith(':'):
739 key = s[:-1].strip()
740 elif key:
741 ivars = [z.strip() for z in s.split(',') if z.strip()]
742 aList = d.get(key, [])
743 aList.extend(ivars)
744 d[key] = aList
745 else:
746 g.error('invalid @data c-to-python-ivars-dict', repr(s))
747 return {}
748 return d
749 #@+node:ekr.20150514063305.164: *5* convertCodeList (C_To_Python) & helpers
        def convertCodeList(self, aList):
            """Convert the C/C++ chars in aList to Python, in place.
            The order of the replacements below is significant."""
            r, sr = self.replace, self.safe_replace
            # First...
            r(aList, "\r", '')
            # self.convertLeadingBlanks(aList) # Now done by indent.
            # if leoFlag: replaceSectionDefs(aList)
            self.mungeAllFunctions(aList)
            # Next...
            if 1:
                # CC2 stuff:
                sr(aList, "TRACEPB", "if trace: g.trace")
                sr(aList, "TRACEPN", "if trace: g.trace")
                sr(aList, "TRACEPX", "if trace: g.trace")
                sr(aList, "TICKB", "if trace: g.trace")
                sr(aList, "TICKN", "if trace: g.trace")
                sr(aList, "TICKX", "if trace: g.trace")
                sr(aList, "g.trace(ftag,", "g.trace(")
                sr(aList, "ASSERT_TRACE", "assert")
                sr(aList, "ASSERT", "assert")
                sr(aList, " -> ", '.')
                sr(aList, "->", '.')
                sr(aList, " . ", '.')
                sr(aList, "this.self", "self")
                sr(aList, "{", '')
                sr(aList, "}", '')
                sr(aList, "#if", "if")
                sr(aList, "#else", "else")
                sr(aList, "#endif", '')
                sr(aList, "else if", "elif")
                sr(aList, "else", "else:")
                sr(aList, "&&", " and ")
                sr(aList, "||", " or ")
                sr(aList, "TRUE", "True")
                sr(aList, "FALSE", "False")
                sr(aList, "NULL", "None")
                sr(aList, "this", "self")
                sr(aList, "try", "try:")
                sr(aList, "catch", "except:")
                # if leoFlag: sr(aList, "@code", "@c")
            # Next...
            self.handle_all_keywords(aList)
            self.insert_not(aList)
            self.removeSemicolonsAtEndOfLines(aList)
                # after processing for keywords
            # Last...
            # if firstPart and leoFlag: removeLeadingAtCode(aList)
            self.removeBlankLines(aList)
            self.removeExcessWs(aList)
            # your taste may vary: in Python I don't like extra whitespace
            sr(aList, " :", ":")
            sr(aList, ", ", ",")
            sr(aList, " ,", ",")
            sr(aList, " (", "(")
            sr(aList, "( ", "(")
            sr(aList, " )", ")")
            sr(aList, ") ", ")")
            sr(aList, "@language c", "@language python")
            self.replaceComments(aList)  # should follow all calls to safe_replace
            self.removeTrailingWs(aList)
            r(aList, "\t ", "\t")  # happens when deleting declarations.
810 #@+node:ekr.20150514063305.165: *6* handle_all_keywords
        def handle_all_keywords(self, aList):
            """
            converts if ( x ) to if x:
            converts while ( x ) to while x:
            (and the same for 'for' and 'elif')
            """
            i = 0
            while i < len(aList):
                if self.is_string_or_comment(aList, i):
                    i = self.skip_string_or_comment(aList, i)
                elif (
                    self.match_word(aList, i, "if") or
                    self.match_word(aList, i, "while") or
                    self.match_word(aList, i, "for") or
                    self.match_word(aList, i, "elif")
                ):
                    i = self.handle_keyword(aList, i)
                else:
                    i += 1
            # print "handAllKeywords2:", ''.join(aList)
830 #@+node:ekr.20150514063305.166: *7* handle_keyword
        def handle_keyword(self, aList, i):
            """Convert one 'if/elif/while/for (cond)' to Python form, in place.
            aList[i] must start one of those keywords. Return the next scan index."""
            if self.match_word(aList, i, "if"):
                i += 2
            elif self.match_word(aList, i, "elif"):
                i += 4
            elif self.match_word(aList, i, "while"):
                i += 5
            elif self.match_word(aList, i, "for"):
                i += 3
            else:
                assert False  # Caller guarantees one of the keywords above.
            # Make sure one space follows the keyword.
            k = i
            i = self.skip_ws(aList, i)
            if k == i:
                c = aList[i]
                aList[i : i + 1] = [' ', c]
                i += 1
            # Remove '(' and matching ')' and add a ':'
            if aList[i] == "(":
                # Look ahead. Don't remove if we span a line.
                j = self.skip_to_matching_bracket(aList, i)
                k = i
                found = False
                while k < j and not found:
                    found = aList[k] == '\n'
                    k += 1
                if not found:
                    j = self.removeMatchingBrackets(aList, i)
                if i < j < len(aList):
                    ch = aList[j]
                    # Insert ': ' after the condition.
                    aList[j : j + 1] = [ch, ":", " "]
                    j = j + 2
                return j
            return i
866 #@+node:ekr.20150514063305.167: *6* mungeAllFunctions
        def mungeAllFunctions(self, aList):
            """Scan for a '{' at the top level that is preceded by ')' """
            prevSemi = 0  # Previous semicolon: header contains all previous text
            i = 0
            firstOpen = None  # Index of the first '(' since the last header reset.
            while i < len(aList):
                progress = i
                if self.is_string_or_comment(aList, i):
                    j = self.skip_string_or_comment(aList, i)
                    prevSemi = j
                elif self.match(aList, i, '('):
                    if not firstOpen:
                        firstOpen = i
                    j = i + 1
                elif self.match(aList, i, '#'):
                    # At this point, it is a preprocessor directive.
                    j = self.skip_past_line(aList, i)
                    prevSemi = j
                elif self.match(aList, i, ';'):
                    j = i + 1
                    prevSemi = j
                elif self.match(aList, i, "{"):
                    # Possible function body start: try to munge the header.
                    j = self.handlePossibleFunctionHeader(aList, i, prevSemi, firstOpen)
                    prevSemi = j
                    firstOpen = None  # restart the scan
                else:
                    j = i + 1
                # Handle unusual cases.
                if j <= progress:
                    j = progress + 1
                assert j > progress
                i = j
899 #@+node:ekr.20150514063305.168: *7* handlePossibleFunctionHeader
        def handlePossibleFunctionHeader(self, aList, i, prevSemi, firstOpen):
            """
            Converts function header lines from c++ format to python format.
            That is, converts
                x1..nn w::y ( t1 z1,..tn zn) {
            to
                def y (z1,..zn): {

            aList[i] must be '{'. prevSemi is the start of the candidate
            header; firstOpen is the index of the header's '(' (or None).
            Return the index to resume scanning at.
            """
            assert self.match(aList, i, "{")
            prevSemi = self.skip_ws_and_nl(aList, prevSemi)
            close = self.prevNonWsOrNlChar(aList, i)
            if close < 0 or aList[close] != ')':
                # Should not increase *Python* indent.
                return 1 + self.skip_to_matching_bracket(aList, i)
            if not firstOpen:
                return 1 + self.skip_to_matching_bracket(aList, i)
            close2 = self.skip_to_matching_bracket(aList, firstOpen)
            if close2 != close:
                # The '(' and ')' are not a matching pair: not a header.
                return 1 + self.skip_to_matching_bracket(aList, i)
            open_paren = firstOpen
            assert aList[open_paren] == '('
            head = aList[prevSemi:open_paren]
            # do nothing if the head starts with "if", "for" or "while"
            k = self.skip_ws(head, 0)
            if k >= len(head) or not head[k].isalpha():
                return 1 + self.skip_to_matching_bracket(aList, i)
            kk = self.skip_past_word(head, k)
            if kk > k:
                headString = ''.join(head[k:kk])
                # C keywords that might be followed by '{'
                # print "headString:", headString
                if headString in [
                    "class", "do", "for", "if", "struct", "switch", "while"]:
                    return 1 + self.skip_to_matching_bracket(aList, i)
            args = aList[open_paren : close + 1]
            k = 1 + self.skip_to_matching_bracket(aList, i)
            body = aList[close + 1 : k]
            # Rewrite the three pieces, then splice them back in.
            head = self.massageFunctionHead(head)
            args = self.massageFunctionArgs(args)
            body = self.massageFunctionBody(body)
            result = []
            if head:
                result.extend(head)
            if args:
                result.extend(args)
            if body:
                result.extend(body)
            aList[prevSemi:k] = result
            return prevSemi + len(result)
949 #@+node:ekr.20150514063305.170: *7* massageFunctionHead (sets .class_name)
        def massageFunctionHead(self, head):
            """Convert a C++ function head (a char list) to 'def name'.
            Sets self.class_name when the head contains 'Class::name'.
            'Class::Class' becomes __init__; 'Class::~Class' becomes __del__."""
            result: List[Any] = []
            prevWord = []
            self.class_name = ''
            i = 0
            while i < len(head):
                i = self.skip_ws_and_nl(head, i)
                if i < len(head) and head[i].isalpha():
                    # Keep only the last word: return types etc. are dropped.
                    result = []
                    j = self.skip_past_word(head, i)
                    prevWord = head[i:j]
                    i = j
                    # look for ::word2
                    i = self.skip_ws(head, i)
                    if self.match(head, i, "::"):
                        # Set the global to the class name.
                        self.class_name = ''.join(prevWord)
                        # print(class name:", self.class_name)
                        i = self.skip_ws(head, i + 2)
                        if i < len(head) and (head[i] == '~' or head[i].isalpha()):
                            j = self.skip_past_word(head, i)
                            if head[i:j] == prevWord:
                                result.extend('__init__')  # Constructor.
                            elif head[i] == '~' and head[i + 1 : j] == prevWord:
                                result.extend('__del__')  # Destructor.
                            else:
                                # result.extend(list('::'))
                                result.extend(head[i:j])
                            i = j
                    else:
                        result.extend(prevWord)
                else: i += 1
            finalResult = list("def ")
            finalResult.extend(result)
            return finalResult
985 #@+node:ekr.20150514063305.169: *7* massageFunctionArgs
        def massageFunctionArgs(self, args):
            """Convert a C++ argument list (a char list from '(' to ')') to
            Python form: keep only argument names (types are dropped), and
            prepend 'self,' when a class name is in effect."""
            assert args[0] == '('
            assert args[-1] == ')'
            result = ['(']
            lastWord = []
            if self.class_name:
                for item in list("self,"):
                    result.append(item)  #can put extra comma
            i = 1
            while i < len(args):
                i = self.skip_ws_and_nl(args, i)
                ch = args[i]
                if ch.isalpha():
                    # Remember the most recent word: only the last one
                    # before ',' or ')' (the parameter name) is kept.
                    j = self.skip_past_word(args, i)
                    lastWord = args[i:j]
                    i = j
                elif ch == ',' or ch == ')':
                    for item in lastWord:
                        result.append(item)
                    if lastWord and ch == ',':
                        result.append(',')
                    lastWord = []
                    i += 1
                else: i += 1
            # Remove a possible trailing comma (e.g. from 'self,' with no args).
            if result[-1] == ',':
                del result[-1]
            result.append(')')
            result.append(':')
            # print "new args:", ''.join(result)
            return result
1016 #@+node:ekr.20150514063305.171: *7* massageFunctionBody & helpers
def massageFunctionBody(self, body):
    """Apply every body transformation, in order, and return the result."""
    transforms = (
        self.massageIvars,
        self.removeCasts,
        self.removeTypeNames,
        self.dedentBlocks,
    )
    for transform in transforms:
        body = transform(body)
    return body
1023 #@+node:ekr.20150514063305.172: *8* dedentBlocks
def dedentBlocks(self, body):
    """Look for '{' preceded by '{' or '}' or ';'
    (with intervening whitespace and comments).

    Such a brace opens a bare C block.  Replace the brace pair with
    marker comments and dedent the block's contents by up to 4 spaces
    per line.  body is a list of characters; returns the mutated list.
    """
    i = 0
    while i < len(body):
        j = i
        ch = body[i]
        if self.is_string_or_comment(body, i):
            j = self.skip_string_or_comment(body, i)
        elif ch in '{};':
            # Look ahead for '{'
            j += 1
            while True:
                k = j
                j = self.skip_ws_and_nl(body, j)
                if self.is_string_or_comment(body, j):
                    j = self.skip_string_or_comment(body, j)
                if k == j:
                    break
                assert k < j
            if self.match(body, j, '{'):
                k = j
                j = self.skip_to_matching_bracket(body, j)
                # Replace the opening '{' with a marker comment.
                m = '# <Start dedented block>...'
                body[k : k + 1] = list(m)
                j += len(m)
                # Dedent each line of the block by up to 4 leading spaces.
                while k < j:
                    progress = k
                    if body[k] == '\n':
                        k += 1
                        spaces = 0
                        while spaces < 4 and k < j:
                            if body[k] == ' ':
                                spaces += 1
                                k += 1
                            else:
                                break
                        if spaces > 0:
                            del body[k - spaces : k]
                            # Both indices shift left by the deleted count.
                            k -= spaces
                            j -= spaces
                    else:
                        k += 1
                    assert progress < k
                # Replace the closing '}' with a marker comment.
                m = ' # <End dedented block>'
                body[j : j + 1] = list(m)
                j += len(m)
        else:
            j = i + 1
        # Defensive programming.
        if i == j:
            j += 1
        assert i < j
        i = j
    return body
1080 #@+node:ekr.20150514063305.173: *8* massageIvars
def massageIvars(self, body):
    """
    Replace each known instance variable 'word' by 'self.word'.

    Known ivars come from self.ivars_dict, keyed by the class name
    set earlier by massageFunctionHead.  body is a list of characters.
    """
    ivars = self.ivars_dict.get(self.class_name, [])
    i = 0
    while i < len(body):
        if self.is_string_or_comment(body, i):
            i = self.skip_string_or_comment(body, i)
        elif body[i].isalpha():
            j = self.skip_past_word(body, i)
            word = ''.join(body[i:j])
            if word in ivars:
                # Replace word by self.word.
                word = "self." + word
                word = list(word)  # type:ignore
                body[i:j] = word
                # Advance past the (longer) replacement text.
                delta = len(word) - (j - i)
                i = j + delta
            else: i = j
        else: i += 1
    return body
1102 #@+node:ekr.20150514063305.174: *8* removeCasts
def removeCasts(self, body):
    """
    Remove C-style casts: '(TypeName *...)' where TypeName appears in
    self.class_list or self.type_list.  body is a list of characters.
    """
    i = 0
    while i < len(body):
        if self.is_string_or_comment(body, i):
            i = self.skip_string_or_comment(body, i)
        elif self.match(body, i, '('):
            start = i
            i = self.skip_ws(body, i + 1)
            if body[i].isalpha():
                j = self.skip_past_word(body, i)
                word = ''.join(body[i:j])
                i = j
                if word in self.class_list or word in self.type_list:
                    # Skip any '*' pointer markers after the type name.
                    i = self.skip_ws(body, i)
                    while self.match(body, i, '*'):
                        i += 1
                    i = self.skip_ws(body, i)
                    if self.match(body, i, ')'):
                        i += 1
                        # Delete the whole '(Type *)' span.
                        del body[start:i]
                        i = start
        else: i += 1
    return body
1127 #@+node:ekr.20150514063305.175: *8* removeTypeNames
def removeTypeNames(self, body):
    """Do _not_ remove type names when preceded by new."""
    i = 0
    while i < len(body):
        if self.is_string_or_comment(body, i):
            i = self.skip_string_or_comment(body, i)
        elif self.match_word(body, i, "new"):
            i = self.skip_past_word(body, i)
            i = self.skip_ws(body, i)
            # don't remove what follows new.
            if body[i].isalpha():
                i = self.skip_past_word(body, i)
        elif body[i].isalpha():
            j = self.skip_past_word(body, i)
            word = ''.join(body[i:j])
            if word in self.class_list or word in self.type_list:
                # Also remove '*' pointer markers after the type name.
                j = self.skip_ws(body, j)
                while self.match(body, j, '*'):
                    j += 1
                j = self.skip_ws(body, j)
                del body[i:j]
            else:
                i = j
        else: i += 1
    return body
1154 #@-others
1155 #@+node:ekr.20160111190632.1: *3* ccc.makeStubFiles
@cmd('make-stub-files')
def make_stub_files(self, event):  # pragma: no cover
    """
    Make stub files for all nearby @<file> nodes.
    Take configuration settings from @x stub-y nodes.
    """
    #@+others
    #@+node:ekr.20160213070235.1: *4* class MakeStubFileAdapter
    class MakeStubFileAdapter:  # pragma: no cover
        """
        A class that adapts leo/external/make_stub_files.py to Leo.

        Settings are taken from Leo settings nodes, not a .cfg file.
        """
        #@+others
        #@+node:ekr.20160213070235.2: *5* msf.ctor & helpers
        def __init__(self, c):
            """MakeStubFile.ctor. From StandAloneMakeStubFile.ctor."""
            self.c = c
            self.msf = msf = g.import_module('make_stub_files')
            x = msf.StandAloneMakeStubFile()  # x is used *only* to init ivars.
            # Ivars set on the command line...
            self.config_fn = None
            self.enable_unit_tests = False
            self.files = []  # May also be set in the config file.
            self.output_directory = self.finalize(
                c.config.getString('stub-output-directory') or '.')
            self.output_fn = None
            self.overwrite = c.config.getBool('stub-overwrite', default=False)
            self.trace_matches = c.config.getBool(
                'stub-trace-matches', default=False)
            self.trace_patterns = c.config.getBool(
                'stub-trace-patterns', default=False)
            self.trace_reduce = c.config.getBool('stub-trace-reduce', default=False)
            self.trace_visitors = c.config.getBool(
                'stub-trace-visitors', default=False)
            self.update_flag = c.config.getBool('stub-update', default=False)
            self.verbose = c.config.getBool('stub-verbose', default=False)
            self.warn = c.config.getBool('stub-warn', default=False)
            # Pattern lists & dicts, set by config sections...
            self.patterns_dict = {}
            self.names_dict = {}
            self.def_patterns = self.scan_patterns('stub-def-name-patterns')
            self.general_patterns = self.scan_patterns('stub-general-patterns')
            self.prefix_lines = self.scan('stub-prefix-lines')
            self.regex_patterns = self.scan_patterns('stub-regex-patterns')
            # Complete the dicts.
            x.make_patterns_dict()
            self.patterns_dict = x.patterns_dict
            self.op_name_dict = x.op_name_dict = x.make_op_name_dict()
            # Copy the ivars.
            x.def_patterns = self.def_patterns
            x.general_patterns = self.general_patterns
            x.regex_patterns = self.regex_patterns
            x.prefix_lines = self.prefix_lines
        #@+node:ekr.20160213070235.3: *6* msf.scan
        def scan(self, kind):
            """Return a list of *all* lines from an @data node, including comments."""
            c = self.c
            aList = c.config.getData(kind, strip_comments=False, strip_data=False)
            if not aList:
                g.trace(f"warning: no @data {kind} node")
            return aList
        #@+node:ekr.20160213070235.4: *6* msf.scan_d
        def scan_d(self, kind):
            """Return a dict created from an @data node of the given kind."""
            c = self.c
            aList = c.config.getData(kind, strip_comments=True, strip_data=True)
            d = {}
            if aList is None:
                g.trace(f"warning: no @data {kind} node")
            # Each data line has the form 'name: value'.
            for s in aList or []:
                name, value = s.split(':', 1)
                d[name.strip()] = value.strip()
            return d
        #@+node:ekr.20160213070235.5: *6* msf.scan_patterns
        def scan_patterns(self, kind):
            """Parse the config section into a list of patterns, preserving order."""
            d = self.scan_d(kind)
            aList = []
            seen = set()
            for key in d:
                value = d.get(key)
                if key in seen:
                    g.trace('duplicate key', key)
                else:
                    seen.add(key)
                    aList.append(self.msf.Pattern(key, value))
            return aList
        #@+node:ekr.20160213070235.6: *5* msf.finalize
        def finalize(self, fn):
            """Finalize and regularize a filename."""
            return g.os_path_normpath(g.os_path_abspath(g.os_path_expanduser(fn)))
        #@+node:ekr.20160213070235.7: *5* msf.make_stub_file
        def make_stub_file(self, p):
            """Make a stub file in ~/stubs for the @<file> node at p."""
            import ast
            assert p.isAnyAtFileNode()
            c = self.c
            fn = p.anyAtFileNodeName()
            if not fn.endswith('.py'):
                g.es_print('not a python file', fn)
                return
            abs_fn = g.fullPath(c, p)
            if not g.os_path_exists(abs_fn):
                g.es_print('not found', abs_fn)
                return
            if g.os_path_exists(self.output_directory):
                base_fn = g.os_path_basename(fn)
                out_fn = g.os_path_finalize_join(self.output_directory, base_fn)
            else:
                g.es_print('not found', self.output_directory)
                return
            # The stub file is the source name with a .pyi suffix.
            out_fn = out_fn[:-3] + '.pyi'
            out_fn = g.os_path_normpath(out_fn)
            self.output_fn = out_fn  # compatibility with stand-alone script
            s = open(abs_fn).read()
            node = ast.parse(s, filename=fn, mode='exec')
            # Make the traverser *after* creating output_fn and output_directory ivars.
            x = self.msf.StubTraverser(controller=self)
            x.output_fn = self.output_fn
            x.output_directory = self.output_directory
            x.trace_matches = self.trace_matches
            x.trace_patterns = self.trace_patterns
            x.trace_reduce = self.trace_reduce
            x.trace_visitors = self.trace_visitors
            x.run(node)
        #@+node:ekr.20160213070235.8: *5* msf.run
        def run(self, p):
            """Make stub files for all files in p's tree."""
            if p.isAnyAtFileNode():
                self.make_stub_file(p)
                return
            # First, look down tree.
            after, p2 = p.nodeAfterTree(), p.firstChild()
            found = False
            while p2 and p != after:
                if p2.isAnyAtFileNode():
                    self.make_stub_file(p2)
                    p2.moveToNext()
                    found = True
                else:
                    p2.moveToThreadNext()
            if not found:
                # Look up the tree.
                for p2 in p.parents():
                    if p2.isAnyAtFileNode():
                        self.make_stub_file(p2)
                        break
                else:
                    g.es('no files found in tree:', p.h)
        #@-others
    #@-others
    MakeStubFileAdapter(self.c).run(self.c.p)
1310 #@+node:ekr.20160316091923.1: *3* ccc.python-to-coffeescript
@cmd('python-to-coffeescript')
def python2coffeescript(self, event):  # pragma: no cover
    """
    Converts python text to coffeescript text. The conversion is not
    perfect, but it eliminates a lot of tedious text manipulation.
    """
    #@+others
    #@+node:ekr.20160316092837.1: *4* class Python_To_Coffeescript_Adapter
    class Python_To_Coffeescript_Adapter:  # pragma: no cover
        """An interface class between Leo and leo/external/py2cs.py."""
        #@+others
        #@+node:ekr.20160316112717.1: *5* py2cs.ctor
        def __init__(self, c):
            """Ctor for Python_To_Coffeescript_Adapter class."""
            self.c = c
            self.files = []
            self.output_directory = self.finalize(
                c.config.getString('py2cs-output-directory'))
            self.overwrite = c.config.getBool('py2cs-overwrite', default=False)
            # Connect to the external module.
            self.py2cs = g.import_module('leo.external.py2cs')
        #@+node:ekr.20160316093019.1: *5* py2cs.main
        def main(self):
            """Main line for Python_To_CoffeeScript class."""
            if self.py2cs:
                self.run()
            else:
                g.es_print('can not load py2cs.py')
        #@+node:ekr.20160316094011.7: *5* py2cs.finalize
        def finalize(self, fn):
            """Finalize and regularize a filename."""
            return g.os_path_normpath(g.os_path_abspath(g.os_path_expanduser(fn)))
        #@+node:ekr.20160316094011.8: *5* py2cs.to_coffeescript
        def to_coffeescript(self, p):
            """Convert the @<file> node at p to a .coffee file."""
            assert p.isAnyAtFileNode()
            c = self.c
            fn = p.anyAtFileNodeName()
            if not fn.endswith('.py'):
                g.es_print('not a python file', fn)
                return
            abs_fn = g.fullPath(c, p)
            if not g.os_path_exists(abs_fn):
                g.es_print('not found', abs_fn)
                return
            if g.os_path_exists(self.output_directory):
                base_fn = g.os_path_basename(fn)
                out_fn = g.os_path_finalize_join(self.output_directory, base_fn)
            else:
                g.es_print('not found', self.output_directory)
                return
            # The output file is the source name with a .coffee suffix.
            out_fn = out_fn[:-3] + '.coffee'
            out_fn = g.os_path_normpath(out_fn)
            s = open(abs_fn).read()
            x = self.py2cs.MakeCoffeeScriptController()
            # copy ivars and run.
            x.enable_unit_tests = False
            x.files = [abs_fn,]
            x.output_directory = self.output_directory
            x.overwrite = self.overwrite
            x.make_coffeescript_file(abs_fn, s=s)
        #@+node:ekr.20160316094011.9: *5* py2cs.run
        def run(self):
            """Create .coffee files for all @<file> nodes in p's tree."""
            # Bug fix: bind c explicitly.  The original read the enclosing
            # method's local 'c', which is only defined *after* this class
            # body and worked solely through late-bound closure lookup.
            c = self.c
            p = c.p
            if p.isAnyAtFileNode():
                self.to_coffeescript(p)
                return
            # First, look down tree.
            after, p2 = p.nodeAfterTree(), p.firstChild()
            found = False
            while p2 and p != after:
                if p2.isAnyAtFileNode():
                    self.to_coffeescript(p2)
                    p2.moveToNext()
                    found = True
                else:
                    p2.moveToThreadNext()
            if not found:
                # Look up the tree.
                for p2 in p.parents():
                    if p2.isAnyAtFileNode():
                        self.to_coffeescript(p2)
                        return
            g.es_print('no files found in tree:', p.h)
        #@+node:ekr.20160316141812.1: *5* py2cs.strip_sentinels
        def strip_sentinels(self, s):
            """
            Strip s of all sentinel lines.
            This may be dubious because it destroys outline structure.
            """
            delims = ['#', None, None]
            return ''.join(
                [z for z in g.splitLines(s) if not g.is_sentinel(z, delims)])
        #@-others
    #@-others
    c = self.c
    Python_To_Coffeescript_Adapter(c).main()
    c.bodyWantsFocus()
1415 #@+node:ekr.20211013080132.1: *3* ccc.python-to-typescript
@cmd('python-to-typescript')
def pythonToTypescriptCommand(self, event):  # pragma: no cover
    """
    The python-to-typescript command converts python to typescript text.
    The conversion is not perfect, but it eliminates a lot of tedious text
    manipulation.

    To use, select any @<file> node and execute python-to-typescript. The
    command creates (safe!) results in the last top-level node of the
    outline.

    The command attempts no type analysis. It uses "void" as the type of
    all functions and methods. However, the script will annotate
    function/method arguments:

    @data python-to-typescript-types in leoSettings.leo contains a list of
    key/value pairs. Keys are argument names (as used in Leo); values are
    typescript type names.
    """
    c = self.c
    # Convert the selected tree; results appear as a new last top-level node.
    self.PythonToTypescript(c).convert(c.p)
    self.c.bodyWantsFocus()
1438 #@+node:ekr.20211013080132.2: *4* class PythonToTypescript
1439 #@@nobeautify
1440 class PythonToTypescript: # pragma: no cover
1442 # The handlers are clear as they are.
1443 # pylint: disable=no-else-return
1445 # Keys are argument names. Values are typescript types.
1446 # Typescript can infer types of initialized kwargs.
1447 types_d: Dict[str, str] = {}
1449 #@+others
1450 #@+node:ekr.20211020162251.1: *5* py2ts.ctor
def __init__(self, c, alias=None):
    """
    Ctor for the PythonToTypescript class.

    c: the commander.
    alias: optional alias for 'self', for use by scripts.

    Reads @data python-to-typescript-types (comma-separated key/value
    pairs: argument name, typescript type) into self.types_d.
    """
    self.c = c
    self.alias = alias  # For scripts. An alias for 'self'.
    # Bug fix: copy the class-level dict to an instance attribute so that
    # per-commander settings do not mutate the shared class attribute.
    self.types_d = dict(self.types_d)
    data = c.config.getData('python-to-typescript-types') or []
    for line in data:
        try:
            key, value = line.split(',')
            self.types_d[key.strip()] = value.strip()
        except Exception:
            g.es_print('ignoring bad key/value pair in @data python-to-typescript-types')
            g.es_print(repr(line))
1462 #@+node:ekr.20211013081549.1: *5* py2ts.convert
def convert(self, p):
    """
    The main line.

    Convert p and all descendants as a child of a new last top-level node.
    """
    c = self.c
    # Create the parent node. It will be deleted.
    parent = c.lastTopLevel().insertAfter()
    # Convert p and all its descendants.
    try:
        self.convert_node(p, parent)
        # Promote the translated node.
        parent.promote()
        parent.doDelete()
        p = c.lastTopLevel()
        # Rename .py to .ts and escape '@' so the copy is not a real @file node.
        p.h = p.h.replace('.py', '.ts').replace('@', '@@')
        c.redraw(p)
        c.expandAllSubheads(p)
        c.treeWantsFocusNow()
    except Exception:
        g.es_exception()
1485 #@+node:ekr.20211013101327.1: *5* py2ts.convert_node
def convert_node(self, p, parent):
    """Copy p as the last child of parent, converting its body text.

    Descendants of p are converted recursively beneath the copy.
    The caller renames the copied node afterwards.
    """
    copy = parent.insertAsLastChild()
    copy.h = p.h
    self.convert_body(p, copy)
    for child in p.children():
        self.convert_node(child, copy)
1495 #@+node:ekr.20211013102209.1: *5* py2ts.convert_body, handlers &helpers
# Cached (pattern, handler) table; built lazily on first call.
patterns: Optional[Tuple] = None

def convert_body(self, p, target):
    """
    Convert p.b into target.b.

    This is the heart of the algorithm.  Each line is matched against
    the (pattern, handler) table; the first matching handler rewrites
    the line (and may insert lines).  Unmatched lines get operator and
    semicolon fixups instead.
    """
    # Calculate this table only once.
    if not self.patterns:
        self.patterns = (
            # Head: order matters.
            (self.comment_pat, self.do_comment),
            (self.docstring_pat, self.do_docstring),
            (self.section_ref_pat, self.do_section_ref),
            # Middle: order doesn't matter.
            (self.class_pat, self.do_class),
            (self.def_pat, self.do_def),
            (self.elif_pat, self.do_elif),
            (self.else_pat, self.do_else),
            (self.except_pat, self.do_except),
            (self.finally_pat, self.do_finally),
            (self.for_pat, self.do_for),
            (self.if_pat, self.do_if),
            (self.import_pat, self.do_import),
            (self.try_pat, self.do_try),
            (self.while_pat, self.do_while),
            (self.with_pat, self.do_with),
            # Tail: order matters.
            (self.trailing_comment_pat, self.do_trailing_comment)
        )
    # The loop may change lines, but each line is scanned only once.
    i, lines = 0, g.splitLines(self.pre_pass(p.b))
    old_lines = lines[:]
    while i < len(lines):
        progress = i
        line = lines[i]
        for (pattern, handler) in self.patterns:
            m = pattern.match(line)
            if m:
                i = handler(i, lines, m, p)  # May change lines.
                break
        else:
            self.do_operators(i, lines, p)
            self.do_semicolon(i, lines, p)
            i += 1
        assert progress < i
    # Debug-only trace; deliberately disabled.
    if False and g.unitTesting and lines != old_lines:
        print(f"\nchanged {p.h}:\n")
        for z in lines:
            print(z.rstrip())
    # Run the post-pass
    target.b = self.post_pass(lines)
    # Munge target.h.
    target.h = target.h.replace('__init__', 'constructor')
1551 #@+node:ekr.20211018154815.1: *6* handlers
1552 #@+node:ekr.20211014023141.1: *7* py2ts.do_class
class_pat = re.compile(r'^([ \t]*)class(.*):(.*)\n')

def do_class(self, i, lines, m, p):
    """Rewrite a python 'class' line as a braced typescript class block."""
    close_i = self.find_indented_block(i, lines, m, p)
    indent = m.group(1)
    bases = m.group(2).strip()
    rest = m.group(3).strip()
    bases_part = f" {bases} " if bases else ''
    comment_part = f" // {rest}" if rest else ''
    lines[i] = f"{indent}class{bases_part}{{{comment_part}\n"
    lines.insert(close_i, f"{indent}}}\n")
    return i + 1
1564 #@+node:ekr.20211013165615.1: *7* py2ts.do_comment
comment_pat = re.compile(r'^([ \t]*)#(.*)\n')

def do_comment(self, i, lines, m, p):
    """Rewrite a stand-alone '#' comment as a '//' comment.

    An empty comment becomes a blank line.
    """
    indent = m.group(1)
    text = m.group(2).strip()
    lines[i] = f"{indent}// {text}\n" if text else '\n'
    return i + 1
1575 #@+node:ekr.20211013130041.1: *7* py2ts.do_def & helper
def_pat = re.compile(r'^([ \t]*)def[ \t]+([\w_]+)\s*\((.*)\):(.*)\n')
this_pat = re.compile(r'^.*?\bthis\b')  # 'self' has already become 'this'.

def do_def(self, i, lines, m, p):
    """Rewrite a python 'def' line as a typescript method header."""
    j = self.find_indented_block(i, lines, m, p)
    lws, name, args, tail = m.group(1), m.group(2), m.group(3).strip(), m.group(4).strip()
    args = self.do_args(args)
    if name == '__init__':
        name = 'constructor'
    tail_s = f" // {tail}" if tail else ''
    # Use void as a placeholder type.
    type_s = ' ' if name == 'constructor' else ': void '
    # A def whose line mentions 'this' is a method, not a free function.
    function_s = ' ' if self.this_pat.match(lines[i]) else ' function '
    lines[i] = f"{lws}public{function_s}{name}({args}){type_s}{{{tail_s}\n"
    lines.insert(j, f"{lws}}}\n")
    return i + 1
#@+node:ekr.20211014031722.1: *8* py2ts.do_args
def do_args(self, args):
    """Add type annotations and remove the 'self' argument."""
    result = []
    for arg in (z.strip() for z in args.split(',')):
        # Omit the self arg.
        if arg != 'this':  # Already converted.
            # Annotate from the @data python-to-typescript-types table.
            val = self.types_d.get(arg)
            result.append(f"{arg}: {val}" if val else arg)
    return ', '.join(result)
1603 #@+node:ekr.20211013165952.1: *7* py2ts.do_docstring
docstring_pat = re.compile(r'^([ \t]*)r?("""|\'\'\')(.*)\n')

def do_docstring(self, i, lines, m, p):
    """
    Convert a python docstring.

    Always use the full multi-line typescript format, even for single-line
    python docstrings.
    """
    lws, delim, docstring = m.group(1), m.group(2), m.group(3).strip()
    tail = docstring.replace(delim, '').strip()
    # Open the typescript doc comment.
    lines[i] = f"{lws}/**\n"
    if tail:
        lines.insert(i + 1, f"{lws} * {tail}\n")
        i += 1
    if delim in docstring:
        # Single-line docstring: close the comment immediately.
        lines.insert(i + 1, f"{lws} */\n")
        return i + 2
    i += 1
    # Scan following lines until the closing delim.
    while i < len(lines):
        line = lines[i]
        # Buglet: ignores whatever might follow.
        tail = line.replace(delim, '').strip()
        # pylint: disable=no-else-return
        if delim in line:
            if tail:
                lines[i] = f"{lws} * {tail}\n"
                lines.insert(i + 1, f"{lws} */\n")
                return i + 2
            else:
                lines[i] = f"{lws} */\n"
                return i + 1
        elif tail:
            lines[i] = f"{lws} * {tail}\n"
        else:
            lines[i] = f"{lws} *\n"
        i += 1
    return i
1642 #@+node:ekr.20211014030113.1: *7* py2ts.do_except
except_pat = re.compile(r'^([ \t]*)except(.*):(.*)\n')

def do_except(self, i, lines, m, p):
    """Rewrite a python 'except' clause as a typescript 'catch' block."""
    close_i = self.find_indented_block(i, lines, m, p)
    indent = m.group(1)
    err = m.group(2).strip()
    rest = m.group(3).strip()
    comment_part = f" // {rest}" if rest else ''
    err_part = f" ({err}) " if err else ''
    lines[i] = f"{indent}catch{err_part}{{{comment_part}\n"
    lines.insert(close_i, f"{indent}}}\n")
    return i + 1
1654 #@+node:ekr.20211013141725.1: *7* py2ts.do_for
for1_s = r'^([ \t]*)for[ \t]+(.*):(.*)\n'  # for (cond):
for2_s = r'^([ \t]*)for[ \t]*\((.*)\n'  # for (

for1_pat = re.compile(for1_s)
for2_pat = re.compile(for2_s)
for_pat = re.compile(fr"{for1_s}|{for2_s}")  # Used by main loop.

def do_for(self, i, lines, m, p):
    """Rewrite a python 'for' statement, one-line or multi-line form."""
    line = lines[i]
    m1 = self.for1_pat.match(line)
    m2 = self.for2_pat.match(line)
    if m1:
        # One-line form: 'for cond:'.
        j = self.find_indented_block(i, lines, m, p)
        lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip()
        cond_s = cond if cond.startswith('(') else f"({cond})"
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}for {cond_s} {{{tail_s}\n"
        self.do_operators(i, lines, p)
        lines.insert(j, f"{lws}}}\n")
        return i + 1
    else:
        # Multi-line form: 'for (' with the condition on later lines.
        j = self.find_indented_block(i, lines, m2, p)
        # Generate the 'for' line.
        lws, tail = m2.group(1), m2.group(2).strip()
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}for ({tail_s}\n"
        # Tell do_semicolons that lines[i:j] are not statements.
        self.kill_semicolons(lines, i, j)
        # Assume line[j] closes the paren. Insert '{'
        lines[j] = lines[j].rstrip().replace(':', '') + ' {\n'
        # Insert '}'
        k = self.find_indented_block(j, lines, m2, p)
        lines.insert(k, f"{lws}}}\n")
        return i + 1
1690 #@+node:ekr.20211017202104.1: *7* py2ts.do_import
import_s = r'^([ \t]*)import[ \t]+(.*)\n'
import_from_s = r'^([ \t]*)from[ \t]+(.*)[ \t]+import[ \t]+(.*)\n'
import_pat = re.compile(fr"{import_s}|{import_from_s}")  # Used by main loop.
import1_pat = re.compile(import_s)
import2_pat = re.compile(import_from_s)

def do_import(self, i, lines, m, p):
    """Comment out python import statements, preserving their text."""
    line = lines[i]
    m_plain = self.import1_pat.match(line)
    if m_plain:
        indent, names = m_plain.group(1), m_plain.group(2).strip()
        lines[i] = f'{indent}// import "{names}"\n'
    else:
        m_from = self.import2_pat.match(line)
        indent = m_from.group(1)
        module = m_from.group(2).strip()
        names = m_from.group(3).strip()
        lines[i] = f'{indent}// from "{module}" import {names}\n'
    return i + 1
1710 #@+node:ekr.20211014022432.1: *7* py2ts.do_elif
elif1_s = r'^([ \t]*)elif[ \t]+(.*):(.*)\n'  # elif (cond):
elif2_s = r'^([ \t]*)elif[ \t]*\((.*)\n'  # elif (

elif1_pat = re.compile(elif1_s)
elif2_pat = re.compile(elif2_s)
elif_pat = re.compile(fr"{elif1_s}|{elif2_s}")  # Used by main loop.

def do_elif(self, i, lines, m, p):
    """Rewrite a python 'elif' clause as typescript 'else if'."""
    line = lines[i]
    m1 = self.elif1_pat.match(line)
    m2 = self.elif2_pat.match(line)
    if m1:
        # One-line form: 'elif cond:'.
        j = self.find_indented_block(i, lines, m, p)
        lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip()
        cond_s = cond if cond.startswith('(') else f"({cond})"
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}else if {cond_s} {{{tail_s}\n"
        lines.insert(j, f"{lws}}}\n")
        self.do_operators(i, lines, p)
        return i + 1
    else:
        # Multi-line form: 'elif (' with the condition on later lines.
        j = self.find_indented_block(i, lines, m2, p)
        # Generate the 'else if' line.
        lws, tail = m2.group(1), m2.group(2).strip()
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}else if ({tail_s}\n"
        # Tell do_semicolons that lines[i:j] are not statements.
        self.kill_semicolons(lines, i, j)
        # Assume line[j] closes the paren. Insert '{'
        lines[j] = lines[j].rstrip().replace(':', '') + ' {\n'
        # Insert '}'
        k = self.find_indented_block(j, lines, m2, p)
        lines.insert(k, f"{lws}}}\n")
        return i + 1
1747 #@+node:ekr.20211014022445.1: *7* py2ts.do_else
else_pat = re.compile(r'^([ \t]*)else:(.*)\n')

def do_else(self, i, lines, m, p):
    """Rewrite a python 'else:' line as a braced typescript 'else'."""
    close_i = self.find_indented_block(i, lines, m, p)
    indent = m.group(1)
    rest = m.group(2).strip()
    comment_part = f" // {rest}" if rest else ''
    lines[i] = f"{indent}else {{{comment_part}\n"
    lines.insert(close_i, f"{indent}}}\n")
    return i + 1
1758 #@+node:ekr.20211014022453.1: *7* py2ts.do_finally
finally_pat = re.compile(r'^([ \t]*)finally:(.*)\n')

def do_finally(self, i, lines, m, p):
    """Rewrite a python 'finally:' line as a braced typescript 'finally'."""
    close_i = self.find_indented_block(i, lines, m, p)
    indent = m.group(1)
    rest = m.group(2).strip()
    comment_part = f" // {rest}" if rest else ''
    lines[i] = f"{indent}finally {{{comment_part}\n"
    lines.insert(close_i, f"{indent}}}\n")
    return i + 1
1769 #@+node:ekr.20211013131016.1: *7* py2ts.do_if
if1_s = r'^([ \t]*)if[ \t]+(.*):(.*)\n'  # if (cond):
if2_s = r'^([ \t]*)if[ \t]*\((.*)\n'  # if (

if1_pat = re.compile(if1_s)
if2_pat = re.compile(if2_s)
if_pat = re.compile(fr"{if1_s}|{if2_s}")  # Used by main loop.

def do_if(self, i, lines, m, p):
    """Rewrite a python 'if' statement, one-line or multi-line form."""
    line = lines[i]
    m1 = self.if1_pat.match(line)
    m2 = self.if2_pat.match(line)
    if m1:
        # One-line form: 'if cond:'.
        j = self.find_indented_block(i, lines, m1, p)
        lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip()
        cond_s = cond if cond.startswith('(') else f"({cond})"
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}if {cond_s} {{{tail_s}\n"
        self.do_operators(i, lines, p)
        lines.insert(j, f"{lws}}}\n")
        return i + 1
    else:
        # Multi-line form: 'if (' with the condition on later lines.
        j = self.find_indented_block(i, lines, m2, p)
        # Generate the 'if' line.
        lws, tail = m2.group(1), m2.group(2).strip()
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}if ({tail_s}\n"
        # Tell do_semicolons that lines[i:j] are not statements.
        self.kill_semicolons(lines, i, j)
        # Assume line[j] closes the paren. Insert '{'
        lines[j] = lines[j].rstrip().replace(':', '') + ' {\n'
        # Insert '}'
        k = self.find_indented_block(j, lines, m2, p)
        lines.insert(k, f"{lws}}}\n")
        return i + 1
1805 #@+node:ekr.20211018125503.1: *7* py2ts.do_section_ref
section_ref_pat = re.compile(r"^([ \t]*)(\<\<.*?\>\>)\s*(.*)$")

def do_section_ref(self, i, lines, m, p):
    """Convert a trailing '#' comment after a section reference to '//'.

    Lines with no trailing '#' comment are left untouched.
    """
    indent, ref, rest = m.group(1), m.group(2), m.group(3).strip()
    if rest.startswith('#'):
        lines[i] = f"{indent}{ref} // {rest[1:]}\n"
    return i + 1
1814 #@+node:ekr.20211014022506.1: *7* py2ts.do_try
try_pat = re.compile(r'^([ \t]*)try:(.*)\n')

def do_try(self, i, lines, m, p):
    """Rewrite a python 'try:' line as a braced typescript 'try'."""
    close_i = self.find_indented_block(i, lines, m, p)
    indent = m.group(1)
    rest = m.group(2).strip()
    comment_part = f" // {rest}" if rest else ''
    lines[i] = f"{indent}try {{{comment_part}\n"
    lines.insert(close_i, f"{indent}}}\n")
    return i + 1
1825 #@+node:ekr.20211013141809.1: *7* py2ts.do_while
while1_s = r'^([ \t]*)while[ \t]+(.*):(.*)\n'  # while (cond):
while2_s = r'^([ \t]*)while[ \t]*\((.*)\n'  # while (

while1_pat = re.compile(while1_s)
while2_pat = re.compile(while2_s)
while_pat = re.compile(fr"{while1_s}|{while2_s}")  # Used by main loop.

def do_while(self, i, lines, m, p):
    """Rewrite a python 'while' statement, one-line or multi-line form."""
    line = lines[i]
    m1 = self.while1_pat.match(line)
    m2 = self.while2_pat.match(line)
    if m1:
        # One-line form: 'while cond:'.
        j = self.find_indented_block(i, lines, m, p)
        lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip()
        cond_s = cond if cond.startswith('(') else f"({cond})"
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}while {cond_s} {{{tail_s}\n"
        self.do_operators(i, lines, p)
        lines.insert(j, f"{lws}}}\n")
        return i + 1
    else:
        # Multi-line form: 'while (' with the condition on later lines.
        j = self.find_indented_block(i, lines, m2, p)
        # Generate the 'while' line.
        lws, tail = m2.group(1), m2.group(2).strip()
        tail_s = f" // {tail}" if tail else ''
        lines[i] = f"{lws}while ({tail_s}\n"
        # Tell do_semicolons that lines[i:j] are not statements.
        self.kill_semicolons(lines, i, j)
        # Assume line[j] closes the paren. Insert '{'
        lines[j] = lines[j].rstrip().replace(':', '') + ' {\n'
        # Insert '}'
        k = self.find_indented_block(j, lines, m2, p)
        lines.insert(k, f"{lws}}}\n")
        return i + 1
1862 #@+node:ekr.20211014022554.1: *7* py2ts.do_with
with_pat = re.compile(r'^([ \t]*)with(.*):(.*)\n')

def do_with(self, i, lines, m, p):
    """Rewrite a python 'with' statement as a braced block."""
    close_i = self.find_indented_block(i, lines, m, p)
    indent = m.group(1)
    clause = m.group(2).strip()
    rest = m.group(3).strip()
    comment_part = f" // {rest}" if rest else ''
    clause_part = f" ({clause}) " if clause else ''
    lines[i] = f"{indent}with{clause_part}{{{comment_part}\n"
    lines.insert(close_i, f"{indent}}}\n")
    return i + 1
1874 #@+node:ekr.20211013172540.1: *7* py2ts.do_trailing_comment
trailing_comment_pat = re.compile(r'^([ \t]*)(.*)#(.*)\n')

def do_trailing_comment(self, i, lines, m, p):
    """
    Handle a trailing comment line.

    All other patterns have already been scanned on the line.
    """
    lws, statement, trailing_comment = m.group(1), m.group(2).rstrip(), m.group(3).strip()
    # Add the statement's semicolon before the comment, not after it.
    statement_s = f"{statement};" if self.ends_statement(i, lines) else statement
    lines[i] = f"{lws}{statement_s} // {trailing_comment}\n"
    return i + 1
1887 #@+node:ekr.20211022090919.1: *6* helpers
1888 #@+node:ekr.20211017210122.1: *7* py2ts.do_operators
def do_operators(self, i, lines, p):
    """Translate python keywords and operators in lines[i] to typescript."""
    # Order matters: 'is not' must be handled before 'is' and 'not'.
    replacements = (
        ('True', 'true'),
        ('False', 'false'),
        # 'None' -> 'null' is done in the post-pass.
        ('default', 'default_val'),
        ('and', '&&'),
        ('or', '||'),
        ('is not', '!='),
        ('is', '=='),
        ('not', '!'),
        ('assert', '// assert'),
    )
    for old, new in replacements:
        lines[i] = re.sub(fr"\b{old}\b", new, lines[i])
1907 #@+node:ekr.20211017134103.1: *7* py2ts.do_semicolon
def do_semicolon(self, i, lines, p):
    """
    Insert a semicolon in lines[i] if appropriate.

    No other handler has matched, so we know that the line:
    - Does not end in a comment.
    - Is not part of a docstring.
    """
    # Honor the flag inserted by kill_semicolons.
    # NOTE(review): kill_semicolons_flag is defined in the kill_semicolons
    # node (not visible here); presumably it marks lines inside multi-line
    # headers that must not receive semicolons — confirm against that node.
    flag = self.kill_semicolons_flag
    if lines[i].endswith(flag):
        lines[i] = lines[i].replace(flag, '\n')
        return
    # For now, use a maximal policy.
    if self.ends_statement(i, lines):
        lines[i] = f"{lines[i].rstrip()};\n"
1926 #@+node:ekr.20211017135603.1: *7* py2ts.ends_statement
def ends_statement(self, i, lines):
    """
    Return True if lines[i] ends a statement, in which case the line
    should get a semicolon (before any trailing comment).
    """
    # https://stackoverflow.com/questions/38823062/
    s = lines[i].strip()
    next_line = lines[i + 1] if i + 1 < len(lines) else ''
    # Return False for blank lines.
    if not s:
        return False
    # Return False for Leo directives.
    if s.startswith('@'):
        return False
    # Return False for section references.
    # Use fresh locals: the original reused i, shadowing the parameter.
    n1, n2 = s.find('<<'), s.find('>>')
    if -1 < n1 < n2:
        return False
    # Return False if this line ends with a continuation token.
    if s.endswith(('{', '(', '[', ':', '||', '&&', '!', ',', '`')):
        return False
    # Return False if the next line starts with '{', '(', '[' or an operator.
    # Bug fix: the tuple had '[' twice and omitted '{'.
    if next_line.lstrip().startswith(('{', '(', '[', '&&', '||', '!')):
        return False
    # Return False for '}' lines.
    if s.startswith('}'):
        return False
    return True
1958 #@+node:ekr.20211013123001.1: *7* py2ts.find_indented_block
lws_pat = re.compile(r'^([ \t]*)')

def find_indented_block(self, i, lines, m, p):
    """Return j, the index of the line *after* the indented block."""
    # Scan for the first non-blank line indented no deeper than the header.
    header_lws = m.group(1)
    j = i + 1
    while j < len(lines):
        line = lines[j]
        line_lws = self.lws_pat.match(line).group(1)
        if line.strip() and len(line_lws) <= len(header_lws):
            # Don't include a trailing blank line in the block.
            if j > 1 and not lines[j - 1].strip():
                j -= 1
            break
        j += 1
    return j
1978 #@+node:ekr.20211020101415.1: *7* py2ts.kill_semicolons
kill_semicolons_flag = ' // **kill-semicolon**\n'  # Must end with a newline.

def kill_semicolons(self, lines, i, j):
    """
    Mark lines[i : j] so that later calls to do_semicolon will *not*
    append a semicolon to them.
    """
    marker = self.kill_semicolons_flag
    for n in range(i, j):
        lines[n] = lines[n].rstrip() + marker
1987 #@+node:ekr.20211016214742.1: *7* py2ts.move_docstrings
class_or_def_pat = re.compile(r'^(\s*)(public|class)\s+([\w_]+)')

def move_docstrings(self, lines):
    """
    Move docstrings before the preceding class or def line, in place.

    NOTE(review): by this point earlier passes have presumably rewritten
    'def' lines as 'public ...' and docstrings as '/** ... */' comments —
    that is what the patterns below look for; confirm against the callers.
    """
    i = 0
    while i < len(lines):
        m = self.class_or_def_pat.match(lines[i])
        i += 1
        if not m:
            continue
        # Set j to the start of the docstring (first non-blank line).
        j = i
        while j < len(lines):
            if lines[j].strip():
                break
            j += 1
        if j >= len(lines):
            continue
        if not lines[j].strip().startswith('/**'):
            continue
        # Set k to the end of the docstring.
        k = j
        while k < len(lines) and '*/' not in lines[k]:
            k += 1
        if k >= len(lines):
            # Unterminated '/**' comment: report it and give up.
            g.printObj(lines[i - 1 : len(lines) - 1], tag='OOPS')
            continue
        # Remove 4 blanks from the docstrings.
        for n in range(j, k + 1):
            if lines[n].startswith(' ' * 4):
                lines[n] = lines[n][4:]
        # Rearrange the lines: docstring first, then the class/def line.
        # Any blank lines between them are dropped by the splice.
        lines[i - 1 : k + 1] = lines[j : k + 1] + [lines[i - 1]]
        i = k + 1
2023 #@+node:ekr.20211016200908.1: *7* py2ts.post_pass & helpers
def post_pass(self, lines):
    """Run the final conversions on lines and return the resulting string."""
    # Munge lines in place.
    self.move_docstrings(lines)
    self.do_f_strings(lines)
    self.do_ternary(lines)
    # Do assignments last, so 'const' is not added to inserted comments.
    self.do_assignment(lines)
    result = ''.join(lines)
    result = result.replace('@language python', '@language typescript')
    result = result.replace(self.kill_semicolons_flag, '\n')
    return re.sub(r'\bNone\b', 'null', result)
2038 #@+node:ekr.20211021061023.1: *8* py2ts.do_assignment
assignment_pat = re.compile(r'^([ \t]*)(.*?)\s+=\s+(.*)$')  # Require whitespace around the '='.

def do_assignment(self, lines):
    """Add 'const' to all non-tuple assignments, in place."""
    # Do this late so that we can test for the ending semicolon.
    # Suppression tokens: skip any line whose target contains one.
    # Missing entries are likely to make this method generate '= ='.
    suppress = (
        ',',  # Tuple assignment or multi-line argument lists.
        '*',  # A converted docstring.
        '`',  # f-string.
        '//',  # Comment.
        '=',  # Condition.
        # Keywords that might be followed by '='.
        'class', 'def', 'elif', 'for', 'if', 'print', 'public', 'return', 'with', 'while',
    )
    for n, line in enumerate(lines):
        m = self.assignment_pat.match(line)
        if not m:
            continue
        lws, target, value = m.group(1), m.group(2), m.group(3).rstrip()
        if any(tok in target for tok in suppress):
            continue
        lines[n] = f"{lws}const {target} = {value}\n"
2062 #@+node:ekr.20211020185016.1: *8* py2ts.do_f_strings
f_string_pat = re.compile(r'([ \t]*)(.*?)f"(.*?)"(.*)$')

def do_f_strings(self, lines):
    """
    Convert Python f-strings in lines to typescript template literals,
    in place.
    """
    # The original had `if 1:` guarding this path with a permanently dead
    # `else` (comment-plus-insert variant); the unreachable branch is removed.
    i = 0
    while i < len(lines):
        progress = i
        s = lines[i]
        m = self.f_string_pat.match(s)
        if not m:
            i += 1
            continue
        lws, head, string, tail = m.group(1), m.group(2), m.group(3), m.group(4).rstrip()
        string_s = (
            string.replace('{', '${')  # Add the '$'.
            .replace('! ', 'not ')  # Undo erroneous replacement.
        )
        # Remove format strings. Not perfect, but seemingly good enough.
        string_s = re.sub(r'\:[0-9]\.+[0-9]+[frs]', '', string_s)
        string_s = re.sub(r'\![frs]', '', string_s)
        # A hack. If the fstring is on a line by itself, remove a trailing ';'.
        if not head.strip() and tail.endswith(';'):
            tail = tail[:-1].strip()
        # Replace the line with the template-literal form.
        lines[i] = f"{lws}{head}`{string_s}`{tail.rstrip()}\n"
        i += 1
        assert i > progress
2097 #@+node:ekr.20211021051033.1: *8* py2ts.do_ternary
ternary_pat1 = re.compile(r'^([ \t]*)(.*?)\s*=\s*(.*?) if (.*?) else (.*);$')  # assignment
ternary_pat2 = re.compile(r'^([ \t]*)return\s+(.*?) if (.*?) else (.*);$')  # return statement

def do_ternary(self, lines):
    """Convert Python ternary expressions in lines to '?:' form, in place."""
    i = 0
    while i < len(lines):
        progress = i
        s = lines[i]
        m1 = self.ternary_pat1.match(s)
        m2 = self.ternary_pat2.match(s)
        if not (m1 or m2):
            i += 1
            assert progress < i
            continue
        if m1:
            lws, target, a, cond, b = m1.groups()
            converted = f"{lws}{target} = {cond} ? {a} : {b};\n"
        else:
            lws, a, cond, b = m2.groups()
            converted = f"{lws}return {cond} ? {a} : {b};\n"
        # Keep the original line as a comment, then insert the conversion.
        lines[i] = f"{lws}// {s.strip()}\n"
        lines.insert(i + 1, converted)
        i += 2
        assert progress < i
2122 #@+node:ekr.20211017044939.1: *7* py2ts.pre_pass
def pre_pass(self, s):
    """
    Munge the incoming body text s before the line-by-line conversion
    pass, returning the adjusted string.

    NOTE(review): self.alias appears to be the conventional local alias
    for 'self' (e.g. 'at') set elsewhere in the class — confirm.  The
    order of the alias substitutions below matters: the bare
    '{alias},' rewrite must run last.
    """
    # Remove the python encoding lines.
    s = s.replace('@first # -*- coding: utf-8 -*-\n', '')
    # Replace 'self' by 'this' *everywhere*.
    s = re.sub(r'\bself\b', 'this', s)
    # Comment out @cmd decorators.
    s = re.sub(r"^@cmd(.*?)$", r'// @cmd\1\n', s, flags=re.MULTILINE)
    # Replace the alias for 'self' by 'this' *only* in specific contexts.
    # Do *not* replace the alias everywhere: that could do great harm.
    if self.alias:
        s = re.sub(fr"\b{self.alias}\.", 'this.', s)
        # Remove lines like `at = self`.
        s = re.sub(fr"^\s*{self.alias}\s*=\s*this\s*\n", '', s, flags=re.MULTILINE)
        # Remove lines like `at, c = self, self.c`.
        s = re.sub(
            fr"^(\s*){self.alias}\s*,\s*c\s*=\s*this,\s*this.c\n",
            r'\1c = this.c\n',  # do_assignment adds const.
            s,
            flags=re.MULTILINE)
        # Remove lines like `at, p = self, self.p`.
        s = re.sub(fr"^(\s*){self.alias}\s*,\s*p\s*=\s*this,\s*this.p\n",
            r'\1p = this.p\n',  # do_assignment adds const.
            s,
            flags=re.MULTILINE)
        # Do this last.
        s = re.sub(fr"\b{self.alias},", 'this,', s)
    return s
2154 #@-others
2155 #@+node:ekr.20160316091843.2: *3* ccc.typescript-to-py
@cmd('typescript-to-py')
def tsToPy(self, event):  # pragma: no cover
    """
    The typescript-to-python command converts typescript text to python
    text. The conversion is not perfect, but it eliminates a lot of tedious
    text manipulation.
    """
    #@+others
    #@+node:ekr.20150514063305.176: *4* class TS_To_Python (To_Python)
    class TS_To_Python(To_Python):  # pragma: no cover
        """Convert typescript to python syntax by heuristic text munging."""
        #@+others
        #@+node:ekr.20150514063305.177: *5* ctor (TS_To_Python)
        def __init__(self, c):
            """Ctor for TS_To_Python class."""
            super().__init__(c)
            self.class_name = ''
                # The class name for the present function. Used to modify ivars.
        #@+node:ekr.20150514063305.178: *5* convertCodeList (TS_To_Python) & helpers
        def convertCodeList(self, aList):
            """Convert aList, a list of characters, in place."""
            r, sr = self.replace, self.safe_replace
            # First...
            r(aList, '\r', '')
            self.mungeAllFunctions(aList)
            self.mungeAllClasses(aList)
            # Second...
            sr(aList, ' -> ', '.')
            sr(aList, '->', '.')
            sr(aList, ' . ', '.')
            # sr(aList, 'this.self', 'self')
            sr(aList, '{', '')
            sr(aList, '}', '')
            sr(aList, 'else if', 'elif')
            sr(aList, 'else', 'else:')
            sr(aList, '&&', ' and ')
            sr(aList, '||', ' or ')
            sr(aList, 'true', 'True')
            sr(aList, 'false', 'False')
            sr(aList, 'null', 'None')
            sr(aList, 'this', 'self')
            sr(aList, 'try', 'try:')
            sr(aList, 'catch', 'except:')
            sr(aList, 'constructor', '__init__')
            sr(aList, 'new ', '')
            # sr(aList, 'var ','')
                # var usually indicates something weird, or an uninited var,
                # so it may be good to retain as a marker.
            # Third...
            self.handle_all_keywords(aList)
            self.insert_not(aList)
            self.removeSemicolonsAtEndOfLines(aList)
                # after processing for keywords
            self.comment_scope_ids(aList)
            # Last...
            self.removeBlankLines(aList)
            self.removeExcessWs(aList)
            # I usually don't like extra whitespace. YMMV.
            sr(aList, '  and  ', ' and ')
            sr(aList, '  not  ', ' not ')
            sr(aList, '  or  ', ' or ')
            sr(aList, ' and  ', ' and ')
            sr(aList, ' not  ', ' not ')
            sr(aList, ' or  ', ' or ')
            sr(aList, ' :', ':')
            sr(aList, ', ', ',')
            sr(aList, ' ,', ',')
            sr(aList, ' (', '(')
            sr(aList, '( ', '(')
            sr(aList, ' )', ')')
            sr(aList, ') ', ')')
            sr(aList, ' and(', ' and (')
            sr(aList, ' not(', ' not (')
            sr(aList, ' or(', ' or (')
            sr(aList, ')and ', ') and ')
            sr(aList, ')not ', ') not ')
            sr(aList, ')or ', ') or ')
            sr(aList, ')and(', ') and (')
            sr(aList, ')not(', ') not (')
            sr(aList, ')or(', ') or (')
            sr(aList, '@language javascript', '@language python')
            self.replaceComments(aList)  # should follow all calls to safe_replace
            self.removeTrailingWs(aList)
            r(aList, '\t ', '\t')  # happens when deleting declarations.
        #@+node:ekr.20150514063305.179: *6* comment_scope_ids
        def comment_scope_ids(self, aList):
            """convert (public|private|export) aLine to aLine # (public|private|export)"""
            scope_ids = ('public', 'private', 'export',)
            i = 0
            # Handle a scope keyword at the very start of the text.
            if any(self.match_word(aList, i, z) for z in scope_ids):
                i = self.handle_scope_keyword(aList, i)
            while i < len(aList):
                progress = i
                if self.is_string_or_comment(aList, i):
                    i = self.skip_string_or_comment(aList, i)
                elif aList[i] == '\n':
                    # Scope keywords are only recognized at the start of a line.
                    i += 1
                    i = self.skip_ws(aList, i)
                    if any(self.match_word(aList, i, z) for z in scope_ids):
                        i = self.handle_scope_keyword(aList, i)
                else:
                    i += 1
                assert i > progress
            # print "handAllKeywords2:", ''.join(aList)
        #@+node:ekr.20150514063305.180: *7* handle_scope_keyword
        def handle_scope_keyword(self, aList, i):
            """Remove a leading scope keyword at aList[i]; return the new index."""
            i1 = i
            # pylint: disable=undefined-loop-variable
            for word in ('public', 'private', 'export'):
                if self.match_word(aList, i, word):
                    i += len(word)
                    break
            else:
                return None
            # Skip any following spaces.
            i2 = self.skip_ws(aList, i)
            # Scan to the next newline:
            i3 = self.skip_line(aList, i)
            # Optional: move the word to a trailing comment.
            comment: List[str] = list(f" # {word}") if False else []
            # Change the list in place.
            aList[i1:i3] = aList[i2:i3] + comment
            i = i1 + (i3 - i2) + len(comment)
            return i
        #@+node:ekr.20150514063305.181: *6* handle_all_keywords
        def handle_all_keywords(self, aList):
            """
            converts if ( x ) to if x:
            converts while ( x ) to while x:
            """
            statements = ('elif', 'for', 'if', 'while',)
            i = 0
            while i < len(aList):
                if self.is_string_or_comment(aList, i):
                    i = self.skip_string_or_comment(aList, i)
                elif any(self.match_word(aList, i, z) for z in statements):
                    i = self.handle_keyword(aList, i)
                else:
                    i += 1
            # print "handAllKeywords2:", ''.join(aList)
        #@+node:ekr.20150514063305.182: *7* handle_keyword
        def handle_keyword(self, aList, i):
            """Convert one keyword statement; return the index after it."""
            if self.match_word(aList, i, "if"):
                i += 2
            elif self.match_word(aList, i, "elif"):
                i += 4
            elif self.match_word(aList, i, "while"):
                i += 5
            elif self.match_word(aList, i, "for"):
                i += 3
            else: assert False, 'not a keyword'
            # Make sure one space follows the keyword.
            k = i
            i = self.skip_ws(aList, i)
            if k == i:
                c = aList[i]
                aList[i : i + 1] = [' ', c]
                i += 1
            # Remove '(' and matching ')' and add a ':'
            if aList[i] == "(":
                # Look ahead. Don't remove if we span a line.
                j = self.skip_to_matching_bracket(aList, i)
                k = i
                found = False
                while k < j and not found:
                    found = aList[k] == '\n'
                    k += 1
                if not found:
                    j = self.removeMatchingBrackets(aList, i)
                if i < j < len(aList):
                    ch = aList[j]
                    aList[j : j + 1] = [ch, ":", " "]
                    j = j + 2
                return j
            return i
        #@+node:ekr.20150514063305.183: *6* mungeAllClasses
        def mungeAllClasses(self, aList):
            """Scan for a '{' at the top level that is preceded by ')' """
            i = 0
            while i < len(aList):
                progress = i
                if self.is_string_or_comment(aList, i):
                    i = self.skip_string_or_comment(aList, i)
                elif self.match_word(aList, i, 'class'):
                    i1 = i
                    i = self.skip_line(aList, i)
                    # Append ':' to the class line.
                    aList[i - 1 : i] = list(f"{aList[i - 1]}:")
                    s = ''.join(aList[i1:i])
                    # Convert 'extends Base' to '(Base)'.
                    k = s.find(' extends ')
                    if k > -1:
                        k1 = k
                        k = g.skip_id(s, k + 1)
                        k = g.skip_ws(s, k)
                        if k < len(s) and g.is_c_id(s[k]):
                            k2 = g.skip_id(s, k)
                            word = s[k:k2]
                            aList[i1:i] = list(f"{s[:k1]} ({word})")
                elif self.match_word(aList, i, 'interface'):
                    aList[i : i + len('interface')] = list('class')
                    i = self.skip_line(aList, i)
                    aList[i - 1 : i] = list(f"{aList[i - 1]}: # interface")
                    i = self.skip_line(aList, i)  # Essential.
                else:
                    i += 1
                assert i > progress
        #@+node:ekr.20150514063305.184: *6* mungeAllFunctions & helpers
        def mungeAllFunctions(self, aList):
            """Scan for a '{' at the top level that is preceded by ')' """
            prevSemi = 0  # Previous semicolon: header contains all previous text
            i = 0
            firstOpen = None
            while i < len(aList):
                progress = i
                if self.is_string_or_comment(aList, i):
                    j = self.skip_string_or_comment(aList, i)
                    prevSemi = j
                elif self.match(aList, i, '('):
                    if not firstOpen:
                        firstOpen = i
                    j = i + 1
                elif self.match(aList, i, ';'):
                    j = i + 1
                    prevSemi = j
                elif self.match(aList, i, "{"):
                    j = self.handlePossibleFunctionHeader(
                        aList, i, prevSemi, firstOpen)
                    prevSemi = j
                    firstOpen = None  # restart the scan
                else:
                    j = i + 1
                # Handle unusual cases.
                if j <= progress:
                    j = progress + 1
                assert j > progress
                i = j
        #@+node:ekr.20150514063305.185: *7* handlePossibleFunctionHeader
        def handlePossibleFunctionHeader(self, aList, i, prevSemi, firstOpen):
            """
            converts function header lines from typescript format to python format.
            That is, converts
                (public|private|export) name (t1: z1, ... tn: zn) {
            to
                def name (z1, ..., zn): # (public|private|export)
            """
            assert self.match(aList, i, "{")
            prevSemi = self.skip_ws_and_nl(aList, prevSemi)
            close = self.prevNonWsOrNlChar(aList, i)
            if close < 0 or aList[close] != ')':
                # Should not increase *Python* indent.
                return 1 + self.skip_to_matching_bracket(aList, i)
            if not firstOpen:
                return 1 + self.skip_to_matching_bracket(aList, i)
            close2 = self.skip_to_matching_bracket(aList, firstOpen)
            if close2 != close:
                return 1 + self.skip_to_matching_bracket(aList, i)
            open_paren = firstOpen
            assert aList[open_paren] == '('
            head = aList[prevSemi:open_paren]
            # do nothing if the head starts with "if", "for" or "while"
            k = self.skip_ws(head, 0)
            if k >= len(head) or not head[k].isalpha():
                return 1 + self.skip_to_matching_bracket(aList, i)
            kk = self.skip_past_word(head, k)
            if kk > k:
                headString = ''.join(head[k:kk])
                # C keywords that might be followed by '{'
                # print "headString:", headString
                if headString in ["do", "for", "if", "struct", "switch", "while"]:
                    return 1 + self.skip_to_matching_bracket(aList, i)
            args = aList[open_paren : close + 1]
            k = 1 + self.skip_to_matching_bracket(aList, i)
            body = aList[close + 1 : k]
            head = self.massageFunctionHead(head)
            args = self.massageFunctionArgs(args)
            body = self.massageFunctionBody(body)
            result = []
            if head:
                result.extend(head)
            if args:
                result.extend(args)
            if body:
                result.extend(body)
            aList[prevSemi:k] = result
            return prevSemi + len(result)
        #@+node:ekr.20150514063305.186: *7* massageFunctionArgs
        def massageFunctionArgs(self, args):
            """Rewrite the argument list, dropping types and adding 'self'."""
            assert args[0] == '('
            assert args[-1] == ')'
            result = ['(']
            lastWord = []
            if self.class_name:
                for item in list("self,"):
                    result.append(item)  # can put extra comma
            i = 1
            while i < len(args):
                i = self.skip_ws_and_nl(args, i)
                ch = args[i]
                if ch.isalpha():
                    j = self.skip_past_word(args, i)
                    lastWord = args[i:j]
                    i = j
                elif ch == ',' or ch == ')':
                    # Emit only the last word seen: the arg name, not its type.
                    for item in lastWord:
                        result.append(item)
                    if lastWord and ch == ',':
                        result.append(',')
                    lastWord = []
                    i += 1
                else: i += 1
            if result[-1] == ',':
                del result[-1]
            result.append(')')
            result.append(':')
            return result
        #@+node:ekr.20150514063305.187: *7* massageFunctionHead (sets .class_name)
        def massageFunctionHead(self, head):
            """Convert the function head to 'def name', setting self.class_name."""
            result: List[Any] = []
            prevWord = []
            self.class_name = ''
            i = 0
            while i < len(head):
                i = self.skip_ws_and_nl(head, i)
                if i < len(head) and head[i].isalpha():
                    result = []
                    j = self.skip_past_word(head, i)
                    prevWord = head[i:j]
                    i = j
                    # look for ::word2
                    i = self.skip_ws(head, i)
                    if self.match(head, i, "::"):
                        # Set the global to the class name.
                        self.class_name = ''.join(prevWord)
                        # print("class name:", self.class_name)
                        i = self.skip_ws(head, i + 2)
                        if i < len(head) and (head[i] == '~' or head[i].isalpha()):
                            j = self.skip_past_word(head, i)
                            if head[i:j] == prevWord:
                                result.extend('__init__')
                            elif head[i] == '~' and head[i + 1 : j] == prevWord:
                                result.extend('__del__')
                            else:
                                # result.extend(list('::'))
                                result.extend(head[i:j])
                            i = j
                    else:
                        result.extend(prevWord)
                else: i += 1
            finalResult = list("def ")
            finalResult.extend(result)
            return finalResult
        #@+node:ekr.20150514063305.188: *7* massageFunctionBody & helper
        def massageFunctionBody(self, body):
            """Massage the function body. Only dedentBlocks is enabled now."""
            # body = self.massageIvars(body)
            # body = self.removeCasts(body)
            # body = self.removeTypeNames(body)
            body = self.dedentBlocks(body)
            return body
        #@+node:ekr.20150514063305.189: *8* dedentBlocks
        def dedentBlocks(self, body):
            """
            Look for '{' preceded by '{' or '}' or ';'
            (with intervening whitespace and comments).
            """
            i = 0
            while i < len(body):
                j = i
                ch = body[i]
                if self.is_string_or_comment(body, i):
                    j = self.skip_string_or_comment(body, i)
                elif ch in '{};':
                    # Look ahead for '{'
                    j += 1
                    while True:
                        k = j
                        j = self.skip_ws_and_nl(body, j)
                        if self.is_string_or_comment(body, j):
                            j = self.skip_string_or_comment(body, j)
                        if k == j:
                            break
                        assert k < j
                    if self.match(body, j, '{'):
                        k = j
                        j = self.skip_to_matching_bracket(body, j)
                        m = '# <Start dedented block>...'
                        body[k : k + 1] = list(m)
                        j += len(m)
                        # Remove up to 4 leading blanks from each line in the block.
                        while k < j:
                            progress = k
                            if body[k] == '\n':
                                k += 1
                                spaces = 0
                                while spaces < 4 and k < j:
                                    if body[k] == ' ':
                                        spaces += 1
                                        k += 1
                                    else:
                                        break
                                if spaces > 0:
                                    del body[k - spaces : k]
                                    k -= spaces
                                    j -= spaces
                            else:
                                k += 1
                            assert progress < k
                        m = ' # <End dedented block>'
                        body[j : j + 1] = list(m)
                        j += len(m)
                    else:
                        j = i + 1
                # Defensive programming.
                if i == j:
                    j += 1
                assert i < j
                i = j
            return body
        #@-others
    #@-others
    c = self.c
    TS_To_Python(c).go()
    c.bodyWantsFocus()
2582 #@+node:ekr.20160321042444.1: *3* ccc.import-jupyter-notebook
@cmd('import-jupyter-notebook')
def importJupyterNotebook(self, event):  # pragma: no cover
    """Prompt for a Jupyter (.ipynb) file and convert it to a Leo outline."""
    try:
        import nbformat
        assert nbformat
    except ImportError:
        g.es_print('import-jupyter-notebook requires nbformat package')
        return
    from leo.plugins.importers.ipynb import Import_IPYNB
    # was @-others
    c = self.c
    importer = Import_IPYNB(c)
    fn = importer.get_file_name()
    if fn:
        # Import into a fresh top-level node headed by the file name.
        root = c.lastTopLevel().insertAfter()
        root.h = fn
        importer.import_file(fn, root)
        c.redraw(root)
    c.bodyWantsFocus()
2604 #@+node:ekr.20160321072007.1: *3* ccc.export-jupyter-notebook
@cmd('export-jupyter-notebook')
def exportJupyterNotebook(self, event):  # pragma: no cover
    """Convert the present outline to a .ipynb file."""
    from leo.plugins.writers.ipynb import Export_IPYNB
    c = self.c
    exporter = Export_IPYNB(c)
    exporter.export_outline(c.p)
2611 #@-others
2612#@-others
2613#@-leo