Major fixes and new features
All checks were successful
continuous-integration/drone/push Build is passing
1419
venv/lib/python3.12/site-packages/black/__init__.py
Normal file
File diff suppressed because it is too large
3
venv/lib/python3.12/site-packages/black/__main__.py
Normal file
@@ -0,0 +1,3 @@
from black import patched_main

patched_main()
478
venv/lib/python3.12/site-packages/black/_width_table.py
Normal file
@@ -0,0 +1,478 @@
# Generated by make_width_table.py
# wcwidth 0.2.6
# Unicode 15.0.0
from typing import Final, List, Tuple

WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
    (0, 0, 0),
    (1, 31, -1),
    (127, 159, -1),
    (768, 879, 0),
    (1155, 1161, 0),
    (1425, 1469, 0),
    (1471, 1471, 0),
    (1473, 1474, 0),
    (1476, 1477, 0),
    (1479, 1479, 0),
    (1552, 1562, 0),
    (1611, 1631, 0),
    (1648, 1648, 0),
    (1750, 1756, 0),
    (1759, 1764, 0),
    (1767, 1768, 0),
    (1770, 1773, 0),
    (1809, 1809, 0),
    (1840, 1866, 0),
    (1958, 1968, 0),
    (2027, 2035, 0),
    (2045, 2045, 0),
    (2070, 2073, 0),
    (2075, 2083, 0),
    (2085, 2087, 0),
    (2089, 2093, 0),
    (2137, 2139, 0),
    (2200, 2207, 0),
    (2250, 2273, 0),
    (2275, 2306, 0),
    (2362, 2362, 0),
    (2364, 2364, 0),
    (2369, 2376, 0),
    (2381, 2381, 0),
    (2385, 2391, 0),
    (2402, 2403, 0),
    (2433, 2433, 0),
    (2492, 2492, 0),
    (2497, 2500, 0),
    (2509, 2509, 0),
    (2530, 2531, 0),
    (2558, 2558, 0),
    (2561, 2562, 0),
    (2620, 2620, 0),
    (2625, 2626, 0),
    (2631, 2632, 0),
    (2635, 2637, 0),
    (2641, 2641, 0),
    (2672, 2673, 0),
    (2677, 2677, 0),
    (2689, 2690, 0),
    (2748, 2748, 0),
    (2753, 2757, 0),
    (2759, 2760, 0),
    (2765, 2765, 0),
    (2786, 2787, 0),
    (2810, 2815, 0),
    (2817, 2817, 0),
    (2876, 2876, 0),
    (2879, 2879, 0),
    (2881, 2884, 0),
    (2893, 2893, 0),
    (2901, 2902, 0),
    (2914, 2915, 0),
    (2946, 2946, 0),
    (3008, 3008, 0),
    (3021, 3021, 0),
    (3072, 3072, 0),
    (3076, 3076, 0),
    (3132, 3132, 0),
    (3134, 3136, 0),
    (3142, 3144, 0),
    (3146, 3149, 0),
    (3157, 3158, 0),
    (3170, 3171, 0),
    (3201, 3201, 0),
    (3260, 3260, 0),
    (3263, 3263, 0),
    (3270, 3270, 0),
    (3276, 3277, 0),
    (3298, 3299, 0),
    (3328, 3329, 0),
    (3387, 3388, 0),
    (3393, 3396, 0),
    (3405, 3405, 0),
    (3426, 3427, 0),
    (3457, 3457, 0),
    (3530, 3530, 0),
    (3538, 3540, 0),
    (3542, 3542, 0),
    (3633, 3633, 0),
    (3636, 3642, 0),
    (3655, 3662, 0),
    (3761, 3761, 0),
    (3764, 3772, 0),
    (3784, 3790, 0),
    (3864, 3865, 0),
    (3893, 3893, 0),
    (3895, 3895, 0),
    (3897, 3897, 0),
    (3953, 3966, 0),
    (3968, 3972, 0),
    (3974, 3975, 0),
    (3981, 3991, 0),
    (3993, 4028, 0),
    (4038, 4038, 0),
    (4141, 4144, 0),
    (4146, 4151, 0),
    (4153, 4154, 0),
    (4157, 4158, 0),
    (4184, 4185, 0),
    (4190, 4192, 0),
    (4209, 4212, 0),
    (4226, 4226, 0),
    (4229, 4230, 0),
    (4237, 4237, 0),
    (4253, 4253, 0),
    (4352, 4447, 2),
    (4957, 4959, 0),
    (5906, 5908, 0),
    (5938, 5939, 0),
    (5970, 5971, 0),
    (6002, 6003, 0),
    (6068, 6069, 0),
    (6071, 6077, 0),
    (6086, 6086, 0),
    (6089, 6099, 0),
    (6109, 6109, 0),
    (6155, 6157, 0),
    (6159, 6159, 0),
    (6277, 6278, 0),
    (6313, 6313, 0),
    (6432, 6434, 0),
    (6439, 6440, 0),
    (6450, 6450, 0),
    (6457, 6459, 0),
    (6679, 6680, 0),
    (6683, 6683, 0),
    (6742, 6742, 0),
    (6744, 6750, 0),
    (6752, 6752, 0),
    (6754, 6754, 0),
    (6757, 6764, 0),
    (6771, 6780, 0),
    (6783, 6783, 0),
    (6832, 6862, 0),
    (6912, 6915, 0),
    (6964, 6964, 0),
    (6966, 6970, 0),
    (6972, 6972, 0),
    (6978, 6978, 0),
    (7019, 7027, 0),
    (7040, 7041, 0),
    (7074, 7077, 0),
    (7080, 7081, 0),
    (7083, 7085, 0),
    (7142, 7142, 0),
    (7144, 7145, 0),
    (7149, 7149, 0),
    (7151, 7153, 0),
    (7212, 7219, 0),
    (7222, 7223, 0),
    (7376, 7378, 0),
    (7380, 7392, 0),
    (7394, 7400, 0),
    (7405, 7405, 0),
    (7412, 7412, 0),
    (7416, 7417, 0),
    (7616, 7679, 0),
    (8203, 8207, 0),
    (8232, 8238, 0),
    (8288, 8291, 0),
    (8400, 8432, 0),
    (8986, 8987, 2),
    (9001, 9002, 2),
    (9193, 9196, 2),
    (9200, 9200, 2),
    (9203, 9203, 2),
    (9725, 9726, 2),
    (9748, 9749, 2),
    (9800, 9811, 2),
    (9855, 9855, 2),
    (9875, 9875, 2),
    (9889, 9889, 2),
    (9898, 9899, 2),
    (9917, 9918, 2),
    (9924, 9925, 2),
    (9934, 9934, 2),
    (9940, 9940, 2),
    (9962, 9962, 2),
    (9970, 9971, 2),
    (9973, 9973, 2),
    (9978, 9978, 2),
    (9981, 9981, 2),
    (9989, 9989, 2),
    (9994, 9995, 2),
    (10024, 10024, 2),
    (10060, 10060, 2),
    (10062, 10062, 2),
    (10067, 10069, 2),
    (10071, 10071, 2),
    (10133, 10135, 2),
    (10160, 10160, 2),
    (10175, 10175, 2),
    (11035, 11036, 2),
    (11088, 11088, 2),
    (11093, 11093, 2),
    (11503, 11505, 0),
    (11647, 11647, 0),
    (11744, 11775, 0),
    (11904, 11929, 2),
    (11931, 12019, 2),
    (12032, 12245, 2),
    (12272, 12283, 2),
    (12288, 12329, 2),
    (12330, 12333, 0),
    (12334, 12350, 2),
    (12353, 12438, 2),
    (12441, 12442, 0),
    (12443, 12543, 2),
    (12549, 12591, 2),
    (12593, 12686, 2),
    (12688, 12771, 2),
    (12784, 12830, 2),
    (12832, 12871, 2),
    (12880, 19903, 2),
    (19968, 42124, 2),
    (42128, 42182, 2),
    (42607, 42610, 0),
    (42612, 42621, 0),
    (42654, 42655, 0),
    (42736, 42737, 0),
    (43010, 43010, 0),
    (43014, 43014, 0),
    (43019, 43019, 0),
    (43045, 43046, 0),
    (43052, 43052, 0),
    (43204, 43205, 0),
    (43232, 43249, 0),
    (43263, 43263, 0),
    (43302, 43309, 0),
    (43335, 43345, 0),
    (43360, 43388, 2),
    (43392, 43394, 0),
    (43443, 43443, 0),
    (43446, 43449, 0),
    (43452, 43453, 0),
    (43493, 43493, 0),
    (43561, 43566, 0),
    (43569, 43570, 0),
    (43573, 43574, 0),
    (43587, 43587, 0),
    (43596, 43596, 0),
    (43644, 43644, 0),
    (43696, 43696, 0),
    (43698, 43700, 0),
    (43703, 43704, 0),
    (43710, 43711, 0),
    (43713, 43713, 0),
    (43756, 43757, 0),
    (43766, 43766, 0),
    (44005, 44005, 0),
    (44008, 44008, 0),
    (44013, 44013, 0),
    (44032, 55203, 2),
    (63744, 64255, 2),
    (64286, 64286, 0),
    (65024, 65039, 0),
    (65040, 65049, 2),
    (65056, 65071, 0),
    (65072, 65106, 2),
    (65108, 65126, 2),
    (65128, 65131, 2),
    (65281, 65376, 2),
    (65504, 65510, 2),
    (66045, 66045, 0),
    (66272, 66272, 0),
    (66422, 66426, 0),
    (68097, 68099, 0),
    (68101, 68102, 0),
    (68108, 68111, 0),
    (68152, 68154, 0),
    (68159, 68159, 0),
    (68325, 68326, 0),
    (68900, 68903, 0),
    (69291, 69292, 0),
    (69373, 69375, 0),
    (69446, 69456, 0),
    (69506, 69509, 0),
    (69633, 69633, 0),
    (69688, 69702, 0),
    (69744, 69744, 0),
    (69747, 69748, 0),
    (69759, 69761, 0),
    (69811, 69814, 0),
    (69817, 69818, 0),
    (69826, 69826, 0),
    (69888, 69890, 0),
    (69927, 69931, 0),
    (69933, 69940, 0),
    (70003, 70003, 0),
    (70016, 70017, 0),
    (70070, 70078, 0),
    (70089, 70092, 0),
    (70095, 70095, 0),
    (70191, 70193, 0),
    (70196, 70196, 0),
    (70198, 70199, 0),
    (70206, 70206, 0),
    (70209, 70209, 0),
    (70367, 70367, 0),
    (70371, 70378, 0),
    (70400, 70401, 0),
    (70459, 70460, 0),
    (70464, 70464, 0),
    (70502, 70508, 0),
    (70512, 70516, 0),
    (70712, 70719, 0),
    (70722, 70724, 0),
    (70726, 70726, 0),
    (70750, 70750, 0),
    (70835, 70840, 0),
    (70842, 70842, 0),
    (70847, 70848, 0),
    (70850, 70851, 0),
    (71090, 71093, 0),
    (71100, 71101, 0),
    (71103, 71104, 0),
    (71132, 71133, 0),
    (71219, 71226, 0),
    (71229, 71229, 0),
    (71231, 71232, 0),
    (71339, 71339, 0),
    (71341, 71341, 0),
    (71344, 71349, 0),
    (71351, 71351, 0),
    (71453, 71455, 0),
    (71458, 71461, 0),
    (71463, 71467, 0),
    (71727, 71735, 0),
    (71737, 71738, 0),
    (71995, 71996, 0),
    (71998, 71998, 0),
    (72003, 72003, 0),
    (72148, 72151, 0),
    (72154, 72155, 0),
    (72160, 72160, 0),
    (72193, 72202, 0),
    (72243, 72248, 0),
    (72251, 72254, 0),
    (72263, 72263, 0),
    (72273, 72278, 0),
    (72281, 72283, 0),
    (72330, 72342, 0),
    (72344, 72345, 0),
    (72752, 72758, 0),
    (72760, 72765, 0),
    (72767, 72767, 0),
    (72850, 72871, 0),
    (72874, 72880, 0),
    (72882, 72883, 0),
    (72885, 72886, 0),
    (73009, 73014, 0),
    (73018, 73018, 0),
    (73020, 73021, 0),
    (73023, 73029, 0),
    (73031, 73031, 0),
    (73104, 73105, 0),
    (73109, 73109, 0),
    (73111, 73111, 0),
    (73459, 73460, 0),
    (73472, 73473, 0),
    (73526, 73530, 0),
    (73536, 73536, 0),
    (73538, 73538, 0),
    (78912, 78912, 0),
    (78919, 78933, 0),
    (92912, 92916, 0),
    (92976, 92982, 0),
    (94031, 94031, 0),
    (94095, 94098, 0),
    (94176, 94179, 2),
    (94180, 94180, 0),
    (94192, 94193, 2),
    (94208, 100343, 2),
    (100352, 101589, 2),
    (101632, 101640, 2),
    (110576, 110579, 2),
    (110581, 110587, 2),
    (110589, 110590, 2),
    (110592, 110882, 2),
    (110898, 110898, 2),
    (110928, 110930, 2),
    (110933, 110933, 2),
    (110948, 110951, 2),
    (110960, 111355, 2),
    (113821, 113822, 0),
    (118528, 118573, 0),
    (118576, 118598, 0),
    (119143, 119145, 0),
    (119163, 119170, 0),
    (119173, 119179, 0),
    (119210, 119213, 0),
    (119362, 119364, 0),
    (121344, 121398, 0),
    (121403, 121452, 0),
    (121461, 121461, 0),
    (121476, 121476, 0),
    (121499, 121503, 0),
    (121505, 121519, 0),
    (122880, 122886, 0),
    (122888, 122904, 0),
    (122907, 122913, 0),
    (122915, 122916, 0),
    (122918, 122922, 0),
    (123023, 123023, 0),
    (123184, 123190, 0),
    (123566, 123566, 0),
    (123628, 123631, 0),
    (124140, 124143, 0),
    (125136, 125142, 0),
    (125252, 125258, 0),
    (126980, 126980, 2),
    (127183, 127183, 2),
    (127374, 127374, 2),
    (127377, 127386, 2),
    (127488, 127490, 2),
    (127504, 127547, 2),
    (127552, 127560, 2),
    (127568, 127569, 2),
    (127584, 127589, 2),
    (127744, 127776, 2),
    (127789, 127797, 2),
    (127799, 127868, 2),
    (127870, 127891, 2),
    (127904, 127946, 2),
    (127951, 127955, 2),
    (127968, 127984, 2),
    (127988, 127988, 2),
    (127992, 128062, 2),
    (128064, 128064, 2),
    (128066, 128252, 2),
    (128255, 128317, 2),
    (128331, 128334, 2),
    (128336, 128359, 2),
    (128378, 128378, 2),
    (128405, 128406, 2),
    (128420, 128420, 2),
    (128507, 128591, 2),
    (128640, 128709, 2),
    (128716, 128716, 2),
    (128720, 128722, 2),
    (128725, 128727, 2),
    (128732, 128735, 2),
    (128747, 128748, 2),
    (128756, 128764, 2),
    (128992, 129003, 2),
    (129008, 129008, 2),
    (129292, 129338, 2),
    (129340, 129349, 2),
    (129351, 129535, 2),
    (129648, 129660, 2),
    (129664, 129672, 2),
    (129680, 129725, 2),
    (129727, 129733, 2),
    (129742, 129755, 2),
    (129760, 129768, 2),
    (129776, 129784, 2),
    (131072, 196605, 2),
    (196608, 262141, 2),
    (917760, 917999, 0),
]
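
The table maps inclusive codepoint ranges to terminal cell widths (0 for combining marks, -1 for control characters, 2 for wide East Asian and emoji ranges). A minimal sketch of how such a table is typically consumed, using a binary search over the range starts; `lookup_width` is a hypothetical helper for illustration, not part of the vendored file:

from bisect import bisect_right
from typing import List, Tuple

def lookup_width(table: List[Tuple[int, int, int]], codepoint: int) -> int:
    # Find the last range whose start is <= codepoint. Building `starts` on
    # every call is wasteful; a real implementation would precompute it.
    starts = [start for start, _end, _width in table]
    idx = bisect_right(starts, codepoint) - 1
    if idx >= 0:
        _start, end, width = table[idx]
        if codepoint <= end:
            return width
    return 1  # codepoints not covered by the table occupy one column

assert lookup_width([(19968, 42124, 2)], 0x4E2D) == 2  # CJK ideograph, two columns
assert lookup_width([(19968, 42124, 2)], ord("a")) == 1
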
375
venv/lib/python3.12/site-packages/black/brackets.py
Normal file
@@ -0,0 +1,375 @@
"""Builds on top of nodes.py to track brackets."""

from dataclasses import dataclass, field
from typing import Dict, Final, Iterable, List, Optional, Sequence, Set, Tuple, Union

from black.nodes import (
    BRACKET,
    CLOSING_BRACKETS,
    COMPARATORS,
    LOGIC_OPERATORS,
    MATH_OPERATORS,
    OPENING_BRACKETS,
    UNPACKING_PARENTS,
    VARARGS_PARENTS,
    is_vararg,
    syms,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node

# types
LN = Union[Leaf, Node]
Depth = int
LeafID = int
NodeType = int
Priority = int


COMPREHENSION_PRIORITY: Final = 20
COMMA_PRIORITY: Final = 18
TERNARY_PRIORITY: Final = 16
LOGIC_PRIORITY: Final = 14
STRING_PRIORITY: Final = 12
COMPARATOR_PRIORITY: Final = 10
MATH_PRIORITIES: Final = {
    token.VBAR: 9,
    token.CIRCUMFLEX: 8,
    token.AMPER: 7,
    token.LEFTSHIFT: 6,
    token.RIGHTSHIFT: 6,
    token.PLUS: 5,
    token.MINUS: 5,
    token.STAR: 4,
    token.SLASH: 4,
    token.DOUBLESLASH: 4,
    token.PERCENT: 4,
    token.AT: 4,
    token.TILDE: 3,
    token.DOUBLESTAR: 2,
}
DOT_PRIORITY: Final = 1


class BracketMatchError(Exception):
    """Raised when an opening bracket is unable to be matched to a closing bracket."""


@dataclass
class BracketTracker:
    """Keeps track of brackets on a line."""

    depth: int = 0
    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
    delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
    previous: Optional[Leaf] = None
    _for_loop_depths: List[int] = field(default_factory=list)
    _lambda_argument_depths: List[int] = field(default_factory=list)
    invisible: List[Leaf] = field(default_factory=list)

    def mark(self, leaf: Leaf) -> None:
        """Mark `leaf` with bracket-related metadata. Keep track of delimiters.

        All leaves receive an int `bracket_depth` field that stores how deep
        within brackets a given leaf is. 0 means there are no enclosing brackets
        that started on this line.

        If a leaf is itself a closing bracket and there is a matching opening
        bracket earlier, it receives an `opening_bracket` field with which it forms a
        pair. This is a one-directional link to avoid reference cycles. Closing
        brackets without an opening happen on lines continued from previous
        breaks, e.g. `) -> "ReturnType":` as part of a funcdef where we place
        the return type annotation on its own line after the previous closing RPAR.

        If a leaf is a delimiter (a token on which Black can split the line if
        needed) and it's on depth 0, its `id()` is stored in the tracker's
        `delimiters` field.
        """
        if leaf.type == token.COMMENT:
            return

        if (
            self.depth == 0
            and leaf.type in CLOSING_BRACKETS
            and (self.depth, leaf.type) not in self.bracket_match
        ):
            return

        self.maybe_decrement_after_for_loop_variable(leaf)
        self.maybe_decrement_after_lambda_arguments(leaf)
        if leaf.type in CLOSING_BRACKETS:
            self.depth -= 1
            try:
                opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
            except KeyError as e:
                raise BracketMatchError(
                    "Unable to match a closing bracket to the following opening"
                    f" bracket: {leaf}"
                ) from e
            leaf.opening_bracket = opening_bracket
            if not leaf.value:
                self.invisible.append(leaf)
        leaf.bracket_depth = self.depth
        if self.depth == 0:
            delim = is_split_before_delimiter(leaf, self.previous)
            if delim and self.previous is not None:
                self.delimiters[id(self.previous)] = delim
            else:
                delim = is_split_after_delimiter(leaf, self.previous)
                if delim:
                    self.delimiters[id(leaf)] = delim
        if leaf.type in OPENING_BRACKETS:
            self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
            self.depth += 1
            if not leaf.value:
                self.invisible.append(leaf)
        self.previous = leaf
        self.maybe_increment_lambda_arguments(leaf)
        self.maybe_increment_for_loop_variable(leaf)

    def any_open_brackets(self) -> bool:
        """Return True if there is a yet unmatched open bracket on the line."""
        return bool(self.bracket_match)

    def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
        """Return the highest priority of a delimiter found on the line.

        Values are consistent with what `is_split_*_delimiter()` return.
        Raises ValueError on no delimiters.
        """
        return max(v for k, v in self.delimiters.items() if k not in exclude)

    def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
        """Return the number of delimiters with the given `priority`.

        If no `priority` is passed, defaults to max priority on the line.
        """
        if not self.delimiters:
            return 0

        priority = priority or self.max_delimiter_priority()
        return sum(1 for p in self.delimiters.values() if p == priority)

    def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
        """In a for loop, or comprehension, the variables are often unpacks.

        To avoid splitting on the comma in this situation, increase the depth of
        tokens between `for` and `in`.
        """
        if leaf.type == token.NAME and leaf.value == "for":
            self.depth += 1
            self._for_loop_depths.append(self.depth)
            return True

        return False

    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
        """See `maybe_increment_for_loop_variable` above for explanation."""
        if (
            self._for_loop_depths
            and self._for_loop_depths[-1] == self.depth
            and leaf.type == token.NAME
            and leaf.value == "in"
        ):
            self.depth -= 1
            self._for_loop_depths.pop()
            return True

        return False

    def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
        """In a lambda expression, there might be more than one argument.

        To avoid splitting on the comma in this situation, increase the depth of
        tokens between `lambda` and `:`.
        """
        if leaf.type == token.NAME and leaf.value == "lambda":
            self.depth += 1
            self._lambda_argument_depths.append(self.depth)
            return True

        return False

    def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
        """See `maybe_increment_lambda_arguments` above for explanation."""
        if (
            self._lambda_argument_depths
            and self._lambda_argument_depths[-1] == self.depth
            and leaf.type == token.COLON
        ):
            self.depth -= 1
            self._lambda_argument_depths.pop()
            return True

        return False

    def get_open_lsqb(self) -> Optional[Leaf]:
        """Return the most recent opening square bracket (if any)."""
        return self.bracket_match.get((self.depth - 1, token.RSQB))


def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
    """Return the priority of the `leaf` delimiter, given a line break after it.

    The delimiter priorities returned here are from those delimiters that would
    cause a line break after themselves.

    Higher numbers are higher priority.
    """
    if leaf.type == token.COMMA:
        return COMMA_PRIORITY

    return 0


def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
    """Return the priority of the `leaf` delimiter, given a line break before it.

    The delimiter priorities returned here are from those delimiters that would
    cause a line break before themselves.

    Higher numbers are higher priority.
    """
    if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
        # * and ** might also be MATH_OPERATORS but in this case they are not.
        # Don't treat them as a delimiter.
        return 0

    if (
        leaf.type == token.DOT
        and leaf.parent
        and leaf.parent.type not in {syms.import_from, syms.dotted_name}
        and (previous is None or previous.type in CLOSING_BRACKETS)
    ):
        return DOT_PRIORITY

    if (
        leaf.type in MATH_OPERATORS
        and leaf.parent
        and leaf.parent.type not in {syms.factor, syms.star_expr}
    ):
        return MATH_PRIORITIES[leaf.type]

    if leaf.type in COMPARATORS:
        return COMPARATOR_PRIORITY

    if (
        leaf.type == token.STRING
        and previous is not None
        and previous.type == token.STRING
    ):
        return STRING_PRIORITY

    if leaf.type not in {token.NAME, token.ASYNC}:
        return 0

    if (
        leaf.value == "for"
        and leaf.parent
        and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
        or leaf.type == token.ASYNC
    ):
        if (
            not isinstance(leaf.prev_sibling, Leaf)
            or leaf.prev_sibling.value != "async"
        ):
            return COMPREHENSION_PRIORITY

    if (
        leaf.value == "if"
        and leaf.parent
        and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
    ):
        return COMPREHENSION_PRIORITY

    if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
        return TERNARY_PRIORITY

    if leaf.value == "is":
        return COMPARATOR_PRIORITY

    if (
        leaf.value == "in"
        and leaf.parent
        and leaf.parent.type in {syms.comp_op, syms.comparison}
        and not (
            previous is not None
            and previous.type == token.NAME
            and previous.value == "not"
        )
    ):
        return COMPARATOR_PRIORITY

    if (
        leaf.value == "not"
        and leaf.parent
        and leaf.parent.type == syms.comp_op
        and not (
            previous is not None
            and previous.type == token.NAME
            and previous.value == "is"
        )
    ):
        return COMPARATOR_PRIORITY

    if leaf.value in LOGIC_OPERATORS and leaf.parent:
        return LOGIC_PRIORITY

    return 0


def max_delimiter_priority_in_atom(node: LN) -> Priority:
    """Return maximum delimiter priority inside `node`.

    This is specific to atoms with contents contained in a pair of parentheses.
    If `node` isn't an atom or there are no enclosing parentheses, returns 0.
    """
    if node.type != syms.atom:
        return 0

    first = node.children[0]
    last = node.children[-1]
    if not (first.type == token.LPAR and last.type == token.RPAR):
        return 0

    bt = BracketTracker()
    for c in node.children[1:-1]:
        if isinstance(c, Leaf):
            bt.mark(c)
        else:
            for leaf in c.leaves():
                bt.mark(leaf)
    try:
        return bt.max_delimiter_priority()

    except ValueError:
        return 0


def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> Set[LeafID]:
    """Return leaves that are inside matching brackets.

    The input `leaves` can have non-matching brackets at the head or tail parts.
    Matching brackets are included.
    """
    try:
        # Start with the first opening bracket and ignore closing brackets before.
        start_index = next(
            i for i, l in enumerate(leaves) if l.type in OPENING_BRACKETS
        )
    except StopIteration:
        return set()
    bracket_stack = []
    ids = set()
    for i in range(start_index, len(leaves)):
        leaf = leaves[i]
        if leaf.type in OPENING_BRACKETS:
            bracket_stack.append((BRACKET[leaf.type], i))
        if leaf.type in CLOSING_BRACKETS:
            if bracket_stack and leaf.type == bracket_stack[-1][0]:
                _, start = bracket_stack.pop()
                for j in range(start, i + 1):
                    ids.add(id(leaves[j]))
            else:
                break
    return ids
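
A small usage sketch, assuming this vendored `black` package is importable: mark the leaves of a parsed statement and ask the tracker for the highest-priority delimiter. Only depth-0 delimiters are recorded, so the commas inside the calls are invisible here and the top-level `and` (LOGIC_PRIORITY, 14) wins:

from black.brackets import BracketTracker
from black.parsing import lib2to3_parse

tracker = BracketTracker()
tree = lib2to3_parse("f(a, b) and g(c)\n")
for leaf in tree.leaves():
    tracker.mark(leaf)

# The commas sit at bracket depth 1, so the only recorded split point is
# the `and` between the two calls.
print(tracker.max_delimiter_priority())  # -> 14
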
143
venv/lib/python3.12/site-packages/black/cache.py
Normal file
@@ -0,0 +1,143 @@
"""Caching of formatted files with feature-based invalidation."""

import hashlib
import os
import pickle
import sys
import tempfile
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Iterable, NamedTuple, Set, Tuple

from platformdirs import user_cache_dir

from _black_version import version as __version__
from black.mode import Mode

if sys.version_info >= (3, 11):
    from typing import Self
else:
    from typing_extensions import Self


class FileData(NamedTuple):
    st_mtime: float
    st_size: int
    hash: str


def get_cache_dir() -> Path:
    """Get the cache directory used by black.

    Users can customize this directory on all systems using the `BLACK_CACHE_DIR`
    environment variable. By default, the cache directory is the user cache directory
    under the black application.

    This result is immediately set to a constant `black.cache.CACHE_DIR` so as to
    avoid repeated calls.
    """
    # NOTE: Function mostly exists as a clean way to test getting the cache directory.
    default_cache_dir = user_cache_dir("black")
    cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
    cache_dir = cache_dir / __version__
    return cache_dir


CACHE_DIR = get_cache_dir()


def get_cache_file(mode: Mode) -> Path:
    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"


@dataclass
class Cache:
    mode: Mode
    cache_file: Path
    file_data: Dict[str, FileData] = field(default_factory=dict)

    @classmethod
    def read(cls, mode: Mode) -> Self:
        """Read the cache if it exists and is well formed.

        If it is not well formed, the call to write later should
        resolve the issue.
        """
        cache_file = get_cache_file(mode)
        if not cache_file.exists():
            return cls(mode, cache_file)

        with cache_file.open("rb") as fobj:
            try:
                data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)
                file_data = {k: FileData(*v) for k, v in data.items()}
            except (pickle.UnpicklingError, ValueError, IndexError):
                return cls(mode, cache_file)

        return cls(mode, cache_file, file_data)

    @staticmethod
    def hash_digest(path: Path) -> str:
        """Return hash digest for path."""
        data = path.read_bytes()
        return hashlib.sha256(data).hexdigest()

    @staticmethod
    def get_file_data(path: Path) -> FileData:
        """Return file data for path."""
        stat = path.stat()
        hash = Cache.hash_digest(path)
        return FileData(stat.st_mtime, stat.st_size, hash)

    def is_changed(self, source: Path) -> bool:
        """Check if source has changed compared to cached version."""
        res_src = source.resolve()
        old = self.file_data.get(str(res_src))
        if old is None:
            return True

        st = res_src.stat()
        if st.st_size != old.st_size:
            return True
        if int(st.st_mtime) != int(old.st_mtime):
            new_hash = Cache.hash_digest(res_src)
            if new_hash != old.hash:
                return True
        return False

    def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
        """Split an iterable of paths in `sources` into two sets.

        The first contains paths of files that were modified on disk or are not in
        the cache. The other contains paths to non-modified files.
        """
        changed: Set[Path] = set()
        done: Set[Path] = set()
        for src in sources:
            if self.is_changed(src):
                changed.add(src)
            else:
                done.add(src)
        return changed, done

    def write(self, sources: Iterable[Path]) -> None:
        """Update the cache file data and write a new cache file."""
        self.file_data.update(
            **{str(src.resolve()): Cache.get_file_data(src) for src in sources}
        )
        try:
            CACHE_DIR.mkdir(parents=True, exist_ok=True)
            with tempfile.NamedTemporaryFile(
                dir=str(self.cache_file.parent), delete=False
            ) as f:
                # We store raw tuples in the cache because pickling NamedTuples
                # doesn't work with mypyc on Python 3.8, and because it's faster.
                data: Dict[str, Tuple[float, int, str]] = {
                    k: (*v,) for k, v in self.file_data.items()
                }
                pickle.dump(data, f, protocol=4)
            os.replace(f.name, self.cache_file)
        except OSError:
            pass
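
A usage sketch for the cache, assuming the vendored package is importable. Reading never fails hard: a missing or malformed cache file simply yields an empty Cache that the next write() repopulates. The `example.py` path is hypothetical, and paths must exist on disk when write() stats them:

from pathlib import Path

from black.cache import Cache
from black.mode import Mode

cache = Cache.read(Mode())
sources = {Path("example.py")}  # hypothetical file to format
changed, unchanged = cache.filtered_cached(sources)
# ... format everything in `changed`, then record the results so the next
# run can skip unmodified files:
cache.write(changed)
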
329
venv/lib/python3.12/site-packages/black/comments.py
Normal file
@@ -0,0 +1,329 @@
import re
from dataclasses import dataclass
from functools import lru_cache
from typing import Final, Iterator, List, Optional, Union

from black.nodes import (
    CLOSING_BRACKETS,
    STANDALONE_COMMENT,
    WHITESPACE,
    container_of,
    first_leaf_of,
    preceding_leaf,
    syms,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node

# types
LN = Union[Leaf, Node]

FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"}
FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"}
FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP}
FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"}

COMMENT_EXCEPTIONS = " !:#'"


@dataclass
class ProtoComment:
    """Describes a piece of syntax that is a comment.

    It's not a :class:`blib2to3.pytree.Leaf` so that:

    * it can be cached (`Leaf` objects should not be reused more than once as
      they store their lineno, column, prefix, and parent information);
    * `newlines` and `consumed` fields are kept separate from the `value`. This
      simplifies handling of special marker comments like ``# fmt: off/on``.
    """

    type: int  # token.COMMENT or STANDALONE_COMMENT
    value: str  # content of the comment
    newlines: int  # how many newlines before the comment
    consumed: int  # how many characters of the original leaf's prefix did we consume


def generate_comments(leaf: LN) -> Iterator[Leaf]:
    """Clean the prefix of the `leaf` and generate comments from it, if any.

    Comments in lib2to3 are shoved into the whitespace prefix. This happens
    in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
    move because it does away with modifying the grammar to include all the
    possible places in which comments can be placed.

    The sad consequence for us though is that comments don't "belong" anywhere.
    This is why this function generates simple parentless Leaf objects for
    comments. We simply don't know what the correct parent should be.

    No matter though, we can live without this. We really only need to
    differentiate between inline and standalone comments. The latter don't
    share the line with any code.

    Inline comments are emitted as regular token.COMMENT leaves. Standalone
    comments are emitted with a fake STANDALONE_COMMENT token identifier.
    """
    for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
        yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)


@lru_cache(maxsize=4096)
def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
    """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
    result: List[ProtoComment] = []
    if not prefix or "#" not in prefix:
        return result

    consumed = 0
    nlines = 0
    ignored_lines = 0
    for index, line in enumerate(re.split("\r?\n", prefix)):
        consumed += len(line) + 1  # adding the length of the split '\n'
        line = line.lstrip()
        if not line:
            nlines += 1
        if not line.startswith("#"):
            # Escaped newlines outside of a comment are not really newlines at
            # all. We treat a single-line comment following an escaped newline
            # as a simple trailing comment.
            if line.endswith("\\"):
                ignored_lines += 1
            continue

        if index == ignored_lines and not is_endmarker:
            comment_type = token.COMMENT  # simple trailing comment
        else:
            comment_type = STANDALONE_COMMENT
        comment = make_comment(line)
        result.append(
            ProtoComment(
                type=comment_type, value=comment, newlines=nlines, consumed=consumed
            )
        )
        nlines = 0
    return result


def make_comment(content: str) -> str:
    """Return a consistently formatted comment from the given `content` string.

    All comments (except for "##", "#!", "#:", "#'") should have a single
    space between the hash sign and the content.

    If `content` didn't start with a hash sign, one is provided.
    """
    content = content.rstrip()
    if not content:
        return "#"

    if content[0] == "#":
        content = content[1:]
    NON_BREAKING_SPACE = " "
    if (
        content
        and content[0] == NON_BREAKING_SPACE
        and not content.lstrip().startswith("type:")
    ):
        content = " " + content[1:]  # Replace NBSP by a simple space
    if content and content[0] not in COMMENT_EXCEPTIONS:
        content = " " + content
    return "#" + content


def normalize_fmt_off(node: Node) -> None:
    """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
    try_again = True
    while try_again:
        try_again = convert_one_fmt_off_pair(node)


def convert_one_fmt_off_pair(node: Node) -> bool:
    """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.

    Returns True if a pair was converted.
    """
    for leaf in node.leaves():
        previous_consumed = 0
        for comment in list_comments(leaf.prefix, is_endmarker=False):
            if comment.value not in FMT_PASS:
                previous_consumed = comment.consumed
                continue
            # We only want standalone comments. If there's no previous leaf or
            # the previous leaf is indentation, it's a standalone comment in
            # disguise.
            if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT:
                prev = preceding_leaf(leaf)
                if prev:
                    if comment.value in FMT_OFF and prev.type not in WHITESPACE:
                        continue
                    if comment.value in FMT_SKIP and prev.type in WHITESPACE:
                        continue

            ignored_nodes = list(generate_ignored_nodes(leaf, comment))
            if not ignored_nodes:
                continue

            first = ignored_nodes[0]  # Can be a container node with the `leaf`.
            parent = first.parent
            prefix = first.prefix
            if comment.value in FMT_OFF:
                first.prefix = prefix[comment.consumed :]
            if comment.value in FMT_SKIP:
                first.prefix = ""
                standalone_comment_prefix = prefix
            else:
                standalone_comment_prefix = (
                    prefix[:previous_consumed] + "\n" * comment.newlines
                )
            hidden_value = "".join(str(n) for n in ignored_nodes)
            if comment.value in FMT_OFF:
                hidden_value = comment.value + "\n" + hidden_value
            if comment.value in FMT_SKIP:
                hidden_value += "  " + comment.value
            if hidden_value.endswith("\n"):
                # That happens when one of the `ignored_nodes` ended with a NEWLINE
                # leaf (possibly followed by a DEDENT).
                hidden_value = hidden_value[:-1]
            first_idx: Optional[int] = None
            for ignored in ignored_nodes:
                index = ignored.remove()
                if first_idx is None:
                    first_idx = index
            assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
            assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
            parent.insert_child(
                first_idx,
                Leaf(
                    STANDALONE_COMMENT,
                    hidden_value,
                    prefix=standalone_comment_prefix,
                    fmt_pass_converted_first_leaf=first_leaf_of(first),
                ),
            )
            return True

    return False


def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]:
    """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.

    If comment is skip, returns leaf only.
    Stops at the end of the block.
    """
    if comment.value in FMT_SKIP:
        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment)
        return
    container: Optional[LN] = container_of(leaf)
    while container is not None and container.type != token.ENDMARKER:
        if is_fmt_on(container):
            return

        # fix for fmt: on in children
        if children_contains_fmt_on(container):
            for index, child in enumerate(container.children):
                if isinstance(child, Leaf) and is_fmt_on(child):
                    if child.type in CLOSING_BRACKETS:
                        # This means `# fmt: on` is placed at a different bracket level
                        # than `# fmt: off`. This is an invalid use, but as a courtesy,
                        # we include this closing bracket in the ignored nodes.
                        # The alternative is to fail the formatting.
                        yield child
                    return
                if (
                    child.type == token.INDENT
                    and index < len(container.children) - 1
                    and children_contains_fmt_on(container.children[index + 1])
                ):
                    # This means `# fmt: on` is placed right after an indentation
                    # level, and we shouldn't swallow the previous INDENT token.
                    return
                if children_contains_fmt_on(child):
                    return
                yield child
        else:
            if container.type == token.DEDENT and container.next_sibling is None:
                # This can happen when there is no matching `# fmt: on` comment at the
                # same level as `# fmt: off`. We need to keep this DEDENT.
                return
            yield container
            container = container.next_sibling


def _generate_ignored_nodes_from_fmt_skip(
    leaf: Leaf, comment: ProtoComment
) -> Iterator[LN]:
    """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
    prev_sibling = leaf.prev_sibling
    parent = leaf.parent
    # Need to properly format the leaf prefix to compare it to comment.value,
    # which is also formatted
    comments = list_comments(leaf.prefix, is_endmarker=False)
    if not comments or comment.value != comments[0].value:
        return
    if prev_sibling is not None:
        leaf.prefix = ""
        siblings = [prev_sibling]
        while "\n" not in prev_sibling.prefix and prev_sibling.prev_sibling is not None:
            prev_sibling = prev_sibling.prev_sibling
            siblings.insert(0, prev_sibling)
        yield from siblings
    elif (
        parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE
    ):
        # The `# fmt: skip` is on the colon line of the if/while/def/class/...
        # statements. The ignored nodes should be previous siblings of the
        # parent suite node.
        leaf.prefix = ""
        ignored_nodes: List[LN] = []
        parent_sibling = parent.prev_sibling
        while parent_sibling is not None and parent_sibling.type != syms.suite:
            ignored_nodes.insert(0, parent_sibling)
            parent_sibling = parent_sibling.prev_sibling
        # Special case for `async_stmt` where the ASYNC token is on the
        # grandparent node.
        grandparent = parent.parent
        if (
            grandparent is not None
            and grandparent.prev_sibling is not None
            and grandparent.prev_sibling.type == token.ASYNC
        ):
            ignored_nodes.insert(0, grandparent.prev_sibling)
        yield from iter(ignored_nodes)


def is_fmt_on(container: LN) -> bool:
    """Determine whether formatting is switched on within a container.
    Determined by whether the last `# fmt:` comment is `on` or `off`.
    """
    fmt_on = False
    for comment in list_comments(container.prefix, is_endmarker=False):
        if comment.value in FMT_ON:
            fmt_on = True
        elif comment.value in FMT_OFF:
            fmt_on = False
    return fmt_on


def children_contains_fmt_on(container: LN) -> bool:
    """Determine if children have formatting switched on."""
    for child in container.children:
        leaf = first_leaf_of(child)
        if leaf is not None and is_fmt_on(leaf):
            return True

    return False


def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
    """
    Returns:
        True iff one of the comments in @comment_list is a pragma used by one
        of the more common static analysis tools for python (e.g. mypy, flake8,
        pylint).
    """
    for comment in comment_list:
        if comment.value.startswith(("# type:", "# noqa", "# pylint:")):
            return True

    return False
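
A quick demonstration of the normalization rules above, assuming the vendored package is importable. `make_comment` adds the missing space after the hash unless the comment starts with one of COMMENT_EXCEPTIONS, and `list_comments` parses a whitespace prefix into ProtoComment records:

from black.comments import list_comments, make_comment

assert make_comment("#comment") == "# comment"
assert make_comment("#!shebang-like") == "#!shebang-like"  # "!" is an exception
assert make_comment("#: sphinx pragma") == "#: sphinx pragma"  # so is ":"

protos = list_comments("  # first\n\n  # second\n", is_endmarker=False)
assert [p.value for p in protos] == ["# first", "# second"]
assert protos[1].newlines == 1  # one blank line before the second comment
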
190
venv/lib/python3.12/site-packages/black/concurrency.py
Normal file
@@ -0,0 +1,190 @@
"""
Formatting many files at once via multiprocessing. Contains entrypoint and utilities.

NOTE: this module is only imported if we need to format several files at once.
"""

import asyncio
import logging
import os
import signal
import sys
import traceback
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import Manager
from pathlib import Path
from typing import Any, Iterable, Optional, Set

from mypy_extensions import mypyc_attr

from black import WriteBack, format_file_in_place
from black.cache import Cache
from black.mode import Mode
from black.output import err
from black.report import Changed, Report


def maybe_install_uvloop() -> None:
    """If our environment has uvloop installed we use it.

    This is called only from command-line entry points to avoid
    interfering with the parent process if Black is used as a library.
    """
    try:
        import uvloop

        uvloop.install()
    except ImportError:
        pass


def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None:
    """asyncio signal handler that cancels all `tasks` and reports to stderr."""
    err("Aborted!")
    for task in tasks:
        task.cancel()


def shutdown(loop: asyncio.AbstractEventLoop) -> None:
    """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
    try:
        # This part is borrowed from asyncio/runners.py in Python 3.7b2.
        to_cancel = [task for task in asyncio.all_tasks(loop) if not task.done()]
        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()
        loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
    finally:
        # `concurrent.futures.Future` objects cannot be cancelled once they
        # are already running. There might be some when the `shutdown()` happened.
        # Silence their logger's spew about the event loop being closed.
        cf_logger = logging.getLogger("concurrent.futures")
        cf_logger.setLevel(logging.CRITICAL)
        loop.close()


# diff-shades depends on being able to monkeypatch this function to operate. I know
# it's not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
@mypyc_attr(patchable=True)
def reformat_many(
    sources: Set[Path],
    fast: bool,
    write_back: WriteBack,
    mode: Mode,
    report: Report,
    workers: Optional[int],
) -> None:
    """Reformat multiple files using a ProcessPoolExecutor."""
    maybe_install_uvloop()

    executor: Executor
    if workers is None:
        workers = int(os.environ.get("BLACK_NUM_WORKERS", 0))
        workers = workers or os.cpu_count() or 1
    if sys.platform == "win32":
        # Work around https://bugs.python.org/issue26903
        workers = min(workers, 60)
    try:
        executor = ProcessPoolExecutor(max_workers=workers)
    except (ImportError, NotImplementedError, OSError):
        # we arrive here if the underlying system does not support multi-processing
        # like in AWS Lambda or Termux, in which case we gracefully fallback to
        # a ThreadPoolExecutor with just a single worker (more workers would not do us
        # any good due to the Global Interpreter Lock)
        executor = ThreadPoolExecutor(max_workers=1)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(
            schedule_formatting(
                sources=sources,
                fast=fast,
                write_back=write_back,
                mode=mode,
                report=report,
                loop=loop,
                executor=executor,
            )
        )
    finally:
        try:
            shutdown(loop)
        finally:
            asyncio.set_event_loop(None)
        if executor is not None:
            executor.shutdown()


async def schedule_formatting(
    sources: Set[Path],
    fast: bool,
    write_back: WriteBack,
    mode: Mode,
    report: "Report",
    loop: asyncio.AbstractEventLoop,
    executor: "Executor",
) -> None:
    """Run formatting of `sources` in parallel using the provided `executor`.

    (Use ProcessPoolExecutors for actual parallelism.)

    `write_back`, `fast`, and `mode` options are passed to
    :func:`format_file_in_place`.
    """
    cache = Cache.read(mode)
    if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        sources, cached = cache.filtered_cached(sources)
        for src in sorted(cached):
            report.done(src, Changed.CACHED)
    if not sources:
        return

    cancelled = []
    sources_to_cache = []
    lock = None
    if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        # For diff output, we need locks to ensure we don't interleave output
        # from different processes.
        manager = Manager()
        lock = manager.Lock()
    tasks = {
        asyncio.ensure_future(
            loop.run_in_executor(
                executor, format_file_in_place, src, fast, mode, write_back, lock
            )
        ): src
        for src in sorted(sources)
    }
    pending = tasks.keys()
    try:
        loop.add_signal_handler(signal.SIGINT, cancel, pending)
        loop.add_signal_handler(signal.SIGTERM, cancel, pending)
    except NotImplementedError:
        # There are no good alternatives for these on Windows.
        pass
    while pending:
        done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            src = tasks.pop(task)
            if task.cancelled():
                cancelled.append(task)
            elif exc := task.exception():
                if report.verbose:
                    traceback.print_exception(type(exc), exc, exc.__traceback__)
                report.failed(src, str(exc))
            else:
                changed = Changed.YES if task.result() else Changed.NO
                # If the file was written back or was successfully checked as
                # well-formatted, store this information in the cache.
                if write_back is WriteBack.YES or (
                    write_back is WriteBack.CHECK and changed is Changed.NO
                ):
                    sources_to_cache.append(src)
                report.done(src, changed)
    if cancelled:
        await asyncio.gather(*cancelled, return_exceptions=True)
    if sources_to_cache:
        cache.write(sources_to_cache)
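
The worker-count resolution in reformat_many(), restated as a standalone sketch: an explicit argument wins, then the BLACK_NUM_WORKERS environment variable, then the CPU count, then 1, with a 60-handle cap on Windows (bpo-26903). `resolve_workers` is a hypothetical helper name:

import os
import sys
from typing import Optional

def resolve_workers(requested: Optional[int]) -> int:
    workers = requested
    if workers is None:
        workers = int(os.environ.get("BLACK_NUM_WORKERS", 0))
        workers = workers or os.cpu_count() or 1
    if sys.platform == "win32":
        workers = min(workers, 60)  # WaitForMultipleObjects limit
    return workers

print(resolve_workers(None))  # e.g. 8 on an 8-core machine
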
4
venv/lib/python3.12/site-packages/black/const.py
Normal file
@@ -0,0 +1,4 @@
DEFAULT_LINE_LENGTH = 88
DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.ipynb_checkpoints|\.mypy_cache|\.nox|\.pytest_cache|\.ruff_cache|\.tox|\.svn|\.venv|\.vscode|__pypackages__|_build|buck-out|build|dist|venv)/"  # noqa: B950
DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$"
STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__"
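
A sketch of how these constants are consumed: Black compiles them as regular expressions and, roughly, matches them with re.search against forward-slash paths, so the exclude pattern fires on any excluded directory segment anywhere in the path:

import re

from black.const import DEFAULT_EXCLUDES, DEFAULT_INCLUDES

include = re.compile(DEFAULT_INCLUDES)
exclude = re.compile(DEFAULT_EXCLUDES)

assert include.search("src/app.py")
assert include.search("notebooks/demo.ipynb")
assert exclude.search("/project/.venv/lib/thing.py")  # vendored trees are skipped
assert not exclude.search("/project/src/thing.py")
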
54
venv/lib/python3.12/site-packages/black/debug.py
Normal file
@@ -0,0 +1,54 @@
from dataclasses import dataclass, field
from typing import Any, Iterator, List, TypeVar, Union

from black.nodes import Visitor
from black.output import out
from black.parsing import lib2to3_parse
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node, type_repr

LN = Union[Leaf, Node]
T = TypeVar("T")


@dataclass
class DebugVisitor(Visitor[T]):
    tree_depth: int = 0
    list_output: List[str] = field(default_factory=list)
    print_output: bool = True

    def out(self, message: str, *args: Any, **kwargs: Any) -> None:
        self.list_output.append(message)
        if self.print_output:
            out(message, *args, **kwargs)

    def visit_default(self, node: LN) -> Iterator[T]:
        indent = " " * (2 * self.tree_depth)
        if isinstance(node, Node):
            _type = type_repr(node.type)
            self.out(f"{indent}{_type}", fg="yellow")
            self.tree_depth += 1
            for child in node.children:
                yield from self.visit(child)

            self.tree_depth -= 1
            self.out(f"{indent}/{_type}", fg="yellow", bold=False)
        else:
            _type = token.tok_name.get(node.type, str(node.type))
            self.out(f"{indent}{_type}", fg="blue", nl=False)
            if node.prefix:
                # We don't have to handle prefixes for `Node` objects since
                # that delegates to the first child anyway.
                self.out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
            self.out(f" {node.value!r}", fg="blue", bold=False)

    @classmethod
    def show(cls, code: Union[str, Leaf, Node]) -> None:
        """Pretty-print the lib2to3 AST of a given string of `code`.

        Convenience method for debugging.
        """
        v: DebugVisitor[None] = DebugVisitor()
        if isinstance(code, str):
            code = lib2to3_parse(code)
        list(v.visit(code))
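
Convenience usage, per the docstring above (assuming the vendored package is importable):

from black.debug import DebugVisitor

DebugVisitor.show("x = [1, 2]\n")
# Prints an indented tree of nodes (expr_stmt, atom, ...) and leaves
# (NAME 'x', EQUAL '=', LSQB '[', ...), colorized via black.output.out.
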
414
venv/lib/python3.12/site-packages/black/files.py
Normal file
@@ -0,0 +1,414 @@
|
||||
import io
|
||||
import os
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Dict,
|
||||
Iterable,
|
||||
Iterator,
|
||||
List,
|
||||
Optional,
|
||||
Pattern,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
from mypy_extensions import mypyc_attr
|
||||
from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet
|
||||
from packaging.version import InvalidVersion, Version
|
||||
from pathspec import PathSpec
|
||||
from pathspec.patterns.gitwildmatch import GitWildMatchPatternError
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
try:
|
||||
import tomllib
|
||||
except ImportError:
|
||||
# Help users on older alphas
|
||||
if not TYPE_CHECKING:
|
||||
import tomli as tomllib
|
||||
else:
|
||||
import tomli as tomllib
|
||||
|
||||
from black.handle_ipynb_magics import jupyter_dependencies_are_installed
|
||||
from black.mode import TargetVersion
|
||||
from black.output import err
|
||||
from black.report import Report
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import colorama # noqa: F401
|
||||
|
||||
|
||||
@lru_cache
|
||||
def find_project_root(
|
||||
srcs: Sequence[str], stdin_filename: Optional[str] = None
|
||||
) -> Tuple[Path, str]:
|
||||
"""Return a directory containing .git, .hg, or pyproject.toml.
|
||||
|
||||
That directory will be a common parent of all files and directories
|
||||
passed in `srcs`.
|
||||
|
||||
If no directory in the tree contains a marker that would specify it's the
|
||||
project root, the root of the file system is returned.
|
||||
|
||||
Returns a two-tuple with the first element as the project root path and
|
||||
the second element as a string describing the method by which the
|
||||
project root was discovered.
|
||||
"""
|
||||
if stdin_filename is not None:
|
||||
srcs = tuple(stdin_filename if s == "-" else s for s in srcs)
|
||||
if not srcs:
|
||||
srcs = [str(Path.cwd().resolve())]
|
||||
|
||||
path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
|
||||
|
||||
# A list of lists of parents for each 'src'. 'src' is included as a
|
||||
# "parent" of itself if it is a directory
|
||||
src_parents = [
|
||||
list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
|
||||
]
|
||||
|
||||
common_base = max(
|
||||
set.intersection(*(set(parents) for parents in src_parents)),
|
||||
key=lambda path: path.parts,
|
||||
)
|
||||
|
||||
for directory in (common_base, *common_base.parents):
|
||||
if (directory / ".git").exists():
|
||||
return directory, ".git directory"
|
||||
|
||||
if (directory / ".hg").is_dir():
|
||||
return directory, ".hg directory"
|
||||
|
||||
if (directory / "pyproject.toml").is_file():
|
||||
return directory, "pyproject.toml"
|
||||
|
||||
return directory, "file system root"


def find_pyproject_toml(
    path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None
) -> Optional[str]:
    """Find the absolute filepath to a pyproject.toml if it exists"""
    path_project_root, _ = find_project_root(path_search_start, stdin_filename)
    path_pyproject_toml = path_project_root / "pyproject.toml"
    if path_pyproject_toml.is_file():
        return str(path_pyproject_toml)

    try:
        path_user_pyproject_toml = find_user_pyproject_toml()
        return (
            str(path_user_pyproject_toml)
            if path_user_pyproject_toml.is_file()
            else None
        )
    except (PermissionError, RuntimeError) as e:
        # We do not have access to the user-level config directory, so ignore it.
        err(f"Ignoring user configuration directory due to {e!r}")
        return None


@mypyc_attr(patchable=True)
def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
    """Parse a pyproject toml file, pulling out relevant parts for Black.

    If parsing fails, will raise a tomllib.TOMLDecodeError.
    """
    with open(path_config, "rb") as f:
        pyproject_toml = tomllib.load(f)
    config: Dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
    config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}

    if "target_version" not in config:
        inferred_target_version = infer_target_version(pyproject_toml)
        if inferred_target_version is not None:
            config["target_version"] = [v.name.lower() for v in inferred_target_version]

    return config
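
# Illustrative note (not part of black's source): the key normalization above
# turns CLI-style option names from a hypothetical [tool.black] table into
# Python identifiers, so "line-length" becomes "line_length":
_example = {"line-length": 100, "skip-string-normalization": True}
assert {k.replace("--", "").replace("-", "_"): v for k, v in _example.items()} == {
    "line_length": 100,
    "skip_string_normalization": True,
}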


def infer_target_version(
    pyproject_toml: Dict[str, Any]
) -> Optional[List[TargetVersion]]:
    """Infer Black's target version from the project metadata in pyproject.toml.

    Supports the PyPA standard format (PEP 621):
    https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#requires-python

    If the target version cannot be inferred, returns None.
    """
    project_metadata = pyproject_toml.get("project", {})
    requires_python = project_metadata.get("requires-python", None)
    if requires_python is not None:
        try:
            return parse_req_python_version(requires_python)
        except InvalidVersion:
            pass
        try:
            return parse_req_python_specifier(requires_python)
        except (InvalidSpecifier, InvalidVersion):
            pass

    return None


def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]:
    """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.

    If parsing fails, will raise a packaging.version.InvalidVersion error.
    If the parsed version cannot be mapped to a valid TargetVersion, returns None.
    """
    version = Version(requires_python)
    if version.release[0] != 3:
        return None
    try:
        return [TargetVersion(version.release[1])]
    except (IndexError, ValueError):
        return None


def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]:
    """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.

    If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
    If the parsed specifier cannot be mapped to a valid TargetVersion, returns None.
    """
    specifier_set = strip_specifier_set(SpecifierSet(requires_python))
    if not specifier_set:
        return None

    target_version_map = {f"3.{v.value}": v for v in TargetVersion}
    compatible_versions: List[str] = list(specifier_set.filter(target_version_map))
    if compatible_versions:
        return [target_version_map[v] for v in compatible_versions]
    return None


def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
    """Strip minor versions for some specifiers in the specifier set.

    For background on version specifiers, see PEP 440:
    https://peps.python.org/pep-0440/#version-specifiers
    """
    specifiers = []
    for s in specifier_set:
        if "*" in str(s):
            specifiers.append(s)
        elif s.operator in ["~=", "==", ">=", "==="]:
            version = Version(s.version)
            stripped = Specifier(f"{s.operator}{version.major}.{version.minor}")
            specifiers.append(stripped)
        elif s.operator == ">":
            version = Version(s.version)
            if len(version.release) > 2:
                s = Specifier(f">={version.major}.{version.minor}")
            specifiers.append(s)
        else:
            specifiers.append(s)

    return SpecifierSet(",".join(str(s) for s in specifiers))
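
# Illustrative usage (not part of black's source; a minimal sketch). A
# requires-python specifier such as ">=3.8" is stripped to major.minor and then
# filtered against the known 3.x TargetVersions, while a bare version string
# maps to exactly one TargetVersion:
_versions = parse_req_python_specifier(">=3.8")
assert _versions is not None and TargetVersion.PY38 in _versions
assert parse_req_python_version("3.9") == [TargetVersion.PY39]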


@lru_cache
def find_user_pyproject_toml() -> Path:
    r"""Return the path to the top-level user configuration for black.

    This looks for ~\.black on Windows and ~/.config/black on Linux and other
    Unix systems.

    May raise:
    - RuntimeError: if the current user has no homedir
    - PermissionError: if the current process cannot access the user's homedir
    """
    if sys.platform == "win32":
        # Windows
        user_config_path = Path.home() / ".black"
    else:
        config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config")
        user_config_path = Path(config_root).expanduser() / "black"
    return user_config_path.resolve()


@lru_cache
def get_gitignore(root: Path) -> PathSpec:
    """Return a PathSpec matching gitignore content if present."""
    gitignore = root / ".gitignore"
    lines: List[str] = []
    if gitignore.is_file():
        with gitignore.open(encoding="utf-8") as gf:
            lines = gf.readlines()
    try:
        return PathSpec.from_lines("gitwildmatch", lines)
    except GitWildMatchPatternError as e:
        err(f"Could not parse {gitignore}: {e}")
        raise


def normalize_path_maybe_ignore(
    path: Path,
    root: Path,
    report: Optional[Report] = None,
) -> Optional[str]:
    """Normalize `path`. May return `None` if `path` was ignored.

    `report` is where "path ignored" output goes.
    """
    try:
        abspath = path if path.is_absolute() else Path.cwd() / path
        normalized_path = abspath.resolve()
        try:
            root_relative_path = normalized_path.relative_to(root).as_posix()
        except ValueError:
            if report:
                report.path_ignored(
                    path, f"is a symbolic link that points outside {root}"
                )
            return None

    except OSError as e:
        if report:
            report.path_ignored(path, f"cannot be read because {e}")
        return None

    return root_relative_path


def _path_is_ignored(
    root_relative_path: str,
    root: Path,
    gitignore_dict: Dict[Path, PathSpec],
    report: Report,
) -> bool:
    path = root / root_relative_path
    # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
    # ensure that gitignore_dict is ordered from least specific to most specific.
    for gitignore_path, pattern in gitignore_dict.items():
        try:
            relative_path = path.relative_to(gitignore_path).as_posix()
        except ValueError:
            break
        if pattern.match_file(relative_path):
            report.path_ignored(
                path.relative_to(root), "matches a .gitignore file content"
            )
            return True
    return False


def path_is_excluded(
    normalized_path: str,
    pattern: Optional[Pattern[str]],
) -> bool:
    match = pattern.search(normalized_path) if pattern else None
    return bool(match and match.group(0))
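
# Illustrative usage (not part of black's source; hypothetical patterns). A
# None pattern excludes nothing, and only a non-empty match excludes the path:
import re as _re

assert path_is_excluded("/build/lib.py", _re.compile(r"/build/")) is True
assert path_is_excluded("/src/lib.py", _re.compile(r"/build/")) is False
assert path_is_excluded("/src/lib.py", None) is False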


def gen_python_files(
    paths: Iterable[Path],
    root: Path,
    include: Pattern[str],
    exclude: Pattern[str],
    extend_exclude: Optional[Pattern[str]],
    force_exclude: Optional[Pattern[str]],
    report: Report,
    gitignore_dict: Optional[Dict[Path, PathSpec]],
    *,
    verbose: bool,
    quiet: bool,
) -> Iterator[Path]:
    """Generate all files under `paths` whose paths are not excluded by the
    `exclude_regex`, `extend_exclude`, or `force_exclude` regexes,
    but are included by the `include` regex.

    Symbolic links pointing outside of the `root` directory are ignored.

    `report` is where output about exclusions goes.
    """

    assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
    for child in paths:
        root_relative_path = child.absolute().relative_to(root).as_posix()

        # First ignore files matching .gitignore, if passed
        if gitignore_dict and _path_is_ignored(
            root_relative_path, root, gitignore_dict, report
        ):
            continue

        # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options.
        root_relative_path = "/" + root_relative_path
        if child.is_dir():
            root_relative_path += "/"

        if path_is_excluded(root_relative_path, exclude):
            report.path_ignored(child, "matches the --exclude regular expression")
            continue

        if path_is_excluded(root_relative_path, extend_exclude):
            report.path_ignored(
                child, "matches the --extend-exclude regular expression"
            )
            continue

        if path_is_excluded(root_relative_path, force_exclude):
            report.path_ignored(child, "matches the --force-exclude regular expression")
            continue

        normalized_path = normalize_path_maybe_ignore(child, root, report)
        if normalized_path is None:
            continue

        if child.is_dir():
            # If gitignore is None, gitignore usage is disabled, while a Falsey
            # gitignore is when the directory doesn't have a .gitignore file.
            if gitignore_dict is not None:
                new_gitignore_dict = {
                    **gitignore_dict,
                    root / child: get_gitignore(child),
                }
            else:
                new_gitignore_dict = None
            yield from gen_python_files(
                child.iterdir(),
                root,
                include,
                exclude,
                extend_exclude,
                force_exclude,
                report,
                new_gitignore_dict,
                verbose=verbose,
                quiet=quiet,
            )

        elif child.is_file():
            if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
                warn=verbose or not quiet
            ):
                continue
            include_match = include.search(normalized_path) if include else True
            if include_match:
                yield child


def wrap_stream_for_windows(
    f: io.TextIOWrapper,
) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
    """
    Wrap stream with colorama's wrap_stream so colors are shown on Windows.

    If `colorama` is unavailable, the original stream is returned unmodified.
    Otherwise, the `wrap_stream()` function determines whether the stream needs
    to be wrapped for a Windows environment and will accordingly either return
    an `AnsiToWin32` wrapper or the original stream.
    """
    try:
        from colorama.initialise import wrap_stream
    except ImportError:
        return f
    else:
        # Set `strip=False` to avoid needing to modify test_express_diff_with_color.
        return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
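
# Illustrative usage (not part of black's source; a minimal sketch with
# hypothetical include/exclude patterns, left commented because it would walk
# the real working directory):
#
#     root = Path.cwd()
#     files = gen_python_files(
#         root.iterdir(),
#         root,
#         include=re.compile(r"\.pyi?$"),
#         exclude=re.compile(r"/(\.git|venv)/"),
#         extend_exclude=None,
#         force_exclude=None,
#         report=Report(),
#         gitignore_dict={root: get_gitignore(root)},
#         verbose=False,
#         quiet=False,
#     )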
458
venv/lib/python3.12/site-packages/black/handle_ipynb_magics.py
Normal file
@@ -0,0 +1,458 @@
"""Functions to process IPython magics with."""

import ast
import collections
import dataclasses
import secrets
import sys
from functools import lru_cache
from importlib.util import find_spec
from typing import Dict, List, Optional, Tuple

if sys.version_info >= (3, 10):
    from typing import TypeGuard
else:
    from typing_extensions import TypeGuard

from black.output import out
from black.report import NothingChanged

TRANSFORMED_MAGICS = frozenset(
    (
        "get_ipython().run_cell_magic",
        "get_ipython().system",
        "get_ipython().getoutput",
        "get_ipython().run_line_magic",
    )
)
TOKENS_TO_IGNORE = frozenset(
    (
        "ENDMARKER",
        "NL",
        "NEWLINE",
        "COMMENT",
        "DEDENT",
        "UNIMPORTANT_WS",
        "ESCAPED_NL",
    )
)
PYTHON_CELL_MAGICS = frozenset(
    (
        "capture",
        "prun",
        "pypy",
        "python",
        "python3",
        "time",
        "timeit",
    )
)
TOKEN_HEX = secrets.token_hex


@dataclasses.dataclass(frozen=True)
class Replacement:
    mask: str
    src: str


@lru_cache
def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
    installed = (
        find_spec("tokenize_rt") is not None and find_spec("IPython") is not None
    )
    if not installed and warn:
        msg = (
            "Skipping .ipynb files as Jupyter dependencies are not installed.\n"
            'You can fix this by running ``pip install "black[jupyter]"``'
        )
        out(msg)
    return installed


def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
    """Remove trailing semicolon from Jupyter notebook cell.

    For example,

        fig, ax = plt.subplots()
        ax.plot(x_data, y_data);  # plot data

    would become

        fig, ax = plt.subplots()
        ax.plot(x_data, y_data)  # plot data

    Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
    ``tokenize_rt`` so that round-tripping works fine.
    """
    from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src

    tokens = src_to_tokens(src)
    trailing_semicolon = False
    for idx, token in reversed_enumerate(tokens):
        if token.name in TOKENS_TO_IGNORE:
            continue
        if token.name == "OP" and token.src == ";":
            del tokens[idx]
            trailing_semicolon = True
        break
    if not trailing_semicolon:
        return src, False
    return tokens_to_src(tokens), True


def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
    """Put trailing semicolon back if cell originally had it.

    Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
    ``tokenize_rt`` so that round-tripping works fine.
    """
    if not has_trailing_semicolon:
        return src
    from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src

    tokens = src_to_tokens(src)
    for idx, token in reversed_enumerate(tokens):
        if token.name in TOKENS_TO_IGNORE:
            continue
        tokens[idx] = token._replace(src=token.src + ";")
        break
    else:  # pragma: nocover
        raise AssertionError(
            "INTERNAL ERROR: Was not able to reinstate trailing semicolon. "
            "Please report a bug on https://github.com/psf/black/issues. "
        ) from None
    return str(tokens_to_src(tokens))
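
# Illustrative round trip (not part of black's source; needs the optional
# tokenize_rt dependency imported above, so it is left commented):
#
#     src = "fig, ax = plt.subplots()\nax.plot(x, y);  # plot\n"
#     stripped, had_semicolon = remove_trailing_semicolon(src)
#     # had_semicolon is True and the ";" is gone from `stripped`;
#     put_trailing_semicolon_back(stripped, had_semicolon)  # restores `src`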


def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
    """Mask IPython magics so content becomes parseable Python code.

    For example,

        %matplotlib inline
        'foo'

    becomes

        "25716f358c32750e"
        'foo'

    The replacements are returned, along with the transformed code.
    """
    replacements: List[Replacement] = []
    try:
        ast.parse(src)
    except SyntaxError:
        # Might have IPython magics, will process below.
        pass
    else:
        # Syntax is fine, nothing to mask, early return.
        return src, replacements

    from IPython.core.inputtransformer2 import TransformerManager

    transformer_manager = TransformerManager()
    transformed = transformer_manager.transform_cell(src)
    transformed, cell_magic_replacements = replace_cell_magics(transformed)
    replacements += cell_magic_replacements
    transformed = transformer_manager.transform_cell(transformed)
    transformed, magic_replacements = replace_magics(transformed)
    if len(transformed.splitlines()) != len(src.splitlines()):
        # Multi-line magic, not supported.
        raise NothingChanged
    replacements += magic_replacements
    return transformed, replacements


def get_token(src: str, magic: str) -> str:
    """Return randomly generated token to mask IPython magic with.

    For example, if 'magic' was `%matplotlib inline`, then a possible
    token to mask it with would be `"43fdd17f7e5ddc83"`. The token
    will be the same length as the magic, and we make sure that it was
    not already present anywhere else in the cell.
    """
    assert magic
    nbytes = max(len(magic) // 2 - 1, 1)
    token = TOKEN_HEX(nbytes)
    counter = 0
    while token in src:
        token = TOKEN_HEX(nbytes)
        counter += 1
        if counter > 100:
            raise AssertionError(
                "INTERNAL ERROR: Black was not able to replace IPython magic. "
                "Please report a bug on https://github.com/psf/black/issues. "
                f"The magic might be helpful: {magic}"
            ) from None
    if len(token) + 2 < len(magic):
        token = f"{token}."
    return f'"{token}"'


def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
    """Replace cell magic with token.

    Note that 'src' will already have been processed by IPython's
    TransformerManager().transform_cell.

    Example,

        get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n')

    becomes

        "a794."
        ls =!ls

    The replacement, along with the transformed code, is returned.
    """
    replacements: List[Replacement] = []

    tree = ast.parse(src)

    cell_magic_finder = CellMagicFinder()
    cell_magic_finder.visit(tree)
    if cell_magic_finder.cell_magic is None:
        return src, replacements
    header = cell_magic_finder.cell_magic.header
    mask = get_token(src, header)
    replacements.append(Replacement(mask=mask, src=header))
    return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements


def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
    """Replace magics within body of cell.

    Note that 'src' will already have been processed by IPython's
    TransformerManager().transform_cell.

    Example, this

        get_ipython().run_line_magic('matplotlib', 'inline')
        'foo'

    becomes

        "5e67db56d490fd39"
        'foo'

    The replacement, along with the transformed code, are returned.
    """
    replacements = []
    magic_finder = MagicFinder()
    magic_finder.visit(ast.parse(src))
    new_srcs = []
    for i, line in enumerate(src.splitlines(), start=1):
        if i in magic_finder.magics:
            offsets_and_magics = magic_finder.magics[i]
            if len(offsets_and_magics) != 1:  # pragma: nocover
                raise AssertionError(
                    f"Expecting one magic per line, got: {offsets_and_magics}\n"
                    "Please report a bug on https://github.com/psf/black/issues."
                )
            col_offset, magic = (
                offsets_and_magics[0].col_offset,
                offsets_and_magics[0].magic,
            )
            mask = get_token(src, magic)
            replacements.append(Replacement(mask=mask, src=magic))
            line = line[:col_offset] + mask
        new_srcs.append(line)
    return "\n".join(new_srcs), replacements


def unmask_cell(src: str, replacements: List[Replacement]) -> str:
    """Remove replacements from cell.

    For example

        "9b20"
        foo = bar

    becomes

        %%time
        foo = bar
    """
    for replacement in replacements:
        src = src.replace(replacement.mask, replacement.src)
    return src
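
# Illustrative round trip (not part of black's source; needs the optional
# IPython dependency pulled in by mask_cell, so it is left commented):
#
#     src = "%matplotlib inline\n'foo'"
#     masked, replacements = mask_cell(src)   # magic hidden behind a quoted token
#     unmask_cell(masked, replacements)       # puts the magic back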


def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
    """Check if attribute is IPython magic.

    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.
    """
    return (
        isinstance(node, ast.Attribute)
        and isinstance(node.value, ast.Call)
        and isinstance(node.value.func, ast.Name)
        and node.value.func.id == "get_ipython"
    )


def _get_str_args(args: List[ast.expr]) -> List[str]:
    str_args = []
    for arg in args:
        assert isinstance(arg, ast.Str)
        str_args.append(arg.s)
    return str_args


@dataclasses.dataclass(frozen=True)
class CellMagic:
    name: str
    params: Optional[str]
    body: str

    @property
    def header(self) -> str:
        if self.params:
            return f"%%{self.name} {self.params}"
        return f"%%{self.name}"


# ast.NodeVisitor + dataclass = breakage under mypyc.
class CellMagicFinder(ast.NodeVisitor):
    """Find cell magics.

    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.

    For example,

        %%time\n
        foo()

    would have been transformed to

        get_ipython().run_cell_magic('time', '', 'foo()\\n')

    and we look for instances of the latter.
    """

    def __init__(self, cell_magic: Optional[CellMagic] = None) -> None:
        self.cell_magic = cell_magic

    def visit_Expr(self, node: ast.Expr) -> None:
        """Find cell magic, extract header and body."""
        if (
            isinstance(node.value, ast.Call)
            and _is_ipython_magic(node.value.func)
            and node.value.func.attr == "run_cell_magic"
        ):
            args = _get_str_args(node.value.args)
            self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2])
        self.generic_visit(node)


@dataclasses.dataclass(frozen=True)
class OffsetAndMagic:
    col_offset: int
    magic: str


# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here
# as mypyc will generate broken code.
class MagicFinder(ast.NodeVisitor):
    """Visit cell to look for get_ipython calls.

    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.

    For example,

        %matplotlib inline

    would have been transformed to

        get_ipython().run_line_magic('matplotlib', 'inline')

    and we look for instances of the latter (and likewise for other
    types of magics).
    """

    def __init__(self) -> None:
        self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list)

    def visit_Assign(self, node: ast.Assign) -> None:
        """Look for system assign magics.

        For example,

            black_version = !black --version
            env = %env var

        would have been (respectively) transformed to

            black_version = get_ipython().getoutput('black --version')
            env = get_ipython().run_line_magic('env', 'var')

        and we look for instances of any of the latter.
        """
        if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
            args = _get_str_args(node.value.args)
            if node.value.func.attr == "getoutput":
                src = f"!{args[0]}"
            elif node.value.func.attr == "run_line_magic":
                src = f"%{args[0]}"
                if args[1]:
                    src += f" {args[1]}"
            else:
                raise AssertionError(
                    f"Unexpected IPython magic {node.value.func.attr!r} found. "
                    "Please report a bug on https://github.com/psf/black/issues."
                ) from None
            self.magics[node.value.lineno].append(
                OffsetAndMagic(node.value.col_offset, src)
            )
        self.generic_visit(node)

    def visit_Expr(self, node: ast.Expr) -> None:
        """Look for magics in body of cell.

        For examples,

            !ls
            !!ls
            ?ls
            ??ls

        would (respectively) get transformed to

            get_ipython().system('ls')
            get_ipython().getoutput('ls')
            get_ipython().run_line_magic('pinfo', 'ls')
            get_ipython().run_line_magic('pinfo2', 'ls')

        and we look for instances of any of the latter.
        """
        if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
            args = _get_str_args(node.value.args)
            if node.value.func.attr == "run_line_magic":
                if args[0] == "pinfo":
                    src = f"?{args[1]}"
                elif args[0] == "pinfo2":
                    src = f"??{args[1]}"
                else:
                    src = f"%{args[0]}"
                    if args[1]:
                        src += f" {args[1]}"
            elif node.value.func.attr == "system":
                src = f"!{args[0]}"
            elif node.value.func.attr == "getoutput":
                src = f"!!{args[0]}"
            else:
                raise NothingChanged  # unsupported magic.
            self.magics[node.value.lineno].append(
                OffsetAndMagic(node.value.col_offset, src)
            )
        self.generic_visit(node)
1657
venv/lib/python3.12/site-packages/black/linegen.py
Normal file
File diff suppressed because it is too large
1104
venv/lib/python3.12/site-packages/black/lines.py
Normal file
File diff suppressed because it is too large
252
venv/lib/python3.12/site-packages/black/mode.py
Normal file
@@ -0,0 +1,252 @@
"""Data structures configuring Black behavior.

Mostly around Python language feature support per version and Black configuration
chosen by the user.
"""

from dataclasses import dataclass, field
from enum import Enum, auto
from hashlib import sha256
from operator import attrgetter
from typing import Dict, Final, Set
from warnings import warn

from black.const import DEFAULT_LINE_LENGTH


class TargetVersion(Enum):
    PY33 = 3
    PY34 = 4
    PY35 = 5
    PY36 = 6
    PY37 = 7
    PY38 = 8
    PY39 = 9
    PY310 = 10
    PY311 = 11
    PY312 = 12


class Feature(Enum):
    F_STRINGS = 2
    NUMERIC_UNDERSCORES = 3
    TRAILING_COMMA_IN_CALL = 4
    TRAILING_COMMA_IN_DEF = 5
    # The following two feature-flags are mutually exclusive, and exactly one should be
    # set for every version of python.
    ASYNC_IDENTIFIERS = 6
    ASYNC_KEYWORDS = 7
    ASSIGNMENT_EXPRESSIONS = 8
    POS_ONLY_ARGUMENTS = 9
    RELAXED_DECORATORS = 10
    PATTERN_MATCHING = 11
    UNPACKING_ON_FLOW = 12
    ANN_ASSIGN_EXTENDED_RHS = 13
    EXCEPT_STAR = 14
    VARIADIC_GENERICS = 15
    DEBUG_F_STRINGS = 16
    PARENTHESIZED_CONTEXT_MANAGERS = 17
    TYPE_PARAMS = 18
    FORCE_OPTIONAL_PARENTHESES = 50

    # __future__ flags
    FUTURE_ANNOTATIONS = 51


FUTURE_FLAG_TO_FEATURE: Final = {
    "annotations": Feature.FUTURE_ANNOTATIONS,
}


VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
    TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY36: {
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_IDENTIFIERS,
    },
    TargetVersion.PY37: {
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
    },
    TargetVersion.PY38: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
    },
    TargetVersion.PY39: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
    },
    TargetVersion.PY310: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
        Feature.PATTERN_MATCHING,
    },
    TargetVersion.PY311: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
        Feature.PATTERN_MATCHING,
        Feature.EXCEPT_STAR,
        Feature.VARIADIC_GENERICS,
    },
    TargetVersion.PY312: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
        Feature.PATTERN_MATCHING,
        Feature.EXCEPT_STAR,
        Feature.VARIADIC_GENERICS,
        Feature.TYPE_PARAMS,
    },
}


def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
    return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
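
# Illustrative usage (not part of black's source): a feature is usable only if
# every targeted version supports it, so adding PY37 to the set flips the
# walrus-operator check to False.
assert supports_feature(
    {TargetVersion.PY38, TargetVersion.PY39}, Feature.ASSIGNMENT_EXPRESSIONS
)
assert not supports_feature(
    {TargetVersion.PY37, TargetVersion.PY38}, Feature.ASSIGNMENT_EXPRESSIONS
)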


class Preview(Enum):
    """Individual preview style features."""

    add_trailing_comma_consistently = auto()
    blank_line_after_nested_stub_class = auto()
    blank_line_between_nested_and_def_stub_file = auto()
    hex_codes_in_unicode_sequences = auto()
    improved_async_statements_handling = auto()
    multiline_string_handling = auto()
    no_blank_line_before_class_docstring = auto()
    prefer_splitting_right_hand_side_of_assignments = auto()
    # NOTE: string_processing requires wrap_long_dict_values_in_parens
    # for https://github.com/psf/black/issues/3117 to be fixed.
    string_processing = auto()
    parenthesize_conditional_expressions = auto()
    parenthesize_long_type_hints = auto()
    respect_magic_trailing_comma_in_return_type = auto()
    skip_magic_trailing_comma_in_subscript = auto()
    wrap_long_dict_values_in_parens = auto()
    wrap_multiple_context_managers_in_parens = auto()
    dummy_implementations = auto()
    walrus_subscript = auto()
    module_docstring_newlines = auto()
    accept_raw_docstrings = auto()
    fix_power_op_line_length = auto()
    allow_empty_first_line_before_new_block_or_comment = auto()


class Deprecated(UserWarning):
    """Visible deprecation warning."""


@dataclass
class Mode:
    target_versions: Set[TargetVersion] = field(default_factory=set)
    line_length: int = DEFAULT_LINE_LENGTH
    string_normalization: bool = True
    is_pyi: bool = False
    is_ipynb: bool = False
    skip_source_first_line: bool = False
    magic_trailing_comma: bool = True
    experimental_string_processing: bool = False
    python_cell_magics: Set[str] = field(default_factory=set)
    preview: bool = False

    def __post_init__(self) -> None:
        if self.experimental_string_processing:
            warn(
                "`experimental string processing` has been included in `preview`"
                " and deprecated. Use `preview` instead.",
                Deprecated,
            )

    def __contains__(self, feature: Preview) -> bool:
        """
        Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag.

        The argument is not checked and features are not differentiated.
        They only exist to make development easier by clarifying intent.
        """
        if feature is Preview.string_processing:
            return self.preview or self.experimental_string_processing
        return self.preview

    def get_cache_key(self) -> str:
        if self.target_versions:
            version_str = ",".join(
                str(version.value)
                for version in sorted(self.target_versions, key=attrgetter("value"))
            )
        else:
            version_str = "-"
        parts = [
            version_str,
            str(self.line_length),
            str(int(self.string_normalization)),
            str(int(self.is_pyi)),
            str(int(self.is_ipynb)),
            str(int(self.skip_source_first_line)),
            str(int(self.magic_trailing_comma)),
            str(int(self.experimental_string_processing)),
            str(int(self.preview)),
            sha256((",".join(sorted(self.python_cell_magics))).encode()).hexdigest(),
        ]
        return ".".join(parts)
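
# Illustrative usage (not part of black's source; a minimal sketch). Preview
# membership and the cache key both derive from the flags above:
_mode = Mode(target_versions={TargetVersion.PY311}, line_length=100, preview=True)
assert Preview.string_processing in _mode  # preview=True enables every feature
assert _mode.get_cache_key().startswith("11.100.")  # version values, line length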
930
venv/lib/python3.12/site-packages/black/nodes.py
Normal file
@@ -0,0 +1,930 @@
"""
blib2to3 Node/Leaf transformation-related utility functions.
"""

import sys
from typing import Final, Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union

if sys.version_info >= (3, 10):
    from typing import TypeGuard
else:
    from typing_extensions import TypeGuard

from mypy_extensions import mypyc_attr

from black.cache import CACHE_DIR
from black.mode import Mode, Preview
from black.strings import has_triple_quotes
from blib2to3 import pygram
from blib2to3.pgen2 import token
from blib2to3.pytree import NL, Leaf, Node, type_repr

pygram.initialize(CACHE_DIR)
syms: Final = pygram.python_symbols


# types
T = TypeVar("T")
LN = Union[Leaf, Node]
LeafID = int
NodeType = int


WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE}
STATEMENT: Final = {
    syms.if_stmt,
    syms.while_stmt,
    syms.for_stmt,
    syms.try_stmt,
    syms.except_clause,
    syms.with_stmt,
    syms.funcdef,
    syms.classdef,
    syms.match_stmt,
    syms.case_block,
}
STANDALONE_COMMENT: Final = 153
token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
LOGIC_OPERATORS: Final = {"and", "or"}
COMPARATORS: Final = {
    token.LESS,
    token.GREATER,
    token.EQEQUAL,
    token.NOTEQUAL,
    token.LESSEQUAL,
    token.GREATEREQUAL,
}
MATH_OPERATORS: Final = {
    token.VBAR,
    token.CIRCUMFLEX,
    token.AMPER,
    token.LEFTSHIFT,
    token.RIGHTSHIFT,
    token.PLUS,
    token.MINUS,
    token.STAR,
    token.SLASH,
    token.DOUBLESLASH,
    token.PERCENT,
    token.AT,
    token.TILDE,
    token.DOUBLESTAR,
}
STARS: Final = {token.STAR, token.DOUBLESTAR}
VARARGS_SPECIALS: Final = STARS | {token.SLASH}
VARARGS_PARENTS: Final = {
    syms.arglist,
    syms.argument,  # double star in arglist
    syms.trailer,  # single argument to call
    syms.typedargslist,
    syms.varargslist,  # lambdas
}
UNPACKING_PARENTS: Final = {
    syms.atom,  # single element of a list or set literal
    syms.dictsetmaker,
    syms.listmaker,
    syms.testlist_gexp,
    syms.testlist_star_expr,
    syms.subject_expr,
    syms.pattern,
}
TEST_DESCENDANTS: Final = {
    syms.test,
    syms.lambdef,
    syms.or_test,
    syms.and_test,
    syms.not_test,
    syms.comparison,
    syms.star_expr,
    syms.expr,
    syms.xor_expr,
    syms.and_expr,
    syms.shift_expr,
    syms.arith_expr,
    syms.trailer,
    syms.term,
    syms.power,
}
TYPED_NAMES: Final = {syms.tname, syms.tname_star}
ASSIGNMENTS: Final = {
    "=",
    "+=",
    "-=",
    "*=",
    "@=",
    "/=",
    "%=",
    "&=",
    "|=",
    "^=",
    "<<=",
    ">>=",
    "**=",
    "//=",
}

IMPLICIT_TUPLE: Final = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
BRACKET: Final = {
    token.LPAR: token.RPAR,
    token.LSQB: token.RSQB,
    token.LBRACE: token.RBRACE,
}
OPENING_BRACKETS: Final = set(BRACKET.keys())
CLOSING_BRACKETS: Final = set(BRACKET.values())
BRACKETS: Final = OPENING_BRACKETS | CLOSING_BRACKETS
ALWAYS_NO_SPACE: Final = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}

RARROW = 55


@mypyc_attr(allow_interpreted_subclasses=True)
class Visitor(Generic[T]):
    """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""

    def visit(self, node: LN) -> Iterator[T]:
        """Main method to visit `node` and its children.

        It tries to find a `visit_*()` method for the given `node.type`, like
        `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
        If no dedicated `visit_*()` method is found, chooses `visit_default()`
        instead.

        Then yields objects of type `T` from the selected visitor.
        """
        if node.type < 256:
            name = token.tok_name[node.type]
        else:
            name = str(type_repr(node.type))
        # We explicitly branch on whether a visitor exists (instead of
        # using self.visit_default as the default arg to getattr) in order
        # to save needing to create a bound method object and so mypyc can
        # generate a native call to visit_default.
        visitf = getattr(self, f"visit_{name}", None)
        if visitf:
            yield from visitf(node)
        else:
            yield from self.visit_default(node)

    def visit_default(self, node: LN) -> Iterator[T]:
        """Default `visit_*()` implementation. Recurses to children of `node`."""
        if isinstance(node, Node):
            for child in node.children:
                yield from self.visit(child)
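
# Illustrative subclass (not part of black's source; a minimal sketch). A
# visitor that yields the value of every NAME leaf, relying on visit_default's
# recursion for everything else; `visit` dispatches to it via token.tok_name:
class _NameCollector(Visitor[str]):
    def visit_NAME(self, node: LN) -> Iterator[str]:
        assert isinstance(node, Leaf)
        yield node.value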


def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str:  # noqa: C901
    """Return whitespace prefix if needed for the given `leaf`.

    `complex_subscript` signals whether the given leaf is part of a subscription
    which has non-trivial arguments, like arithmetic expressions or function calls.
    """
    NO: Final[str] = ""
    SPACE: Final[str] = " "
    DOUBLESPACE: Final[str] = "  "
    t = leaf.type
    p = leaf.parent
    v = leaf.value
    if t in ALWAYS_NO_SPACE:
        return NO

    if t == token.COMMENT:
        return DOUBLESPACE

    assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
    if t == token.COLON and p.type not in {
        syms.subscript,
        syms.subscriptlist,
        syms.sliceop,
    }:
        return NO

    prev = leaf.prev_sibling
    if not prev:
        prevp = preceding_leaf(p)
        if not prevp or prevp.type in OPENING_BRACKETS:
            return NO

        if t == token.COLON:
            if prevp.type == token.COLON:
                return NO

            elif prevp.type != token.COMMA and not complex_subscript:
                return NO

            return SPACE

        if prevp.type == token.EQUAL:
            if prevp.parent:
                if prevp.parent.type in {
                    syms.arglist,
                    syms.argument,
                    syms.parameters,
                    syms.varargslist,
                }:
                    return NO

                elif prevp.parent.type == syms.typedargslist:
                    # A bit hacky: if the equal sign has whitespace, it means we
                    # previously found it's a typed argument. So, we're using
                    # that, too.
                    return prevp.prefix

        elif (
            prevp.type == token.STAR
            and parent_type(prevp) == syms.star_expr
            and parent_type(prevp.parent) == syms.subscriptlist
        ):
            # No space between typevar tuples.
            return NO

        elif prevp.type in VARARGS_SPECIALS:
            if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
                return NO

        elif prevp.type == token.COLON:
            if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
                return SPACE if complex_subscript else NO

        elif (
            prevp.parent
            and prevp.parent.type == syms.factor
            and prevp.type in MATH_OPERATORS
        ):
            return NO

        elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator:
            # no space in decorators
            return NO

    elif prev.type in OPENING_BRACKETS:
        return NO

    if p.type in {syms.parameters, syms.arglist}:
        # untyped function signatures or calls
        if not prev or prev.type != token.COMMA:
            return NO

    elif p.type == syms.varargslist:
        # lambdas
        if prev and prev.type != token.COMMA:
            return NO

    elif p.type == syms.typedargslist:
        # typed function signatures
        if not prev:
            return NO

        if t == token.EQUAL:
            if prev.type not in TYPED_NAMES:
                return NO

        elif prev.type == token.EQUAL:
            # A bit hacky: if the equal sign has whitespace, it means we
            # previously found it's a typed argument. So, we're using that, too.
            return prev.prefix

        elif prev.type != token.COMMA:
            return NO

    elif p.type in TYPED_NAMES:
        # type names
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type != token.COMMA:
                return NO

    elif p.type == syms.trailer:
        # attributes and calls
        if t == token.LPAR or t == token.RPAR:
            return NO

        if not prev:
            if t == token.DOT or t == token.LSQB:
                return NO

        elif prev.type != token.COMMA:
            return NO

    elif p.type == syms.argument:
        # single argument
        if t == token.EQUAL:
            return NO

        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type == token.LPAR:
                return NO

        elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
            return NO

    elif p.type == syms.decorator:
        # decorators
        return NO

    elif p.type == syms.dotted_name:
        if prev:
            return NO

        prevp = preceding_leaf(p)
        if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
            return NO

    elif p.type == syms.classdef:
        if t == token.LPAR:
            return NO

        if prev and prev.type == token.LPAR:
            return NO

    elif p.type in {syms.subscript, syms.sliceop}:
        # indexing
        if not prev:
            assert p.parent is not None, "subscripts are always parented"
            if p.parent.type == syms.subscriptlist:
                return SPACE

            return NO

        elif Preview.walrus_subscript in mode and (
            t == token.COLONEQUAL or prev.type == token.COLONEQUAL
        ):
            return SPACE

        elif not complex_subscript:
            return NO

    elif p.type == syms.atom:
        if prev and t == token.DOT:
            # dots, but not the first one.
            return NO

    elif p.type == syms.dictsetmaker:
        # dict unpacking
        if prev and prev.type == token.DOUBLESTAR:
            return NO

    elif p.type in {syms.factor, syms.star_expr}:
        # unary ops
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type in OPENING_BRACKETS:
                return NO

            prevp_parent = prevp.parent
            assert prevp_parent is not None
            if prevp.type == token.COLON and prevp_parent.type in {
                syms.subscript,
                syms.sliceop,
            }:
                return NO

            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
                return NO

        elif t in {token.NAME, token.NUMBER, token.STRING}:
            return NO

    elif p.type == syms.import_from:
        if t == token.DOT:
            if prev and prev.type == token.DOT:
                return NO

        elif t == token.NAME:
            if v == "import":
                return SPACE

            if prev and prev.type == token.DOT:
                return NO

    elif p.type == syms.sliceop:
        return NO

    elif p.type == syms.except_clause:
        if t == token.STAR:
            return NO

    return SPACE
|
||||
|
||||
|
||||
def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
|
||||
"""Return the first leaf that precedes `node`, if any."""
|
||||
while node:
|
||||
res = node.prev_sibling
|
||||
if res:
|
||||
if isinstance(res, Leaf):
|
||||
return res
|
||||
|
||||
try:
|
||||
return list(res.leaves())[-1]
|
||||
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
node = node.parent
|
||||
return None
|
||||
|
||||
|
||||
def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
|
||||
"""Return if the `node` and its previous siblings match types against the provided
|
||||
list of tokens; the provided `node`has its type matched against the last element in
|
||||
the list. `None` can be used as the first element to declare that the start of the
|
||||
list is anchored at the start of its parent's children."""
|
||||
if not tokens:
|
||||
return True
|
||||
if tokens[-1] is None:
|
||||
return node is None
|
||||
if not node:
|
||||
return False
|
||||
if node.type != tokens[-1]:
|
||||
return False
|
||||
return prev_siblings_are(node.prev_sibling, tokens[:-1])
|
||||
|
||||
|
||||
def parent_type(node: Optional[LN]) -> Optional[NodeType]:
|
||||
"""
|
||||
Returns:
|
||||
@node.parent.type, if @node is not None and has a parent.
|
||||
OR
|
||||
None, otherwise.
|
||||
"""
|
||||
if node is None or node.parent is None:
|
||||
return None
|
||||
|
||||
return node.parent.type
|
||||
|
||||
|
||||
def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
|
||||
"""Return the child of `ancestor` that contains `descendant`."""
|
||||
node: Optional[LN] = descendant
|
||||
while node and node.parent != ancestor:
|
||||
node = node.parent
|
||||
return node
|
||||
|
||||
|
||||
def replace_child(old_child: LN, new_child: LN) -> None:
|
||||
"""
|
||||
Side Effects:
|
||||
* If @old_child.parent is set, replace @old_child with @new_child in
|
||||
@old_child's underlying Node structure.
|
||||
OR
|
||||
* Otherwise, this function does nothing.
|
||||
"""
|
||||
parent = old_child.parent
|
||||
if not parent:
|
||||
return
|
||||
|
||||
child_idx = old_child.remove()
|
||||
if child_idx is not None:
|
||||
parent.insert_child(child_idx, new_child)
|
||||
|
||||
|
||||
def container_of(leaf: Leaf) -> LN:
|
||||
"""Return `leaf` or one of its ancestors that is the topmost container of it.
|
||||
|
||||
By "container" we mean a node where `leaf` is the very first child.
|
||||
"""
|
||||
same_prefix = leaf.prefix
|
||||
container: LN = leaf
|
||||
while container:
|
||||
parent = container.parent
|
||||
if parent is None:
|
||||
break
|
||||
|
||||
if parent.children[0].prefix != same_prefix:
|
||||
break
|
||||
|
||||
if parent.type == syms.file_input:
|
||||
break
|
||||
|
||||
if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
|
||||
break
|
||||
|
||||
container = parent
|
||||
return container
|
||||
|
||||
|
||||
def first_leaf_of(node: LN) -> Optional[Leaf]:
|
||||
"""Returns the first leaf of the node tree."""
|
||||
if isinstance(node, Leaf):
|
||||
return node
|
||||
if node.children:
|
||||
return first_leaf_of(node.children[0])
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def is_arith_like(node: LN) -> bool:
|
||||
"""Whether node is an arithmetic or a binary arithmetic expression"""
|
||||
return node.type in {
|
||||
syms.arith_expr,
|
||||
syms.shift_expr,
|
||||
syms.xor_expr,
|
||||
syms.and_expr,
|
||||
}
|
||||
|
||||
|
||||
def is_docstring(leaf: Leaf) -> bool:
|
||||
if prev_siblings_are(
|
||||
leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt]
|
||||
):
|
||||
return True
|
||||
|
||||
# Multiline docstring on the same line as the `def`.
|
||||
if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]):
|
||||
# `syms.parameters` is only used in funcdefs and async_funcdefs in the Python
|
||||
# grammar. We're safe to return True without further checks.
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def is_empty_tuple(node: LN) -> bool:
|
||||
"""Return True if `node` holds an empty tuple."""
|
||||
return (
|
||||
node.type == syms.atom
|
||||
and len(node.children) == 2
|
||||
and node.children[0].type == token.LPAR
|
||||
and node.children[1].type == token.RPAR
|
||||
)
|
||||
|
||||
|
||||
def is_one_tuple(node: LN) -> bool:
|
||||
"""Return True if `node` holds a tuple with one element, with or without parens."""
|
||||
if node.type == syms.atom:
|
||||
gexp = unwrap_singleton_parenthesis(node)
|
||||
if gexp is None or gexp.type != syms.testlist_gexp:
|
||||
return False
|
||||
|
||||
return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
|
||||
|
||||
return (
|
||||
node.type in IMPLICIT_TUPLE
|
||||
and len(node.children) == 2
|
||||
and node.children[1].type == token.COMMA
|
||||
)
|
||||
|
||||
|
||||
def is_tuple_containing_walrus(node: LN) -> bool:
|
||||
"""Return True if `node` holds a tuple that contains a walrus operator."""
|
||||
if node.type != syms.atom:
|
||||
return False
|
||||
gexp = unwrap_singleton_parenthesis(node)
|
||||
if gexp is None or gexp.type != syms.testlist_gexp:
|
||||
return False
|
||||
|
||||
return any(child.type == syms.namedexpr_test for child in gexp.children)
|
||||
|
||||
|
||||
def is_one_sequence_between(
|
||||
opening: Leaf,
|
||||
closing: Leaf,
|
||||
leaves: List[Leaf],
|
||||
brackets: Tuple[int, int] = (token.LPAR, token.RPAR),
|
||||
) -> bool:
|
||||
"""Return True if content between `opening` and `closing` is a one-sequence."""
|
||||
if (opening.type, closing.type) != brackets:
|
||||
return False
|
||||
|
||||
depth = closing.bracket_depth + 1
|
||||
for _opening_index, leaf in enumerate(leaves):
|
||||
if leaf is opening:
|
||||
break
|
||||
|
||||
else:
|
||||
raise LookupError("Opening paren not found in `leaves`")
|
||||
|
||||
commas = 0
|
||||
_opening_index += 1
|
||||
for leaf in leaves[_opening_index:]:
|
||||
if leaf is closing:
|
||||
break
|
||||
|
||||
bracket_depth = leaf.bracket_depth
|
||||
if bracket_depth == depth and leaf.type == token.COMMA:
|
||||
commas += 1
|
||||
if leaf.parent and leaf.parent.type in {
|
||||
syms.arglist,
|
||||
syms.typedargslist,
|
||||
}:
|
||||
commas += 1
|
||||
break
|
||||
|
||||
return commas < 2
|
||||
|
||||
|
||||
def is_walrus_assignment(node: LN) -> bool:
|
||||
"""Return True iff `node` is of the shape ( test := test )"""
|
||||
inner = unwrap_singleton_parenthesis(node)
|
||||
return inner is not None and inner.type == syms.namedexpr_test
|
||||
|
||||
|
||||
def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool:
|
||||
"""Return True iff `node` is a trailer valid in a simple decorator"""
|
||||
return node.type == syms.trailer and (
|
||||
(
|
||||
len(node.children) == 2
|
||||
and node.children[0].type == token.DOT
|
||||
and node.children[1].type == token.NAME
|
||||
)
|
||||
# last trailer can be an argument-less parentheses pair
|
||||
or (
|
||||
last
|
||||
and len(node.children) == 2
|
||||
and node.children[0].type == token.LPAR
|
||||
and node.children[1].type == token.RPAR
|
||||
)
|
||||
# last trailer can be arguments
|
||||
or (
|
||||
last
|
||||
and len(node.children) == 3
|
||||
and node.children[0].type == token.LPAR
|
||||
# and node.children[1].type == syms.argument
|
||||
and node.children[2].type == token.RPAR
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def is_simple_decorator_expression(node: LN) -> bool:
|
||||
"""Return True iff `node` could be a 'dotted name' decorator
|
||||
|
||||
This function takes the node of the 'namedexpr_test' of the new decorator
|
||||
grammar and test if it would be valid under the old decorator grammar.
|
||||
|
||||
The old grammar was: decorator: @ dotted_name [arguments] NEWLINE
|
||||
The new grammar is : decorator: @ namedexpr_test NEWLINE
|
||||
"""
|
||||
if node.type == token.NAME:
|
||||
return True
|
||||
if node.type == syms.power:
|
||||
if node.children:
|
||||
return (
|
||||
node.children[0].type == token.NAME
|
||||
and all(map(is_simple_decorator_trailer, node.children[1:-1]))
|
||||
and (
|
||||
len(node.children) < 2
|
||||
or is_simple_decorator_trailer(node.children[-1], last=True)
|
||||
)
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
def is_yield(node: LN) -> bool:
|
||||
"""Return True if `node` holds a `yield` or `yield from` expression."""
|
||||
if node.type == syms.yield_expr:
|
||||
return True
|
||||
|
||||
if is_name_token(node) and node.value == "yield":
|
||||
return True
|
||||
|
||||
if node.type != syms.atom:
|
||||
return False
|
||||
|
||||
if len(node.children) != 3:
|
||||
return False
|
||||
|
||||
lpar, expr, rpar = node.children
|
||||
if lpar.type == token.LPAR and rpar.type == token.RPAR:
|
||||
return is_yield(expr)
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
|
||||
"""Return True if `leaf` is a star or double star in a vararg or kwarg.
|
||||
|
||||
If `within` includes VARARGS_PARENTS, this applies to function signatures.
|
||||
If `within` includes UNPACKING_PARENTS, it applies to right hand-side
|
||||
extended iterable unpacking (PEP 3132) and additional unpacking
|
||||
generalizations (PEP 448).
|
||||
"""
|
||||
if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
|
||||
return False
|
||||
|
||||
p = leaf.parent
|
||||
if p.type == syms.star_expr:
|
||||
# Star expressions are also used as assignment targets in extended
|
||||
# iterable unpacking (PEP 3132). See what its parent is instead.
|
||||
if not p.parent:
|
||||
return False
|
||||
|
||||
p = p.parent
|
||||
|
||||
return p.type in within
|
||||
|
||||
|
||||
def is_multiline_string(leaf: Leaf) -> bool:
|
||||
"""Return True if `leaf` is a multiline string that actually spans many lines."""
|
||||
return has_triple_quotes(leaf.value) and "\n" in leaf.value
|
||||
|
||||
|
||||
def is_funcdef(node: Node) -> bool:
|
||||
return node.type == syms.funcdef
|
||||
|
||||
|
||||
def is_stub_suite(node: Node) -> bool:
|
||||
"""Return True if `node` is a suite with a stub body."""
|
||||
|
||||
# If there is a comment, we want to keep it.
|
||||
if node.prefix.strip():
|
||||
return False
|
||||
|
||||
if (
|
||||
len(node.children) != 4
|
||||
or node.children[0].type != token.NEWLINE
|
||||
or node.children[1].type != token.INDENT
|
||||
or node.children[3].type != token.DEDENT
|
||||
):
|
||||
return False
|
||||
|
||||
if node.children[3].prefix.strip():
|
||||
return False
|
||||
|
||||
return is_stub_body(node.children[2])
|
||||
|
||||
|
||||
def is_stub_body(node: LN) -> bool:
|
||||
"""Return True if `node` is a simple statement containing an ellipsis."""
|
||||
if not isinstance(node, Node) or node.type != syms.simple_stmt:
|
||||
return False
|
||||
|
||||
if len(node.children) != 2:
|
||||
return False
|
||||
|
||||
child = node.children[0]
|
||||
return (
|
||||
not child.prefix.strip()
|
||||
and child.type == syms.atom
|
||||
and len(child.children) == 3
|
||||
and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
|
||||
)
|
||||
|
||||
|
||||
def is_atom_with_invisible_parens(node: LN) -> bool:
    """Given a `LN`, determines whether it's an atom `node` with invisible
    parens. Useful for deduplicating and normalizing parens.
    """
    if isinstance(node, Leaf) or node.type != syms.atom:
        return False

    first, last = node.children[0], node.children[-1]
    return (
        isinstance(first, Leaf)
        and first.type == token.LPAR
        and first.value == ""
        and isinstance(last, Leaf)
        and last.type == token.RPAR
        and last.value == ""
    )


def is_empty_par(leaf: Leaf) -> bool:
    return is_empty_lpar(leaf) or is_empty_rpar(leaf)


def is_empty_lpar(leaf: Leaf) -> bool:
    return leaf.type == token.LPAR and leaf.value == ""


def is_empty_rpar(leaf: Leaf) -> bool:
    return leaf.type == token.RPAR and leaf.value == ""


def is_import(leaf: Leaf) -> bool:
    """Return True if the given leaf starts an import statement."""
    p = leaf.parent
    t = leaf.type
    v = leaf.value
    return bool(
        t == token.NAME
        and (
            (v == "import" and p and p.type == syms.import_name)
            or (v == "from" and p and p.type == syms.import_from)
        )
    )


def is_with_or_async_with_stmt(leaf: Leaf) -> bool:
    """Return True if the given leaf starts a with or async with statement."""
    return bool(
        leaf.type == token.NAME
        and leaf.value == "with"
        and leaf.parent
        and leaf.parent.type == syms.with_stmt
    ) or bool(
        leaf.type == token.ASYNC
        and leaf.next_sibling
        and leaf.next_sibling.type == syms.with_stmt
    )


def is_async_stmt_or_funcdef(leaf: Leaf) -> bool:
    """Return True if the given leaf starts an async def/for/with statement.

    Note that `async def` can be either an `async_stmt` or an `async_funcdef`;
    the latter is used when it has decorators.
    """
    return bool(
        leaf.type == token.ASYNC
        and leaf.parent
        and leaf.parent.type in {syms.async_stmt, syms.async_funcdef}
    )


def is_type_comment(leaf: Leaf) -> bool:
    """Return True if the given leaf is a type comment. This function should only
    be used for general type comments (excluding ignore annotations, which should
    use `is_type_ignore_comment`). Note that general type comments are no longer
    used in modern versions of Python, so this function may be deprecated in the
    future."""
    t = leaf.type
    v = leaf.value
    return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:")


def is_type_ignore_comment(leaf: Leaf) -> bool:
    """Return True if the given leaf is a type comment with an ignore annotation."""
    t = leaf.type
    v = leaf.value
    return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_ignore_comment_string(v)


def is_type_ignore_comment_string(value: str) -> bool:
    """Return True if the given string matches a type comment with an
    ignore annotation."""
    return value.startswith("# type: ignore")


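# Editor's illustrative sketch, not part of black's source: the string-level
# helper above needs no parse tree, so it can be exercised directly (assuming
# the black package is importable).
from black.nodes import is_type_ignore_comment_string

assert is_type_ignore_comment_string("# type: ignore[attr-defined]")
assert not is_type_ignore_comment_string("# type: int")  # general type comment

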
def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None:
    """Wrap `child` in parentheses.

    This replaces `child` with an atom holding the parentheses and the old
    child. That requires moving the prefix.

    If `visible` is False, the leaves will be valueless (and thus invisible).
    """
    lpar = Leaf(token.LPAR, "(" if visible else "")
    rpar = Leaf(token.RPAR, ")" if visible else "")
    prefix = child.prefix
    child.prefix = ""
    index = child.remove() or 0
    new_child = Node(syms.atom, [lpar, child, rpar])
    new_child.prefix = prefix
    parent.insert_child(index, new_child)


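# Editor's illustrative sketch, not part of black's source: wrapping a leaf in
# invisible parentheses changes the tree shape but not the rendered output.
# Assumes black's vendored blib2to3 is importable; underscored aliases avoid
# clashing with this module's own imports.
from blib2to3.pgen2 import token as _token
from blib2to3.pytree import Leaf as _Leaf, Node as _Node
from black.nodes import syms as _syms, wrap_in_parentheses as _wrap

_name = _Leaf(_token.NAME, "x")
_expr = _Node(_syms.expr_stmt, [_name])
_wrap(_expr, _name, visible=False)
assert str(_expr) == "x"  # the empty-valued parens render as nothing

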
def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]:
    """Returns `wrapped` if `node` is of the shape ( wrapped ).

    Parentheses can be optional. Returns None otherwise."""
    if len(node.children) != 3:
        return None

    lpar, wrapped, rpar = node.children
    if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
        return None

    return wrapped


def ensure_visible(leaf: Leaf) -> None:
    """Make sure parentheses are visible.

    They could be invisible as part of some statements (see
    :func:`normalize_invisible_parens` and :func:`visit_import_from`).
    """
    if leaf.type == token.LPAR:
        leaf.value = "("
    elif leaf.type == token.RPAR:
        leaf.value = ")"


def is_name_token(nl: NL) -> TypeGuard[Leaf]:
    return nl.type == token.NAME


def is_lpar_token(nl: NL) -> TypeGuard[Leaf]:
    return nl.type == token.LPAR


def is_rpar_token(nl: NL) -> TypeGuard[Leaf]:
    return nl.type == token.RPAR


def is_string_token(nl: NL) -> TypeGuard[Leaf]:
    return nl.type == token.STRING


def is_number_token(nl: NL) -> TypeGuard[Leaf]:
    return nl.type == token.NUMBER


def is_part_of_annotation(leaf: Leaf) -> bool:
    """Returns whether this leaf is part of type annotations."""
    ancestor = leaf.parent
    while ancestor is not None:
        if ancestor.prev_sibling and ancestor.prev_sibling.type == token.RARROW:
            return True
        if ancestor.parent and ancestor.parent.type == syms.tname:
            return True
        ancestor = ancestor.parent
    return False
61
venv/lib/python3.12/site-packages/black/numerics.py
Normal file
@@ -0,0 +1,61 @@
"""
Formatting numeric literals.
"""

from blib2to3.pytree import Leaf


def format_hex(text: str) -> str:
    """
    Formats a hexadecimal string like "0x12B3"
    """
    before, after = text[:2], text[2:]
    return f"{before}{after.upper()}"


def format_scientific_notation(text: str) -> str:
    """Formats a numeric string utilizing scientific notation"""
    before, after = text.split("e")
    sign = ""
    if after.startswith("-"):
        after = after[1:]
        sign = "-"
    elif after.startswith("+"):
        after = after[1:]
    before = format_float_or_int_string(before)
    return f"{before}e{sign}{after}"


def format_complex_number(text: str) -> str:
    """Formats a complex string like `10j`"""
    number = text[:-1]
    suffix = text[-1]
    return f"{format_float_or_int_string(number)}{suffix}"


def format_float_or_int_string(text: str) -> str:
    """Formats a float string like "1.0"."""
    if "." not in text:
        return text

    before, after = text.split(".")
    return f"{before or 0}.{after or 0}"


def normalize_numeric_literal(leaf: Leaf) -> None:
    """Normalizes numeric (float, int, and complex) literals.

    All letters used in the representation are normalized to lowercase."""
    text = leaf.value.lower()
    if text.startswith(("0o", "0b")):
        # Leave octal and binary literals alone.
        pass
    elif text.startswith("0x"):
        text = format_hex(text)
    elif "e" in text:
        text = format_scientific_notation(text)
    elif text.endswith("j"):
        text = format_complex_number(text)
    else:
        text = format_float_or_int_string(text)
    leaf.value = text


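# Editor's illustrative sketch, not part of black's source (assumes the black
# package and its vendored blib2to3 are importable):
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf
from black.numerics import normalize_numeric_literal

num = Leaf(token.NUMBER, "0XAB_CDE")
normalize_numeric_literal(num)
assert num.value == "0xAB_CDE"  # prefix lowercased, hex digits uppercased

num = Leaf(token.NUMBER, "10.E3J")
normalize_numeric_literal(num)
assert num.value == "10.0e3j"  # ".0" completed, exponent and suffix lowercased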
105
venv/lib/python3.12/site-packages/black/output.py
Normal file
@@ -0,0 +1,105 @@
"""Nice output for Black.

The double calls are for patching purposes in tests.
"""

import json
import tempfile
from typing import Any, Optional

from click import echo, style
from mypy_extensions import mypyc_attr


@mypyc_attr(patchable=True)
def _out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    if message is not None:
        if "bold" not in styles:
            styles["bold"] = True
        message = style(message, **styles)
    echo(message, nl=nl, err=True)


@mypyc_attr(patchable=True)
def _err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    if message is not None:
        if "fg" not in styles:
            styles["fg"] = "red"
        message = style(message, **styles)
    echo(message, nl=nl, err=True)


@mypyc_attr(patchable=True)
def out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    _out(message, nl=nl, **styles)


def err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    _err(message, nl=nl, **styles)


def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Return a unified diff string between each cell in notebooks `a` and `b`."""
    a_nb = json.loads(a)
    b_nb = json.loads(b)
    diff_lines = [
        diff(
            "".join(a_nb["cells"][cell_number]["source"]) + "\n",
            "".join(b_nb["cells"][cell_number]["source"]) + "\n",
            f"{a_name}:cell_{cell_number}",
            f"{b_name}:cell_{cell_number}",
        )
        for cell_number, cell in enumerate(a_nb["cells"])
        if cell["cell_type"] == "code"
    ]
    return "".join(diff_lines)


def diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Return a unified diff string between strings `a` and `b`."""
    import difflib

    a_lines = a.splitlines(keepends=True)
    b_lines = b.splitlines(keepends=True)
    diff_lines = []
    for line in difflib.unified_diff(
        a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5
    ):
        # Work around https://bugs.python.org/issue2142
        # See:
        # https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html
        if line[-1] == "\n":
            diff_lines.append(line)
        else:
            diff_lines.append(line + "\n")
            diff_lines.append("\\ No newline at end of file\n")
    return "".join(diff_lines)


def color_diff(contents: str) -> str:
    """Inject the ANSI color codes to the diff."""
    lines = contents.split("\n")
    for i, line in enumerate(lines):
        if line.startswith("+++") or line.startswith("---"):
            line = "\033[1m" + line + "\033[0m"  # bold, reset
        elif line.startswith("@@"):
            line = "\033[36m" + line + "\033[0m"  # cyan, reset
        elif line.startswith("+"):
            line = "\033[32m" + line + "\033[0m"  # green, reset
        elif line.startswith("-"):
            line = "\033[31m" + line + "\033[0m"  # red, reset
        lines[i] = line
    return "\n".join(lines)


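# Editor's illustrative sketch, not part of black's source:
from black.output import color_diff, diff

colored = color_diff(diff("x = 1\n", "x = 2\n", "before", "after"))
print(colored)  # unified diff with bold headers, cyan hunks, green/red lines

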
@mypyc_attr(patchable=True)
def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str:
    """Dump `output` to a temporary file. Return path to the file."""
    with tempfile.NamedTemporaryFile(
        mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
    ) as f:
        for lines in output:
            f.write(lines)
            if ensure_final_newline and lines and lines[-1] != "\n":
                f.write("\n")
    return f.name
216
venv/lib/python3.12/site-packages/black/parsing.py
Normal file
@@ -0,0 +1,216 @@
"""
Parse Python code and perform AST validation.
"""

import ast
import sys
from typing import Iterable, Iterator, List, Set, Tuple

from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
from black.nodes import syms
from blib2to3 import pygram
from blib2to3.pgen2 import driver
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
from blib2to3.pgen2.tokenize import TokenError
from blib2to3.pytree import Leaf, Node


class InvalidInput(ValueError):
    """Raised when input source code fails all parse attempts."""


def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
    if not target_versions:
        # No target_version specified, so try all grammars.
        return [
            # Python 3.7-3.9
            pygram.python_grammar_async_keywords,
            # Python 3.0-3.6
            pygram.python_grammar,
            # Python 3.10+
            pygram.python_grammar_soft_keywords,
        ]

    grammars = []
    # If we have to parse both, try to parse async as a keyword first
    if not supports_feature(
        target_versions, Feature.ASYNC_IDENTIFIERS
    ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
        # Python 3.7-3.9
        grammars.append(pygram.python_grammar_async_keywords)
    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
        # Python 3.0-3.6
        grammars.append(pygram.python_grammar)
    if any(Feature.PATTERN_MATCHING in VERSION_TO_FEATURES[v] for v in target_versions):
        # Python 3.10+
        grammars.append(pygram.python_grammar_soft_keywords)

    # At least one of the above branches must have been taken, because every Python
    # version has exactly one of the two 'ASYNC_*' flags
    return grammars


def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
    """Given a string with source, return the lib2to3 Node."""
    if not src_txt.endswith("\n"):
        src_txt += "\n"

    grammars = get_grammars(set(target_versions))
    errors = {}
    for grammar in grammars:
        drv = driver.Driver(grammar)
        try:
            result = drv.parse_string(src_txt, True)
            break

        except ParseError as pe:
            lineno, column = pe.context[1]
            lines = src_txt.splitlines()
            try:
                faulty_line = lines[lineno - 1]
            except IndexError:
                faulty_line = "<line number missing in source>"
            errors[grammar.version] = InvalidInput(
                f"Cannot parse: {lineno}:{column}: {faulty_line}"
            )

        except TokenError as te:
            # In edge cases these are raised, and typically don't have a "faulty_line".
            lineno, column = te.args[1]
            errors[grammar.version] = InvalidInput(
                f"Cannot parse: {lineno}:{column}: {te.args[0]}"
            )

    else:
        # Choose the latest version when raising the actual parsing error.
        assert len(errors) >= 1
        exc = errors[max(errors)]
        raise exc from None

    if isinstance(result, Leaf):
        result = Node(syms.file_input, [result])
    return result


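# Editor's illustrative sketch, not part of black's source. The blib2to3 tree
# is lossless (whitespace and comments live in prefixes), so unparsing
# round-trips the input exactly:
from black.parsing import lib2to3_parse, lib2to3_unparse

tree = lib2to3_parse("x  =  1  # comment\n")
assert lib2to3_unparse(tree) == "x  =  1  # comment\n"

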
def matches_grammar(src_txt: str, grammar: Grammar) -> bool:
    drv = driver.Driver(grammar)
    try:
        drv.parse_string(src_txt, True)
    except (ParseError, TokenError, IndentationError):
        return False
    else:
        return True


def lib2to3_unparse(node: Node) -> str:
    """Given a lib2to3 node, return its string representation."""
    code = str(node)
    return code


def parse_single_version(
    src: str, version: Tuple[int, int], *, type_comments: bool
) -> ast.AST:
    filename = "<unknown>"
    return ast.parse(
        src, filename, feature_version=version, type_comments=type_comments
    )


def parse_ast(src: str) -> ast.AST:
    # TODO: support Python 4+ ;)
    versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]

    first_error = ""
    for version in sorted(versions, reverse=True):
        try:
            return parse_single_version(src, version, type_comments=True)
        except SyntaxError as e:
            if not first_error:
                first_error = str(e)

    # Try to parse without type comments
    for version in sorted(versions, reverse=True):
        try:
            return parse_single_version(src, version, type_comments=False)
        except SyntaxError:
            pass

    raise SyntaxError(first_error)


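# Editor's illustrative sketch, not part of black's source. Sources that
# differ only in formatting stringify to identical AST dumps, which is the
# basis of black's "reformatting is behavior-preserving" check:
from black.parsing import parse_ast, stringify_ast

a = "\n".join(stringify_ast(parse_ast("x=(1,)")))
b = "\n".join(stringify_ast(parse_ast("x = (1,)")))
assert a == b

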
def _normalize(lineend: str, value: str) -> str:
    # To normalize, we strip any leading and trailing space from
    # each line...
    stripped: List[str] = [i.strip() for i in value.splitlines()]
    normalized = lineend.join(stripped)
    # ...and remove any blank lines at the beginning and end of
    # the whole string
    return normalized.strip()


def stringify_ast(node: ast.AST, depth: int = 0) -> Iterator[str]:
    """Simple visitor generating strings to compare ASTs by content."""

    if (
        isinstance(node, ast.Constant)
        and isinstance(node.value, str)
        and node.kind == "u"
    ):
        # It's a quirk of history that we strip the u prefix over here. We used to
        # rewrite the AST nodes for Python version compatibility and we never copied
        # over the kind.
        node.kind = None

    yield f"{' ' * depth}{node.__class__.__name__}("

    for field in sorted(node._fields):  # noqa: F402
        # TypeIgnore has only one field 'lineno' which breaks this comparison
        if isinstance(node, ast.TypeIgnore):
            break

        try:
            value: object = getattr(node, field)
        except AttributeError:
            continue

        yield f"{' ' * (depth + 1)}{field}="

        if isinstance(value, list):
            for item in value:
                # Ignore nested tuples within del statements, because we may insert
                # parentheses and they change the AST.
                if (
                    field == "targets"
                    and isinstance(node, ast.Delete)
                    and isinstance(item, ast.Tuple)
                ):
                    for elt in item.elts:
                        yield from stringify_ast(elt, depth + 2)

                elif isinstance(item, ast.AST):
                    yield from stringify_ast(item, depth + 2)

        elif isinstance(value, ast.AST):
            yield from stringify_ast(value, depth + 2)

        else:
            normalized: object
            if (
                isinstance(node, ast.Constant)
                and field == "value"
                and isinstance(value, str)
            ):
                # Constant strings may be indented across newlines, if they are
                # docstrings; fold spaces after newlines when comparing. Similarly,
                # trailing and leading space may be removed.
                normalized = _normalize("\n", value)
            elif field == "type_comment" and isinstance(value, str):
                # Trailing whitespace in type comments is removed.
                normalized = value.rstrip()
            else:
                normalized = value
            yield f"{' ' * (depth + 2)}{normalized!r},  # {value.__class__.__name__}"

    yield f"{' ' * depth})  # /{node.__class__.__name__}"
0
venv/lib/python3.12/site-packages/black/py.typed
Normal file
107
venv/lib/python3.12/site-packages/black/report.py
Normal file
@@ -0,0 +1,107 @@
"""
Summarize Black runs to users.
"""

from dataclasses import dataclass
from enum import Enum
from pathlib import Path

from click import style

from black.output import err, out


class Changed(Enum):
    NO = 0
    CACHED = 1
    YES = 2


class NothingChanged(UserWarning):
    """Raised when reformatted code is the same as source."""


@dataclass
class Report:
    """Provides a reformatting counter. Can be rendered with `str(report)`."""

    check: bool = False
    diff: bool = False
    quiet: bool = False
    verbose: bool = False
    change_count: int = 0
    same_count: int = 0
    failure_count: int = 0

    def done(self, src: Path, changed: Changed) -> None:
        """Increment the counter for successful reformatting. Write out a message."""
        if changed is Changed.YES:
            reformatted = "would reformat" if self.check or self.diff else "reformatted"
            if self.verbose or not self.quiet:
                out(f"{reformatted} {src}")
            self.change_count += 1
        else:
            if self.verbose:
                if changed is Changed.NO:
                    msg = f"{src} already well formatted, good job."
                else:
                    msg = f"{src} wasn't modified on disk since last run."
                out(msg, bold=False)
            self.same_count += 1

    def failed(self, src: Path, message: str) -> None:
        """Increment the counter for failed reformatting. Write out a message."""
        err(f"error: cannot format {src}: {message}")
        self.failure_count += 1

    def path_ignored(self, path: Path, message: str) -> None:
        if self.verbose:
            out(f"{path} ignored: {message}", bold=False)

    @property
    def return_code(self) -> int:
        """Return the exit code that the app should use.

        This considers the current state of changed files and failures:
        - if there were any failures, return 123;
        - if any files were changed and --check is being used, return 1;
        - otherwise return 0.
        """
        # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
        # 126 we have special return codes reserved by the shell.
        if self.failure_count:
            return 123

        elif self.change_count and self.check:
            return 1

        return 0

    def __str__(self) -> str:
        """Render a color report of the current state.

        Use `click.unstyle` to remove colors.
        """
        if self.check or self.diff:
            reformatted = "would be reformatted"
            unchanged = "would be left unchanged"
            failed = "would fail to reformat"
        else:
            reformatted = "reformatted"
            unchanged = "left unchanged"
            failed = "failed to reformat"
        report = []
        if self.change_count:
            s = "s" if self.change_count > 1 else ""
            report.append(
                style(f"{self.change_count} file{s} ", bold=True, fg="blue")
                + style(f"{reformatted}", bold=True)
            )

        if self.same_count:
            s = "s" if self.same_count > 1 else ""
            report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged)
        if self.failure_count:
            s = "s" if self.failure_count > 1 else ""
            report.append(style(f"{self.failure_count} file{s} {failed}", fg="red"))
        return ", ".join(report) + "."


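# Editor's illustrative sketch, not part of black's source:
from pathlib import Path
from black.report import Changed, Report

rep = Report(check=True)
rep.done(Path("a.py"), Changed.YES)  # prints "would reformat a.py" to stderr
rep.done(Path("b.py"), Changed.NO)
assert rep.return_code == 1  # --check with pending changes exits 1
str(rep)  # "1 file would be reformatted, 1 file would be left unchanged."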
28
venv/lib/python3.12/site-packages/black/rusty.py
Normal file
@@ -0,0 +1,28 @@
"""An error-handling model influenced by that used by the Rust programming language.

See https://doc.rust-lang.org/book/ch09-00-error-handling.html.
"""

from typing import Generic, TypeVar, Union

T = TypeVar("T")
E = TypeVar("E", bound=Exception)


class Ok(Generic[T]):
    def __init__(self, value: T) -> None:
        self._value = value

    def ok(self) -> T:
        return self._value


class Err(Generic[E]):
    def __init__(self, e: E) -> None:
        self._e = e

    def err(self) -> E:
        return self._e


Result = Union[Ok[T], Err[E]]


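# Editor's illustrative sketch, not part of black's source: a helper that
# returns a Result instead of raising, in the Rust style modeled above.
from black.rusty import Err, Ok, Result


def parse_int(text: str) -> Result[int, ValueError]:
    try:
        return Ok(int(text))
    except ValueError as e:
        return Err(e)


outcome = parse_int("42")
if isinstance(outcome, Ok):
    assert outcome.ok() == 42  # success carries the value, not an exception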
329
venv/lib/python3.12/site-packages/black/strings.py
Normal file
@@ -0,0 +1,329 @@
"""
Simple formatting on strings. Further string formatting code is in trans.py.
"""

import re
import sys
from functools import lru_cache
from typing import Final, List, Match, Pattern

from black._width_table import WIDTH_TABLE
from blib2to3.pytree import Leaf

STRING_PREFIX_CHARS: Final = "furbFURB"  # All possible string prefix characters.
STRING_PREFIX_RE: Final = re.compile(
    r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL
)
FIRST_NON_WHITESPACE_RE: Final = re.compile(r"\s*\t+\s*(\S)")
UNICODE_ESCAPE_RE: Final = re.compile(
    r"(?P<backslashes>\\+)(?P<body>"
    r"(u(?P<u>[a-fA-F0-9]{4}))"  # Character with 16-bit hex value xxxx
    r"|(U(?P<U>[a-fA-F0-9]{8}))"  # Character with 32-bit hex value xxxxxxxx
    r"|(x(?P<x>[a-fA-F0-9]{2}))"  # Character with hex value hh
    r"|(N\{(?P<N>[a-zA-Z0-9 \-]{2,})\})"  # Character named name in the Unicode database
    r")",
    re.VERBOSE,
)


def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
    """Replace `regex` with `replacement` twice on `original`.

    This is used by string normalization to perform replaces on
    overlapping matches.
    """
    return regex.sub(replacement, regex.sub(replacement, original))


def has_triple_quotes(string: str) -> bool:
    """
    Returns:
        True iff @string starts with three quotation characters.
    """
    raw_string = string.lstrip(STRING_PREFIX_CHARS)
    return raw_string[:3] in {'"""', "'''"}


def lines_with_leading_tabs_expanded(s: str) -> List[str]:
    """
    Splits string into lines and expands only leading tabs (following the normal
    Python rules).
    """
    lines = []
    for line in s.splitlines():
        # Find the index of the first non-whitespace character after a string of
        # whitespace that includes at least one tab
        match = FIRST_NON_WHITESPACE_RE.match(line)
        if match:
            first_non_whitespace_idx = match.start(1)

            lines.append(
                line[:first_non_whitespace_idx].expandtabs()
                + line[first_non_whitespace_idx:]
            )
        else:
            lines.append(line)
    return lines


def fix_docstring(docstring: str, prefix: str) -> str:
    # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    if not docstring:
        return ""
    lines = lines_with_leading_tabs_expanded(docstring)
    # Determine minimum indentation (first line doesn't count):
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        last_line_idx = len(lines) - 2
        for i, line in enumerate(lines[1:]):
            stripped_line = line[indent:].rstrip()
            if stripped_line or i == last_line_idx:
                trimmed.append(prefix + stripped_line)
            else:
                trimmed.append("")
    return "\n".join(trimmed)


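# Editor's illustrative sketch, not part of black's source: fix_docstring
# strips the common indentation (first line excluded) and re-applies `prefix`.
from black.strings import fix_docstring

raw = "Summary.\n\n        Details.\n    "
assert fix_docstring(raw, "    ") == "Summary.\n\n    Details.\n    "

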
def get_string_prefix(string: str) -> str:
    """
    Pre-conditions:
        * assert_is_leaf_string(@string)

    Returns:
        @string's prefix (e.g. '', 'r', 'f', or 'rf').
    """
    assert_is_leaf_string(string)

    prefix = ""
    prefix_idx = 0
    while string[prefix_idx] in STRING_PREFIX_CHARS:
        prefix += string[prefix_idx]
        prefix_idx += 1

    return prefix


def assert_is_leaf_string(string: str) -> None:
    """
    Checks the pre-condition that @string has the format that you would expect
    of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==
    token.STRING`. A more precise description of the pre-conditions that are
    checked is listed below.

    Pre-conditions:
        * @string starts with either ', ", <prefix>', or <prefix>" where
          `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.
        * @string ends with a quote character (' or ").

    Raises:
        AssertionError(...) if the pre-conditions listed above are not
        satisfied.
    """
    dquote_idx = string.find('"')
    squote_idx = string.find("'")
    if -1 in [dquote_idx, squote_idx]:
        quote_idx = max(dquote_idx, squote_idx)
    else:
        quote_idx = min(squote_idx, dquote_idx)

    assert (
        0 <= quote_idx < len(string) - 1
    ), f"{string!r} is missing a starting quote character (' or \")."
    assert string[-1] in (
        "'",
        '"',
    ), f"{string!r} is missing an ending quote character (' or \")."
    assert set(string[:quote_idx]).issubset(
        set(STRING_PREFIX_CHARS)
    ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}."


def normalize_string_prefix(s: str) -> str:
    """Make all string prefixes lowercase."""
    match = STRING_PREFIX_RE.match(s)
    assert match is not None, f"failed to match string {s!r}"
    orig_prefix = match.group(1)
    new_prefix = (
        orig_prefix.replace("F", "f")
        .replace("B", "b")
        .replace("U", "")
        .replace("u", "")
    )

    # Python syntax guarantees max 2 prefixes and that one of them is "r"
    if len(new_prefix) == 2 and "r" != new_prefix[0].lower():
        new_prefix = new_prefix[::-1]
    return f"{new_prefix}{match.group(2)}"


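# Editor's illustrative sketch, not part of black's source:
from black.strings import normalize_string_prefix

assert normalize_string_prefix('F"{x}"') == 'f"{x}"'  # F -> f
assert normalize_string_prefix('U"text"') == '"text"'  # legacy u prefix dropped
assert normalize_string_prefix("bR'data'") == "Rb'data'"  # "r"/"R" moved first

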
# Re(gex) does actually cache patterns internally but this still improves
# performance on a long list literal of strings by 5-9% since lru_cache's
# caching overhead is much lower.
@lru_cache(maxsize=64)
def _cached_compile(pattern: str) -> Pattern[str]:
    return re.compile(pattern)


def normalize_string_quotes(s: str) -> str:
    """Prefer double quotes but only if it doesn't cause more escaping.

    Adds or removes backslashes as appropriate. Doesn't parse and fix
    strings nested in f-strings.
    """
    value = s.lstrip(STRING_PREFIX_CHARS)
    if value[:3] == '"""':
        return s

    elif value[:3] == "'''":
        orig_quote = "'''"
        new_quote = '"""'
    elif value[0] == '"':
        orig_quote = '"'
        new_quote = "'"
    else:
        orig_quote = "'"
        new_quote = '"'
    first_quote_pos = s.find(orig_quote)
    if first_quote_pos == -1:
        return s  # There's an internal error

    prefix = s[:first_quote_pos]
    unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
    escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
    escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
    body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)]
    if "r" in prefix.casefold():
        if unescaped_new_quote.search(body):
            # There's at least one unescaped new_quote in this raw string
            # so converting is impossible
            return s

        # Do not introduce or remove backslashes in raw strings
        new_body = body
    else:
        # remove unnecessary escapes
        new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
        if body != new_body:
            # Consider the string without unnecessary escapes as the original
            body = new_body
            s = f"{prefix}{orig_quote}{body}{orig_quote}"
        new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
        new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
    if "f" in prefix.casefold():
        matches = re.findall(
            r"""
            (?:(?<!\{)|^)\{  # start of the string or a non-{ followed by a single {
                ([^{].*?)  # contents of the brackets except if begins with {{
            \}(?:(?!\})|$)  # A } followed by end of the string or a non-}
            """,
            new_body,
            re.VERBOSE,
        )
        for m in matches:
            if "\\" in str(m):
                # Do not introduce backslashes in interpolated expressions
                return s

    if new_quote == '"""' and new_body[-1:] == '"':
        # edge case:
        new_body = new_body[:-1] + '\\"'
    orig_escape_count = body.count("\\")
    new_escape_count = new_body.count("\\")
    if new_escape_count > orig_escape_count:
        return s  # Do not introduce more escaping

    if new_escape_count == orig_escape_count and orig_quote == '"':
        return s  # Prefer double quotes

    return f"{prefix}{new_quote}{new_body}{new_quote}"


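# Editor's illustrative sketch, not part of black's source:
from black.strings import normalize_string_quotes

assert normalize_string_quotes("'abc'") == '"abc"'  # double quotes preferred
assert normalize_string_quotes("'it\\'s'") == '"it\'s"'  # escape dropped
assert normalize_string_quotes('"say \\"hi\\""') == "'say \"hi\"'"  # fewer escapes win

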
def normalize_unicode_escape_sequences(leaf: Leaf) -> None:
    """Replace hex codes in Unicode escape sequences with lowercase representation."""
    text = leaf.value
    prefix = get_string_prefix(text)
    if "r" in prefix.lower():
        return

    def replace(m: Match[str]) -> str:
        groups = m.groupdict()
        back_slashes = groups["backslashes"]

        if len(back_slashes) % 2 == 0:
            return back_slashes + groups["body"]

        if groups["u"]:
            # \u
            return back_slashes + "u" + groups["u"].lower()
        elif groups["U"]:
            # \U
            return back_slashes + "U" + groups["U"].lower()
        elif groups["x"]:
            # \x
            return back_slashes + "x" + groups["x"].lower()
        else:
            assert groups["N"], f"Unexpected match: {m}"
            # \N{}
            return back_slashes + "N{" + groups["N"].upper() + "}"

    leaf.value = re.sub(UNICODE_ESCAPE_RE, replace, text)


@lru_cache(maxsize=4096)
def char_width(char: str) -> int:
    """Return the width of a single character as it would be displayed in a
    terminal or editor (which respects Unicode East Asian Width).

    Full-width characters are counted as 2, while half-width characters are
    counted as 1. Control characters are counted as 0.
    """
    table = WIDTH_TABLE
    codepoint = ord(char)
    highest = len(table) - 1
    lowest = 0
    idx = highest // 2
    while True:
        start_codepoint, end_codepoint, width = table[idx]
        if codepoint < start_codepoint:
            highest = idx - 1
        elif codepoint > end_codepoint:
            lowest = idx + 1
        else:
            return 0 if width < 0 else width
        if highest < lowest:
            break
        idx = (highest + lowest) // 2
    return 1


def str_width(line_str: str) -> int:
    """Return the width of `line_str` as it would be displayed in a terminal
    or editor (which respects Unicode East Asian Width).

    You could utilize this function to determine, for example, if a string
    is too wide to display in a terminal or editor.
    """
    if line_str.isascii():
        # Fast path for a line consisting of only ASCII characters
        return len(line_str)
    return sum(map(char_width, line_str))


def count_chars_in_width(line_str: str, max_width: int) -> int:
    """Count the number of characters in `line_str` that would fit in a
    terminal or editor of `max_width` (which respects Unicode East Asian
    Width).
    """
    total_width = 0
    for i, char in enumerate(line_str):
        width = char_width(char)
        if width + total_width > max_width:
            return i
        total_width += width
    return len(line_str)


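# Editor's illustrative sketch, not part of black's source:
from black.strings import count_chars_in_width, str_width

assert str_width("hello") == 5  # ASCII fast path
assert str_width("あい") == 4  # full-width characters occupy two columns
assert count_chars_in_width("あいう", 5) == 2  # only two fit in five columns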
2438
venv/lib/python3.12/site-packages/black/trans.py
Normal file
File diff suppressed because it is too large