@@ -384,26 +384,29 @@ def ispackage(path):
                return True
    return False
 
-def source_synopsis(file_):
-    """Takes a file object and returns the one-line summary if present"""
-    if hasattr(file_, 'buffer'):
-        file_ = file_.buffer
-    if isinstance(file_, io.TextIOBase):
+def source_synopsis(file):
+    """Return the one-line summary of a file object, if present"""
+    if hasattr(file, 'buffer'):
+        file = file.buffer
+    if isinstance(file, io.TextIOBase):
         try:
-            file_ = io.BytesIO(bytes(file_.read(), 'utf-8'))
+            file = io.BytesIO(bytes(file.read(), 'utf-8'))
         except UnicodeEncodeError:
-            # exception is raised if both utf-8 and latin-1 don't work
-            file_ = io.BytesIO(bytes(file_.read(), 'latin-1'))
+            # an exception will be raised if both utf-8 and latin-1 don't work
+            file = io.BytesIO(bytes(file.read(), 'latin-1'))
 
-    tokens = tokenize.tokenize(file_.readline)
+    tokens = tokenize.tokenize(file.readline)
 
-    # tokenize always returns atleast ENCODING and ENDMARKER
-    for _token in tokens:
-        _token.name = token.tok_name[_token.type]
-        if _token.name not in ['COMMENT', 'NL', 'ENCODING']:
+    # tokenize always returns at least ENCODING and ENDMARKER
+    for token_info in tokens:
+        token_name = tokenize.tok_name[token_info.type]
+        if token_name not in {'COMMENT', 'NL', 'ENCODING'}:
             break
-    if _token.name == 'STRING':
-        return ast.literal_eval(_token.string).strip().split('\n')[0].strip()
+
+    # XXX: token_name may not be set
+    if token_name == 'STRING':
+        return ast.literal_eval(token_info.string).strip().split('\n')[0].strip()
+
    return None
 
 def synopsis(filename, cache={}):
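A minimal usage sketch (not part of the patch), assuming the patched pydoc is importable; the sample module source is invented for illustration:

    import io
    import pydoc

    # The module docstring is the first token that is not COMMENT, NL, or
    # ENCODING, so source_synopsis returns only its first line.
    source = '"""One-line summary.\n\nLonger description follows."""\n'
    print(pydoc.source_synopsis(io.StringIO(source)))  # -> One-line summary.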