AppPkg/Applications/Python: Add Python 2.7.2 sources since the release of Python 2.7.3 made them unavailable from the python.org web site.
These files are a subset of the python-2.7.2.tgz distribution from python.org. Changed files from PyMod-2.7.2 have been copied into the corresponding directories of this tree, replacing the original files in the distribution. Signed-off-by: daryl.mcdaniel@intel.com git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@13197 6f19259b-4bc3-4df7-8a09-765794883524
This commit is contained in:
@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env python
"""Command-line launcher for the 2to3 source translator."""

import sys

from lib2to3.main import main

# Run the stock fixer package and propagate the translator's exit status.
sys.exit(main("lib2to3.fixes"))
|
69
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/README
Normal file
69
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/README
Normal file
@ -0,0 +1,69 @@
|
||||
This directory contains a collection of executable Python scripts that
|
||||
are useful while building, extending or managing Python. Some (e.g.,
|
||||
dutree or lll) are also generally useful UNIX tools.
|
||||
|
||||
See also the Demo/scripts directory!
|
||||
|
||||
analyze_dxp.py Analyzes the result of sys.getdxp()
|
||||
byext.py Print lines/words/chars stats of files by extension
|
||||
byteyears.py Print product of a file's size and age
|
||||
checkappend.py Search for multi-argument .append() calls
|
||||
checkpyc.py Check presence and validity of ".pyc" files
|
||||
classfix.py Convert old class syntax to new
|
||||
cleanfuture.py          Fix redundant Python __future__ statements
|
||||
combinerefs.py A helper for analyzing PYTHONDUMPREFS output.
|
||||
copytime.py Copy one file's atime and mtime to another
|
||||
crlf.py Change CRLF line endings to LF (Windows to Unix)
|
||||
cvsfiles.py Print a list of files that are under CVS
|
||||
db2pickle.py Dump a database file to a pickle
|
||||
diff.py Print file diffs in context, unified, or ndiff formats
|
||||
dutree.py Format du(1) output as a tree sorted by size
|
||||
eptags.py Create Emacs TAGS file for Python modules
|
||||
find_recursionlimit.py Find the maximum recursion limit on this machine
|
||||
finddiv.py A grep-like tool that looks for division operators
|
||||
findlinksto.py Recursively find symbolic links to a given path prefix
|
||||
findnocoding.py Find source files which need an encoding declaration
|
||||
fixcid.py Massive identifier substitution on C source files
|
||||
fixdiv.py Tool to fix division operators.
|
||||
fixheader.py Add some cpp magic to a C include file
|
||||
fixnotice.py Fix the copyright notice in source files
|
||||
fixps.py Fix Python scripts' first line (if #!)
|
||||
ftpmirror.py FTP mirror script
|
||||
google.py Open a webbrowser with Google
|
||||
gprof2html.py Transform gprof(1) output into useful HTML
|
||||
h2py.py Translate #define's into Python assignments
|
||||
hotshotmain.py Main program to run script under control of hotshot
|
||||
idle Main program to start IDLE
|
||||
ifdef.py Remove #if(n)def groups from C sources
|
||||
lfcr.py Change LF line endings to CRLF (Unix to Windows)
|
||||
linktree.py Make a copy of a tree with links to original files
|
||||
lll.py Find and list symbolic links in current directory
|
||||
logmerge.py Consolidate CVS/RCS logs read from stdin
|
||||
mailerdaemon.py         Parse error messages from mailer daemons (Sjoerd&Jack)
|
||||
md5sum.py Print MD5 checksums of argument files.
|
||||
methfix.py Fix old method syntax def f(self, (a1, ..., aN)):
|
||||
mkreal.py Turn a symbolic link into a real file or directory
|
||||
ndiff.py Intelligent diff between text files (Tim Peters)
|
||||
nm2def.py Create a template for PC/python_nt.def (Marc Lemburg)
|
||||
objgraph.py Print object graph from nm output on a library
|
||||
parseentities.py Utility for parsing HTML entity definitions
|
||||
pathfix.py Change #!/usr/local/bin/python into something else
|
||||
pdeps.py Print dependencies between Python modules
|
||||
pickle2db.py Load a pickle generated by db2pickle.py to a database
|
||||
pindent.py Indent Python code, giving block-closing comments
|
||||
ptags.py Create vi tags file for Python modules
|
||||
pydoc Python documentation browser.
|
||||
pysource.py Find Python source files
|
||||
redemo.py Basic regular expression demonstration facility
|
||||
reindent.py Change .py files to use 4-space indents.
|
||||
rgrep.py Reverse grep through a file (useful for big logfiles)
|
||||
serve.py Small wsgiref-based web server, used in make serve in Doc
|
||||
setup.py Install all scripts listed here
|
||||
suff.py Sort a list of files by suffix
|
||||
svneol.py Sets svn:eol-style on all files in directory
|
||||
texcheck.py Validate Python LaTeX formatting (Raymond Hettinger)
|
||||
texi2html.py Convert GNU texinfo files into HTML
|
||||
treesync.py             Synchronize source trees (very idiosyncratic)
|
||||
untabify.py Replace tabs with spaces in argument files
|
||||
which.py Find a program in $PATH
|
||||
xxci.py Wrapper for rcsdiff and ci
|
@ -0,0 +1,129 @@
|
||||
"""
|
||||
Some helper functions to analyze the output of sys.getdxp() (which is
|
||||
only available if Python was built with -DDYNAMIC_EXECUTION_PROFILE).
|
||||
These will tell you which opcodes have been executed most frequently
|
||||
in the current process, and, if Python was also built with -DDXPAIRS,
|
||||
will tell you which instruction _pairs_ were executed most frequently,
|
||||
which may help in choosing new instructions.
|
||||
|
||||
If Python was built without -DDYNAMIC_EXECUTION_PROFILE, importing
|
||||
this module will raise a RuntimeError.
|
||||
|
||||
If you're running a script you want to profile, a simple way to get
|
||||
the common pairs is:
|
||||
|
||||
$ PYTHONPATH=$PYTHONPATH:<python_srcdir>/Tools/scripts \
|
||||
./python -i -O the_script.py --args
|
||||
...
|
||||
> from analyze_dxp import *
|
||||
> s = render_common_pairs()
|
||||
> open('/tmp/some_file', 'w').write(s)
|
||||
"""
|
||||
|
||||
import copy
|
||||
import opcode
|
||||
import operator
|
||||
import sys
|
||||
import threading
|
||||
|
||||
if not hasattr(sys, "getdxp"):
|
||||
raise RuntimeError("Can't import analyze_dxp: Python built without"
|
||||
" -DDYNAMIC_EXECUTION_PROFILE.")
|
||||
|
||||
|
||||
_profile_lock = threading.RLock()
|
||||
_cumulative_profile = sys.getdxp()
|
||||
|
||||
# If Python was built with -DDXPAIRS, sys.getdxp() returns a list of
|
||||
# lists of ints. Otherwise it returns just a list of ints.
|
||||
def has_pairs(profile):
    """Return True iff *profile* came from a -DDXPAIRS build.

    A pairs profile is a non-empty list whose first element is itself a
    list; a plain opcode profile is a flat list of ints.
    """
    if not profile:
        return False
    return isinstance(profile[0], list)
|
||||
|
||||
|
||||
def reset_profile():
    """Discard any execution-profile data gathered so far."""
    global _cumulative_profile
    with _profile_lock:
        # sys.getdxp() clears the interpreter's internal counters as a side
        # effect; the second call then fetches a freshly zeroed profile to
        # replace our cached copy.
        sys.getdxp()
        _cumulative_profile = sys.getdxp()
|
||||
|
||||
|
||||
def merge_profile():
    """Fold the interpreter's current profile into this module's cache.

    sys.getdxp() zeroes its counters on every call, so the running total
    has to be accumulated here.
    """
    with _profile_lock:
        fresh = sys.getdxp()
        if has_pairs(fresh):
            # Pairs build: entry [i][j] counts instruction i followed by j.
            for i, row in enumerate(_cumulative_profile):
                for j in range(len(row)):
                    row[j] += fresh[i][j]
        else:
            # Plain build: entry [i] counts executions of instruction i.
            for i in range(len(_cumulative_profile)):
                _cumulative_profile[i] += fresh[i]
|
||||
|
||||
|
||||
def snapshot_profile():
    """Return a deep copy of the cumulative execution profile so far."""
    with _profile_lock:
        # Pull in anything gathered since the last merge before copying.
        merge_profile()
        return copy.deepcopy(_cumulative_profile)
|
||||
|
||||
|
||||
def common_instructions(profile):
    """Return (opcode, opname, count) tuples, most frequent opcode first."""
    # In a pairs profile the single-opcode totals live in the last row.
    source = profile[-1] if has_pairs(profile) and profile else profile
    tallies = [(op, opcode.opname[op], count)
               for op, count in enumerate(source)
               if count > 0]
    tallies.sort(key=operator.itemgetter(2), reverse=True)
    return tallies
|
||||
|
||||
|
||||
def common_pairs(profile):
    """Return ((op1, op2), (name1, name2), count) tuples, most frequent first.

    An empty list is returned for a profile without pair data (i.e. from a
    build without -DDXPAIRS).
    """
    if not has_pairs(profile):
        return []
    pairs = []
    # The final row holds single-opcode totals, so it is dropped here.
    for op1, row in enumerate(profile[:-1]):
        for op2, count in enumerate(row):
            if count > 0:
                names = (opcode.opname[op1], opcode.opname[op2])
                pairs.append(((op1, op2), names, count))
    pairs.sort(key=operator.itemgetter(2), reverse=True)
    return pairs
|
||||
|
||||
|
||||
def render_common_pairs(profile=None):
    """Render the most common opcode pairs as text, most frequent first.

    Each output line has the form:
        <count>: ('<opname1>', '<opname2>')
    When *profile* is omitted, a snapshot of the cumulative profile is used.
    """
    if profile is None:
        profile = snapshot_profile()
    pieces = ["%s: %s\n" % (count, ops)
              for _, ops, count in common_pairs(profile)]
    return ''.join(pieces)
|
131
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/byext.py
Normal file
131
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/byext.py
Normal file
@ -0,0 +1,131 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""Show file statistics by extension."""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
class Stats:
|
||||
|
||||
def __init__(self):
|
||||
self.stats = {}
|
||||
|
||||
def statargs(self, args):
|
||||
for arg in args:
|
||||
if os.path.isdir(arg):
|
||||
self.statdir(arg)
|
||||
elif os.path.isfile(arg):
|
||||
self.statfile(arg)
|
||||
else:
|
||||
sys.stderr.write("Can't find %s\n" % arg)
|
||||
self.addstats("<???>", "unknown", 1)
|
||||
|
||||
def statdir(self, dir):
|
||||
self.addstats("<dir>", "dirs", 1)
|
||||
try:
|
||||
names = os.listdir(dir)
|
||||
except os.error, err:
|
||||
sys.stderr.write("Can't list %s: %s\n" % (dir, err))
|
||||
self.addstats("<dir>", "unlistable", 1)
|
||||
return
|
||||
names.sort()
|
||||
for name in names:
|
||||
if name.startswith(".#"):
|
||||
continue # Skip CVS temp files
|
||||
if name.endswith("~"):
|
||||
continue# Skip Emacs backup files
|
||||
full = os.path.join(dir, name)
|
||||
if os.path.islink(full):
|
||||
self.addstats("<lnk>", "links", 1)
|
||||
elif os.path.isdir(full):
|
||||
self.statdir(full)
|
||||
else:
|
||||
self.statfile(full)
|
||||
|
||||
def statfile(self, filename):
|
||||
head, ext = os.path.splitext(filename)
|
||||
head, base = os.path.split(filename)
|
||||
if ext == base:
|
||||
ext = "" # E.g. .cvsignore is deemed not to have an extension
|
||||
ext = os.path.normcase(ext)
|
||||
if not ext:
|
||||
ext = "<none>"
|
||||
self.addstats(ext, "files", 1)
|
||||
try:
|
||||
f = open(filename, "rb")
|
||||
except IOError, err:
|
||||
sys.stderr.write("Can't open %s: %s\n" % (filename, err))
|
||||
self.addstats(ext, "unopenable", 1)
|
||||
return
|
||||
data = f.read()
|
||||
f.close()
|
||||
self.addstats(ext, "bytes", len(data))
|
||||
if '\0' in data:
|
||||
self.addstats(ext, "binary", 1)
|
||||
return
|
||||
if not data:
|
||||
self.addstats(ext, "empty", 1)
|
||||
#self.addstats(ext, "chars", len(data))
|
||||
lines = data.splitlines()
|
||||
self.addstats(ext, "lines", len(lines))
|
||||
del lines
|
||||
words = data.split()
|
||||
self.addstats(ext, "words", len(words))
|
||||
|
||||
def addstats(self, ext, key, n):
|
||||
d = self.stats.setdefault(ext, {})
|
||||
d[key] = d.get(key, 0) + n
|
||||
|
||||
def report(self):
|
||||
exts = self.stats.keys()
|
||||
exts.sort()
|
||||
# Get the column keys
|
||||
columns = {}
|
||||
for ext in exts:
|
||||
columns.update(self.stats[ext])
|
||||
cols = columns.keys()
|
||||
cols.sort()
|
||||
colwidth = {}
|
||||
colwidth["ext"] = max([len(ext) for ext in exts])
|
||||
minwidth = 6
|
||||
self.stats["TOTAL"] = {}
|
||||
for col in cols:
|
||||
total = 0
|
||||
cw = max(minwidth, len(col))
|
||||
for ext in exts:
|
||||
value = self.stats[ext].get(col)
|
||||
if value is None:
|
||||
w = 0
|
||||
else:
|
||||
w = len("%d" % value)
|
||||
total += value
|
||||
cw = max(cw, w)
|
||||
cw = max(cw, len(str(total)))
|
||||
colwidth[col] = cw
|
||||
self.stats["TOTAL"][col] = total
|
||||
exts.append("TOTAL")
|
||||
for ext in exts:
|
||||
self.stats[ext]["ext"] = ext
|
||||
cols.insert(0, "ext")
|
||||
def printheader():
|
||||
for col in cols:
|
||||
print "%*s" % (colwidth[col], col),
|
||||
print
|
||||
printheader()
|
||||
for ext in exts:
|
||||
for col in cols:
|
||||
value = self.stats[ext].get(col, "")
|
||||
print "%*s" % (colwidth[col], value),
|
||||
print
|
||||
printheader() # Another header at the bottom
|
||||
|
||||
def main():
    """Gather statistics for the command-line paths (default: cwd) and report."""
    paths = sys.argv[1:]
    if not paths:
        paths = [os.curdir]
    stats = Stats()
    stats.statargs(paths)
    stats.report()

if __name__ == "__main__":
    main()
|
@ -0,0 +1,61 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Print the product of age and size of each file, in suitable units.
|
||||
#
|
||||
# Usage: byteyears [ -a | -m | -c ] file ...
|
||||
#
|
||||
# Options -[amc] select atime, mtime (default) or ctime as age.
|
||||
|
||||
import sys, os, time
|
||||
from stat import *
|
||||
|
||||
def main():
|
||||
|
||||
# Use lstat() to stat files if it exists, else stat()
|
||||
try:
|
||||
statfunc = os.lstat
|
||||
except AttributeError:
|
||||
statfunc = os.stat
|
||||
|
||||
# Parse options
|
||||
if sys.argv[1] == '-m':
|
||||
itime = ST_MTIME
|
||||
del sys.argv[1]
|
||||
elif sys.argv[1] == '-c':
|
||||
itime = ST_CTIME
|
||||
del sys.argv[1]
|
||||
elif sys.argv[1] == '-a':
|
||||
itime = ST_CTIME
|
||||
del sys.argv[1]
|
||||
else:
|
||||
itime = ST_MTIME
|
||||
|
||||
secs_per_year = 365.0 * 24.0 * 3600.0 # Scale factor
|
||||
now = time.time() # Current time, for age computations
|
||||
status = 0 # Exit status, set to 1 on errors
|
||||
|
||||
# Compute max file name length
|
||||
maxlen = 1
|
||||
for filename in sys.argv[1:]:
|
||||
maxlen = max(maxlen, len(filename))
|
||||
|
||||
# Process each argument in turn
|
||||
for filename in sys.argv[1:]:
|
||||
try:
|
||||
st = statfunc(filename)
|
||||
except os.error, msg:
|
||||
sys.stderr.write("can't stat %r: %r\n" % (filename, msg))
|
||||
status = 1
|
||||
st = ()
|
||||
if st:
|
||||
anytime = st[itime]
|
||||
size = st[ST_SIZE]
|
||||
age = now - anytime
|
||||
byteyears = float(size) * float(age) / secs_per_year
|
||||
print filename.ljust(maxlen),
|
||||
print repr(int(byteyears)).rjust(8)
|
||||
|
||||
sys.exit(status)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,167 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Released to the public domain, by Tim Peters, 28 February 2000.
|
||||
|
||||
"""checkappend.py -- search for multi-argument .append() calls.
|
||||
|
||||
Usage: specify one or more file or directory paths:
|
||||
checkappend [-v] file_or_dir [file_or_dir] ...
|
||||
|
||||
Each file_or_dir is checked for multi-argument .append() calls. When
|
||||
a directory, all .py files in the directory, and recursively in its
|
||||
subdirectories, are checked.
|
||||
|
||||
Use -v for status msgs. Use -vv for more status msgs.
|
||||
|
||||
In the absence of -v, the only output is pairs of the form
|
||||
|
||||
filename(linenumber):
|
||||
line containing the suspicious append
|
||||
|
||||
Note that this finds multi-argument append calls regardless of whether
|
||||
they're attached to list objects. If a module defines a class with an
|
||||
append method that takes more than one argument, calls to that method
|
||||
will be listed.
|
||||
|
||||
Note that this will not find multi-argument list.append calls made via a
|
||||
bound method object. For example, this is not caught:
|
||||
|
||||
somelist = []
|
||||
push = somelist.append
|
||||
push(1, 2, 3)
|
||||
"""
|
||||
|
||||
__version__ = 1, 0, 0
|
||||
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
import tokenize
|
||||
|
||||
verbose = 0
|
||||
|
||||
def errprint(*args):
    """Write the space-joined string arguments to stderr, newline-terminated."""
    sys.stderr.write(' '.join(args))
    sys.stderr.write("\n")
|
||||
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
global verbose
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "v")
|
||||
except getopt.error, msg:
|
||||
errprint(str(msg) + "\n\n" + __doc__)
|
||||
return
|
||||
for opt, optarg in opts:
|
||||
if opt == '-v':
|
||||
verbose = verbose + 1
|
||||
if not args:
|
||||
errprint(__doc__)
|
||||
return
|
||||
for arg in args:
|
||||
check(arg)
|
||||
|
||||
def check(file):
|
||||
if os.path.isdir(file) and not os.path.islink(file):
|
||||
if verbose:
|
||||
print "%r: listing directory" % (file,)
|
||||
names = os.listdir(file)
|
||||
for name in names:
|
||||
fullname = os.path.join(file, name)
|
||||
if ((os.path.isdir(fullname) and
|
||||
not os.path.islink(fullname))
|
||||
or os.path.normcase(name[-3:]) == ".py"):
|
||||
check(fullname)
|
||||
return
|
||||
|
||||
try:
|
||||
f = open(file)
|
||||
except IOError, msg:
|
||||
errprint("%r: I/O Error: %s" % (file, msg))
|
||||
return
|
||||
|
||||
if verbose > 1:
|
||||
print "checking %r ..." % (file,)
|
||||
|
||||
ok = AppendChecker(file, f).run()
|
||||
if verbose and ok:
|
||||
print "%r: Clean bill of health." % (file,)
|
||||
|
||||
# Token-scanner states: looking for a '.', then 'append', then '(', then a
# top-level ',', or skipping to the end of an already-reported statement.
FIND_DOT, FIND_APPEND, FIND_LPAREN, FIND_COMMA, FIND_STMT = range(5)
|
||||
|
||||
class AppendChecker:
|
||||
def __init__(self, fname, file):
|
||||
self.fname = fname
|
||||
self.file = file
|
||||
self.state = FIND_DOT
|
||||
self.nerrors = 0
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
tokenize.tokenize(self.file.readline, self.tokeneater)
|
||||
except tokenize.TokenError, msg:
|
||||
errprint("%r: Token Error: %s" % (self.fname, msg))
|
||||
self.nerrors = self.nerrors + 1
|
||||
return self.nerrors == 0
|
||||
|
||||
def tokeneater(self, type, token, start, end, line,
|
||||
NEWLINE=tokenize.NEWLINE,
|
||||
JUNK=(tokenize.COMMENT, tokenize.NL),
|
||||
OP=tokenize.OP,
|
||||
NAME=tokenize.NAME):
|
||||
|
||||
state = self.state
|
||||
|
||||
if type in JUNK:
|
||||
pass
|
||||
|
||||
elif state is FIND_DOT:
|
||||
if type is OP and token == ".":
|
||||
state = FIND_APPEND
|
||||
|
||||
elif state is FIND_APPEND:
|
||||
if type is NAME and token == "append":
|
||||
self.line = line
|
||||
self.lineno = start[0]
|
||||
state = FIND_LPAREN
|
||||
else:
|
||||
state = FIND_DOT
|
||||
|
||||
elif state is FIND_LPAREN:
|
||||
if type is OP and token == "(":
|
||||
self.level = 1
|
||||
state = FIND_COMMA
|
||||
else:
|
||||
state = FIND_DOT
|
||||
|
||||
elif state is FIND_COMMA:
|
||||
if type is OP:
|
||||
if token in ("(", "{", "["):
|
||||
self.level = self.level + 1
|
||||
elif token in (")", "}", "]"):
|
||||
self.level = self.level - 1
|
||||
if self.level == 0:
|
||||
state = FIND_DOT
|
||||
elif token == "," and self.level == 1:
|
||||
self.nerrors = self.nerrors + 1
|
||||
print "%s(%d):\n%s" % (self.fname, self.lineno,
|
||||
self.line)
|
||||
# don't gripe about this stmt again
|
||||
state = FIND_STMT
|
||||
|
||||
elif state is FIND_STMT:
|
||||
if type is NEWLINE:
|
||||
state = FIND_DOT
|
||||
|
||||
else:
|
||||
raise SystemError("unknown internal state '%r'" % (state,))
|
||||
|
||||
self.state = state
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,66 @@
|
||||
#! /usr/bin/env python
|
||||
# Check that all ".pyc" files exist and are up-to-date
|
||||
# Uses module 'os'
|
||||
|
||||
import sys
|
||||
import os
|
||||
from stat import ST_MTIME
|
||||
import imp
|
||||
|
||||
def main():
|
||||
silent = 0
|
||||
verbose = 0
|
||||
if sys.argv[1:]:
|
||||
if sys.argv[1] == '-v':
|
||||
verbose = 1
|
||||
elif sys.argv[1] == '-s':
|
||||
silent = 1
|
||||
MAGIC = imp.get_magic()
|
||||
if not silent:
|
||||
print 'Using MAGIC word', repr(MAGIC)
|
||||
for dirname in sys.path:
|
||||
try:
|
||||
names = os.listdir(dirname)
|
||||
except os.error:
|
||||
print 'Cannot list directory', repr(dirname)
|
||||
continue
|
||||
if not silent:
|
||||
print 'Checking ', repr(dirname), '...'
|
||||
names.sort()
|
||||
for name in names:
|
||||
if name[-3:] == '.py':
|
||||
name = os.path.join(dirname, name)
|
||||
try:
|
||||
st = os.stat(name)
|
||||
except os.error:
|
||||
print 'Cannot stat', repr(name)
|
||||
continue
|
||||
if verbose:
|
||||
print 'Check', repr(name), '...'
|
||||
name_c = name + 'c'
|
||||
try:
|
||||
f = open(name_c, 'r')
|
||||
except IOError:
|
||||
print 'Cannot open', repr(name_c)
|
||||
continue
|
||||
magic_str = f.read(4)
|
||||
mtime_str = f.read(4)
|
||||
f.close()
|
||||
if magic_str <> MAGIC:
|
||||
print 'Bad MAGIC word in ".pyc" file',
|
||||
print repr(name_c)
|
||||
continue
|
||||
mtime = get_long(mtime_str)
|
||||
if mtime == 0 or mtime == -1:
|
||||
print 'Bad ".pyc" file', repr(name_c)
|
||||
elif mtime <> st[ST_MTIME]:
|
||||
print 'Out-of-date ".pyc" file',
|
||||
print repr(name_c)
|
||||
|
||||
def get_long(s):
    """Decode a 4-byte little-endian string as an int; -1 if wrong length.

    -1 doubles as the error value because a .pyc mtime of -1 is invalid.
    """
    # Fix: replaced the deprecated '<>' operator with '!='.
    if len(s) != 4:
        return -1
    return ord(s[0]) + (ord(s[1])<<8) + (ord(s[2])<<16) + (ord(s[3])<<24)

if __name__ == '__main__':
    main()
|
@ -0,0 +1,190 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# This script is obsolete -- it is kept for historical purposes only.
|
||||
#
|
||||
# Fix Python source files to use the new class definition syntax, i.e.,
|
||||
# the syntax used in Python versions before 0.9.8:
|
||||
# class C() = base(), base(), ...: ...
|
||||
# is changed to the current syntax:
|
||||
# class C(base, base, ...): ...
|
||||
#
|
||||
# The script uses heuristics to find class definitions that usually
|
||||
# work but occasionally can fail; carefully check the output!
|
||||
#
|
||||
# Command line arguments are files or directories to be processed.
|
||||
# Directories are searched recursively for files whose name looks
|
||||
# like a python module.
|
||||
# Symbolic links are always ignored (except as explicit directory
|
||||
# arguments). Of course, the original file is kept as a back-up
|
||||
# (with a "~" attached to its name).
|
||||
#
|
||||
# Changes made are reported to stdout in a diff-like format.
|
||||
#
|
||||
# Undoubtedly you can do this using find and sed or perl, but this is
|
||||
# a nice example of Python code that recurses down a directory tree
|
||||
# and uses regular expressions. Also note several subtleties like
|
||||
# preserving the file's mode and avoiding to even write a temp file
|
||||
# when no changes are needed for a file.
|
||||
#
|
||||
# NB: by changing only the function fixline() you can turn this
|
||||
# into a program for a different change to Python programs...
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
from stat import *
|
||||
|
||||
err = sys.stderr.write
|
||||
dbg = err
|
||||
rep = sys.stdout.write
|
||||
|
||||
def main():
    """Fix each path argument; exit 2 on a usage error, 1 if anything failed."""
    bad = 0
    if not sys.argv[1:]:  # No arguments
        err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
        sys.exit(2)
    for arg in sys.argv[1:]:
        if os.path.isdir(arg):
            if recursedown(arg):
                bad = 1
        elif os.path.islink(arg):
            # Explicit symlink arguments are refused, unlike directories.
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg):
                bad = 1
    sys.exit(bad)
|
||||
|
||||
ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    """Return True if *name* looks like a Python module file (e.g. foo.py)."""
    # Fix: re.match returns a match object or None, not an integer position
    # like the old 'regex' module did, so comparing the result with >= 0 is
    # wrong; test against None explicitly.
    return ispythonprog.match(name) is not None
|
||||
|
||||
def recursedown(dirname):
|
||||
dbg('recursedown(%r)\n' % (dirname,))
|
||||
bad = 0
|
||||
try:
|
||||
names = os.listdir(dirname)
|
||||
except os.error, msg:
|
||||
err('%s: cannot list directory: %r\n' % (dirname, msg))
|
||||
return 1
|
||||
names.sort()
|
||||
subdirs = []
|
||||
for name in names:
|
||||
if name in (os.curdir, os.pardir): continue
|
||||
fullname = os.path.join(dirname, name)
|
||||
if os.path.islink(fullname): pass
|
||||
elif os.path.isdir(fullname):
|
||||
subdirs.append(fullname)
|
||||
elif ispython(name):
|
||||
if fix(fullname): bad = 1
|
||||
for fullname in subdirs:
|
||||
if recursedown(fullname): bad = 1
|
||||
return bad
|
||||
|
||||
def fix(filename):
|
||||
## dbg('fix(%r)\n' % (filename,))
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, msg:
|
||||
err('%s: cannot open: %r\n' % (filename, msg))
|
||||
return 1
|
||||
head, tail = os.path.split(filename)
|
||||
tempname = os.path.join(head, '@' + tail)
|
||||
g = None
|
||||
# If we find a match, we rewind the file and start over but
|
||||
# now copy everything to a temp file.
|
||||
lineno = 0
|
||||
while 1:
|
||||
line = f.readline()
|
||||
if not line: break
|
||||
lineno = lineno + 1
|
||||
while line[-2:] == '\\\n':
|
||||
nextline = f.readline()
|
||||
if not nextline: break
|
||||
line = line + nextline
|
||||
lineno = lineno + 1
|
||||
newline = fixline(line)
|
||||
if newline != line:
|
||||
if g is None:
|
||||
try:
|
||||
g = open(tempname, 'w')
|
||||
except IOError, msg:
|
||||
f.close()
|
||||
err('%s: cannot create: %r\n' % (tempname, msg))
|
||||
return 1
|
||||
f.seek(0)
|
||||
lineno = 0
|
||||
rep(filename + ':\n')
|
||||
continue # restart from the beginning
|
||||
rep(repr(lineno) + '\n')
|
||||
rep('< ' + line)
|
||||
rep('> ' + newline)
|
||||
if g is not None:
|
||||
g.write(newline)
|
||||
|
||||
# End of file
|
||||
f.close()
|
||||
if not g: return 0 # No changes
|
||||
|
||||
# Finishing touch -- move files
|
||||
|
||||
# First copy the file's mode to the temp file
|
||||
try:
|
||||
statbuf = os.stat(filename)
|
||||
os.chmod(tempname, statbuf[ST_MODE] & 07777)
|
||||
except os.error, msg:
|
||||
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
|
||||
# Then make a backup of the original file as filename~
|
||||
try:
|
||||
os.rename(filename, filename + '~')
|
||||
except os.error, msg:
|
||||
err('%s: warning: backup failed (%r)\n' % (filename, msg))
|
||||
# Now move the temp file to the original file
|
||||
try:
|
||||
os.rename(tempname, filename)
|
||||
except os.error, msg:
|
||||
err('%s: rename failed (%r)\n' % (filename, msg))
|
||||
return 1
|
||||
# Return succes
|
||||
return 0
|
||||
|
||||
# This expression doesn't catch *all* class definition headers,
# but it's pretty darn close.
# NOTE(review): these pattern strings look like half-converted 'regex'-module
# patterns and may have lost escapes in transit -- verify against upstream.
classexpr = '^([ \t]*class +[a-zA-Z0-9_]+) *( *) *((=.*)?):'
classprog = re.compile(classexpr)

# Expressions for finding base class expressions.
baseexpr = '^ *(.*) *( *) *$'
baseprog = re.compile(baseexpr)

def fixline(line):
    """Rewrite an old-syntax class header; return other lines unchanged.

    Fix: the original compared re results with integers and read '.regs'
    off the compiled *pattern* (old 'regex' module semantics), which raises
    AttributeError under 're' whenever a class header matched.  It now uses
    the match object returned by match().
    """
    m = classprog.match(line)
    if m is None:  # No 'class' keyword -- no change
        return line

    (a0, b0), (a1, b1), (a2, b2) = m.regs[:3]
    # a0, b0 = Whole match (up to ':')
    # a1, b1 = First subexpression (up to classname)
    # a2, b2 = Second subexpression (=.*)
    head = line[:b1]
    tail = line[b0:]  # Unmatched rest of line

    if a2 == b2:  # No base classes -- easy case
        return head + ':' + tail

    # Get rid of leading '='
    basepart = line[a2+1:b2]

    # Extract list of base expressions
    bases = basepart.split(',')

    # Strip trailing '()' from each base expression
    for i in range(len(bases)):
        mb = baseprog.match(bases[i])
        if mb is not None:
            x1, y1 = mb.regs[1]
            bases[i] = bases[i][x1:y1]

    # Join the bases back again and build the new line
    basepart = ', '.join(bases)

    return head + '(' + basepart + '):' + tail

if __name__ == '__main__':
    main()
|
@ -0,0 +1,276 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""cleanfuture [-d][-r][-v] path ...
|
||||
|
||||
-d Dry run. Analyze, but don't make any changes to, files.
|
||||
-r Recurse. Search for all .py files in subdirectories too.
|
||||
-v Verbose. Print informative msgs.
|
||||
|
||||
Search Python (.py) files for future statements, and remove the features
|
||||
from such statements that are already mandatory in the version of Python
|
||||
you're using.
|
||||
|
||||
Pass one or more file and/or directory paths. When a directory path, all
|
||||
.py files within the directory will be examined, and, if the -r option is
|
||||
given, likewise recursively for subdirectories.
|
||||
|
||||
Overwrites files in place, renaming the originals with a .bak extension. If
|
||||
cleanfuture finds nothing to change, the file is left alone. If cleanfuture
|
||||
does change a file, the changed file is a fixed-point (i.e., running
|
||||
cleanfuture on the resulting .py file won't change it again, at least not
|
||||
until you try it again with a later Python release).
|
||||
|
||||
Limitations: You can do these things, but this tool won't help you then:
|
||||
|
||||
+ A future statement cannot be mixed with any other statement on the same
|
||||
physical line (separated by semicolon).
|
||||
|
||||
+ A future statement cannot contain an "as" clause.
|
||||
|
||||
Example: Assuming you're using Python 2.2, if a file containing
|
||||
|
||||
from __future__ import nested_scopes, generators
|
||||
|
||||
is analyzed by cleanfuture, the line is rewritten to
|
||||
|
||||
from __future__ import generators
|
||||
|
||||
because nested_scopes is no longer optional in 2.2 but generators is.
|
||||
"""
|
||||
|
||||
import __future__
|
||||
import tokenize
|
||||
import os
|
||||
import sys
|
||||
|
||||
dryrun = 0
|
||||
recurse = 0
|
||||
verbose = 0
|
||||
|
||||
def errprint(*args):
    """Write the arguments to stderr as one space-joined message.

    A trailing newline is appended when the message lacks one.
    """
    msg = ' '.join(map(str, args))
    if not msg.endswith('\n'):
        msg += '\n'
    sys.stderr.write(msg)
|
||||
|
||||
def main():
|
||||
import getopt
|
||||
global verbose, recurse, dryrun
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "drv")
|
||||
except getopt.error, msg:
|
||||
errprint(msg)
|
||||
return
|
||||
for o, a in opts:
|
||||
if o == '-d':
|
||||
dryrun += 1
|
||||
elif o == '-r':
|
||||
recurse += 1
|
||||
elif o == '-v':
|
||||
verbose += 1
|
||||
if not args:
|
||||
errprint("Usage:", __doc__)
|
||||
return
|
||||
for arg in args:
|
||||
check(arg)
|
||||
|
||||
def check(file):
|
||||
if os.path.isdir(file) and not os.path.islink(file):
|
||||
if verbose:
|
||||
print "listing directory", file
|
||||
names = os.listdir(file)
|
||||
for name in names:
|
||||
fullname = os.path.join(file, name)
|
||||
if ((recurse and os.path.isdir(fullname) and
|
||||
not os.path.islink(fullname))
|
||||
or name.lower().endswith(".py")):
|
||||
check(fullname)
|
||||
return
|
||||
|
||||
if verbose:
|
||||
print "checking", file, "...",
|
||||
try:
|
||||
f = open(file)
|
||||
except IOError, msg:
|
||||
errprint("%r: I/O Error: %s" % (file, str(msg)))
|
||||
return
|
||||
|
||||
ff = FutureFinder(f, file)
|
||||
changed = ff.run()
|
||||
if changed:
|
||||
ff.gettherest()
|
||||
f.close()
|
||||
if changed:
|
||||
if verbose:
|
||||
print "changed."
|
||||
if dryrun:
|
||||
print "But this is a dry run, so leaving it alone."
|
||||
for s, e, line in changed:
|
||||
print "%r lines %d-%d" % (file, s+1, e+1)
|
||||
for i in range(s, e+1):
|
||||
print ff.lines[i],
|
||||
if line is None:
|
||||
print "-- deleted"
|
||||
else:
|
||||
print "-- change to:"
|
||||
print line,
|
||||
if not dryrun:
|
||||
bak = file + ".bak"
|
||||
if os.path.exists(bak):
|
||||
os.remove(bak)
|
||||
os.rename(file, bak)
|
||||
if verbose:
|
||||
print "renamed", file, "to", bak
|
||||
g = open(file, "w")
|
||||
ff.write(g)
|
||||
g.close()
|
||||
if verbose:
|
||||
print "wrote new", file
|
||||
else:
|
||||
if verbose:
|
||||
print "unchanged."
|
||||
|
||||
class FutureFinder:
    """Scan a Python source file for its leading __future__ statements.

    run() tokenizes the file and records, in self.changed, a
    (start_index, end_index, replacement_line) triple for every
    "from __future__ import ..." statement that names at least one
    feature which is already mandatory (or withdrawn) in the running
    interpreter.  write() then applies those rewrites to the buffered
    lines.
    """

    def __init__(self, f, fname):
        # f: open file object to read; fname: its name, used in messages.
        self.f = f
        self.fname = fname
        self.ateof = 0
        self.lines = []  # raw file lines

        # List of (start_index, end_index, new_line) triples.
        self.changed = []

    # Line-getter for tokenize.
    def getline(self):
        """Return the next line (buffering it in self.lines), '' at EOF."""
        if self.ateof:
            return ""
        line = self.f.readline()
        if line == "":
            self.ateof = 1
        else:
            self.lines.append(line)
        return line

    def run(self):
        """Parse the file's prologue; return the list of rewrite triples.

        Returns [] (and prints a message via errprint) if a future
        statement cannot be parsed.
        """
        STRING = tokenize.STRING
        NL = tokenize.NL
        NEWLINE = tokenize.NEWLINE
        COMMENT = tokenize.COMMENT
        NAME = tokenize.NAME
        OP = tokenize.OP

        changed = self.changed
        get = tokenize.generate_tokens(self.getline).next
        type, token, (srow, scol), (erow, ecol), line = get()

        # Chew up initial comments and blank lines (if any).
        while type in (COMMENT, NL, NEWLINE):
            type, token, (srow, scol), (erow, ecol), line = get()

        # Chew up docstring (if any -- and it may be implicitly catenated!).
        while type is STRING:
            type, token, (srow, scol), (erow, ecol), line = get()

        # Analyze the future stmts.
        while 1:
            # Chew up comments and blank lines (if any).
            while type in (COMMENT, NL, NEWLINE):
                type, token, (srow, scol), (erow, ecol), line = get()

            if not (type is NAME and token == "from"):
                break
            startline = srow - 1    # tokenize is one-based
            type, token, (srow, scol), (erow, ecol), line = get()

            if not (type is NAME and token == "__future__"):
                break
            type, token, (srow, scol), (erow, ecol), line = get()

            if not (type is NAME and token == "import"):
                break
            type, token, (srow, scol), (erow, ecol), line = get()

            # Get the list of features.
            features = []
            while type is NAME:
                features.append(token)
                type, token, (srow, scol), (erow, ecol), line = get()

                if not (type is OP and token == ','):
                    break
                type, token, (srow, scol), (erow, ecol), line = get()

            # A trailing comment?
            comment = None
            if type is COMMENT:
                comment = token
                type, token, (srow, scol), (erow, ecol), line = get()

            if type is not NEWLINE:
                errprint("Skipping file %r; can't parse line %d:\n%s" %
                         (self.fname, srow, line))
                return []

            endline = srow - 1

            # Check for obsolete features.
            okfeatures = []
            for f in features:
                object = getattr(__future__, f, None)
                if object is None:
                    # A feature we don't know about yet -- leave it in.
                    # They'll get a compile-time error when they compile
                    # this program, but that's not our job to sort out.
                    okfeatures.append(f)
                else:
                    released = object.getMandatoryRelease()
                    if released is None or released <= sys.version_info:
                        # Withdrawn or obsolete.
                        pass
                    else:
                        okfeatures.append(f)

            # Rewrite the line if at least one future-feature is obsolete.
            if len(okfeatures) < len(features):
                if len(okfeatures) == 0:
                    # Every feature is redundant: delete the whole stmt.
                    line = None
                else:
                    line = "from __future__ import "
                    line += ', '.join(okfeatures)
                    if comment is not None:
                        line += ' ' + comment
                    line += '\n'
                changed.append((startline, endline, line))

            # Loop back for more future statements.

        return changed

    def gettherest(self):
        """Read and stash the untokenized remainder of the file."""
        if self.ateof:
            self.therest = ''
        else:
            self.therest = self.f.read()

    def write(self, f):
        """Write the rewritten file to open file object f.

        Must be called at most once, after run() reported changes and
        gettherest() was called.
        """
        changed = self.changed
        assert changed
        # Prevent calling this again.
        self.changed = []
        # Apply changes in reverse order.
        changed.reverse()
        for s, e, line in changed:
            if line is None:
                # pure deletion
                del self.lines[s:e+1]
            else:
                self.lines[s:e+1] = [line]
        f.writelines(self.lines)
        # Copy over the remainder of the file.
        if self.therest:
            f.write(self.therest)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,127 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""
|
||||
combinerefs path
|
||||
|
||||
A helper for analyzing PYTHONDUMPREFS output.
|
||||
|
||||
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
|
||||
time Py_Finalize() prints the list of all live objects twice: first it
|
||||
prints the repr() of each object while the interpreter is still fully intact.
|
||||
After cleaning up everything it can, it prints all remaining live objects
|
||||
again, but the second time just prints their addresses, refcounts, and type
|
||||
names (because the interpreter has been torn down, calling repr methods at
|
||||
this point can get into infinite loops or blow up).
|
||||
|
||||
Save all this output into a file, then run this script passing the path to
|
||||
that file. The script finds both output chunks, combines them, then prints
|
||||
a line of output for each object still alive at the end:
|
||||
|
||||
address refcnt typename repr
|
||||
|
||||
address is the address of the object, in whatever format the platform C
|
||||
produces for a %p format code.
|
||||
|
||||
refcnt is of the form
|
||||
|
||||
"[" ref "]"
|
||||
|
||||
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
|
||||
or
|
||||
|
||||
"[" ref_before "->" ref_after "]"
|
||||
|
||||
if the refcount changed.
|
||||
|
||||
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
|
||||
output block.
|
||||
|
||||
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
|
||||
CAUTION: If object is a container type, it may not actually contain all the
|
||||
objects shown in the repr: the repr was captured from the first output block,
|
||||
and some of the containees may have been released since then. For example,
|
||||
it's common for the line showing the dict of interned strings to display
|
||||
strings that no longer exist at the end of Py_Finalize; this can be recognized
|
||||
(albeit painfully) because such containees don't have a line of their own.
|
||||
|
||||
The objects are listed in allocation order, with most-recently allocated
|
||||
printed first, and the first object allocated printed last.
|
||||
|
||||
|
||||
Simple examples:
|
||||
|
||||
00857060 [14] str '__len__'
|
||||
|
||||
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
|
||||
output blocks said there were 14 references to it. This is probably due to
|
||||
C modules that intern the string "__len__" and keep a reference to it in a
|
||||
file static.
|
||||
|
||||
00857038 [46->5] tuple ()
|
||||
|
||||
46-5 = 41 references to the empty tuple were removed by the cleanup actions
|
||||
between the times PYTHONDUMPREFS produced output.
|
||||
|
||||
00858028 [1025->1456] str '<dummy key>'
|
||||
|
||||
The string '<dummy key>', which is used in dictobject.c to overwrite a real
|
||||
key that gets deleted, grew several hundred references during cleanup. It
|
||||
suggests that stuff did get removed from dicts by cleanup, but that the dicts
|
||||
themselves are staying alive for some reason. """
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
# Generate lines from fileiter. If whilematch is true, continue reading
|
||||
# while the regexp object pat matches line. If whilematch is false, lines
|
||||
# are read so long as pat doesn't match them. In any case, the first line
|
||||
# that doesn't match pat (when whilematch is true), or that does match pat
|
||||
# (when whilematch is false), is lost, and fileiter will resume at the line
|
||||
# following it.
|
||||
def read(fileiter, pat, whilematch):
    """Yield lines from fileiter gated by regexp pat.

    With whilematch true, yield lines while pat matches them; with
    whilematch false, yield lines while pat does NOT match.  The first
    line on the wrong side of the test is consumed and lost; fileiter
    resumes at the line after it.
    """
    for candidate in fileiter:
        matched = pat.match(candidate) is not None
        if matched != whilematch:
            break
        yield candidate
|
||||
|
||||
def combine(fname):
    """Combine the two PYTHONDUMPREFS blocks in file fname and print them.

    Matches each surviving object's address across the "Remaining
    objects:" (repr) block and the "Remaining object addresses:"
    (refcount/typename) block, printing one merged line per object and a
    final before/after count.
    """
    f = file(fname)
    fi = iter(f)

    # Skip everything up to (and including) the "Remaining objects:" header.
    for line in read(fi, re.compile(r'^Remaining objects:$'), False):
        pass

    # Each entry line looks like:  <hexaddr> [<refcount>] <rest>
    crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
    addr2rc = {}
    addr2guts = {}
    before = 0
    for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
        m = crack.match(line)
        if m:
            # Tuple targets assign left to right, so addr is bound first
            # and then immediately used as the key for the two dicts.
            addr, addr2rc[addr], addr2guts[addr] = m.groups()
            before += 1
        else:
            print '??? skipped:', line

    after = 0
    for line in read(fi, crack, True):
        after += 1
        m = crack.match(line)
        assert m
        addr, rc, guts = m.groups()     # guts is type name here
        if addr not in addr2rc:
            print '??? new object created while tearing down:', line.rstrip()
            continue
        print addr,
        if rc == addr2rc[addr]:
            # Refcount unchanged between the two dumps.
            print '[%s]' % rc,
        else:
            print '[%s->%s]' % (addr2rc[addr], rc),
        print guts, addr2guts[addr]

    f.close()
    print "%d objects before, %d after" % (before, after)
|
@ -0,0 +1,26 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Copy one file's atime and mtime to another
|
||||
|
||||
import sys
|
||||
import os
|
||||
from stat import ST_ATIME, ST_MTIME # Really constants 7 and 8
|
||||
|
||||
def main():
    """Copy sys.argv[1]'s access and modification times onto sys.argv[2].

    Exits with status 2 on usage error or if the times cannot be set,
    and status 1 if the source file cannot be stat'ed.
    """
    # `!=` instead of the long-deprecated `<>` spelling (removed in Py3).
    if len(sys.argv) != 3:
        sys.stderr.write('usage: copytime source destination\n')
        sys.exit(2)
    file1, file2 = sys.argv[1], sys.argv[2]
    try:
        stat1 = os.stat(file1)
    except os.error:
        sys.stderr.write(file1 + ': cannot stat\n')
        sys.exit(1)
    try:
        os.utime(file2, (stat1[ST_ATIME], stat1[ST_MTIME]))
    except os.error:
        sys.stderr.write(file2 + ': cannot change time\n')
        sys.exit(2)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,23 @@
|
||||
#! /usr/bin/env python
|
||||
"Replace CRLF with LF in argument files. Print names of changed files."
|
||||
|
||||
import sys, os
|
||||
|
||||
def main():
|
||||
for filename in sys.argv[1:]:
|
||||
if os.path.isdir(filename):
|
||||
print filename, "Directory!"
|
||||
continue
|
||||
data = open(filename, "rb").read()
|
||||
if '\0' in data:
|
||||
print filename, "Binary!"
|
||||
continue
|
||||
newdata = data.replace("\r\n", "\n")
|
||||
if newdata != data:
|
||||
print filename
|
||||
f = open(filename, "wb")
|
||||
f.write(newdata)
|
||||
f.close()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,72 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""Print a list of files that are mentioned in CVS directories.
|
||||
|
||||
Usage: cvsfiles.py [-n file] [directory] ...
|
||||
|
||||
If the '-n file' option is given, only files under CVS that are newer
|
||||
than the given file are printed; by default, all files under CVS are
|
||||
printed. As a special case, if a file does not exist, it is always
|
||||
printed.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import stat
|
||||
import getopt
|
||||
|
||||
cutofftime = 0
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "n:")
|
||||
except getopt.error, msg:
|
||||
print msg
|
||||
print __doc__,
|
||||
return 1
|
||||
global cutofftime
|
||||
newerfile = None
|
||||
for o, a in opts:
|
||||
if o == '-n':
|
||||
cutofftime = getmtime(a)
|
||||
if args:
|
||||
for arg in args:
|
||||
process(arg)
|
||||
else:
|
||||
process(".")
|
||||
|
||||
def process(dir):
    """Print files listed in dir's CVS/Entries, then recurse into subdirs.

    A file is printed unless the module global cutofftime is set and the
    file's mtime is not newer than it (nonexistent files get mtime 0 from
    getmtime, so with no cutoff they are always printed).
    """
    cvsdir = 0
    subdirs = []
    names = os.listdir(dir)
    for name in names:
        fullname = os.path.join(dir, name)
        if name == "CVS":
            cvsdir = fullname
        else:
            # Collect real subdirectories (not symlinks) for recursion.
            if os.path.isdir(fullname):
                if not os.path.islink(fullname):
                    subdirs.append(fullname)
    if cvsdir:
        entries = os.path.join(cvsdir, "Entries")
        for e in open(entries).readlines():
            # Entries lines look like "/name/rev/date/..."; a leading
            # empty field means the line describes a file.
            words = e.split('/')
            if words[0] == '' and words[1:]:
                name = words[1]
                fullname = os.path.join(dir, name)
                if cutofftime and getmtime(fullname) <= cutofftime:
                    pass
                else:
                    print fullname
    for sub in subdirs:
        process(sub)
|
||||
|
||||
def getmtime(filename):
    """Return filename's modification time, or 0 if it cannot be stat'ed."""
    try:
        status = os.stat(filename)
    except os.error:
        return 0
    else:
        return status[stat.ST_MTIME]
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,135 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
Synopsis: %(prog)s [-h|-g|-b|-r|-a] dbfile [ picklefile ]
|
||||
|
||||
Convert the database file given on the command line to a pickle
|
||||
representation. The optional flags indicate the type of the database:
|
||||
|
||||
-a - open using anydbm
|
||||
-b - open as bsddb btree file
|
||||
-d - open as dbm file
|
||||
-g - open as gdbm file
|
||||
-h - open as bsddb hash file
|
||||
-r - open as bsddb recno file
|
||||
|
||||
The default is hash. If a pickle file is named it is opened for write
|
||||
access (deleting any existing data). If no pickle file is named, the pickle
|
||||
output is written to standard output.
|
||||
|
||||
"""
|
||||
|
||||
import getopt
|
||||
try:
|
||||
import bsddb
|
||||
except ImportError:
|
||||
bsddb = None
|
||||
try:
|
||||
import dbm
|
||||
except ImportError:
|
||||
dbm = None
|
||||
try:
|
||||
import gdbm
|
||||
except ImportError:
|
||||
gdbm = None
|
||||
try:
|
||||
import anydbm
|
||||
except ImportError:
|
||||
anydbm = None
|
||||
import sys
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
|
||||
prog = sys.argv[0]
|
||||
|
||||
def usage():
    """Write the module usage message (the docstring) to stderr."""
    text = __doc__ % globals()
    sys.stderr.write(text)
|
||||
|
||||
def main(args):
    """Dump the database named in args to a pickle stream.

    args holds the command-line arguments: option flags plus
    "dbfile [picklefile]".  With no picklefile the pickle is written to
    stdout.  Returns 0 on success, 1 on any usage or open error.
    """
    try:
        opts, args = getopt.getopt(args, "hbrdag",
                                   ["hash", "btree", "recno", "dbm",
                                    "gdbm", "anydbm"])
    except getopt.error:
        usage()
        return 1

    if len(args) == 0 or len(args) > 2:
        usage()
        return 1
    elif len(args) == 1:
        # No pickle file named: write the pickle to stdout.
        dbfile = args[0]
        pfile = sys.stdout
    else:
        dbfile = args[0]
        try:
            pfile = open(args[1], 'wb')
        except IOError:
            sys.stderr.write("Unable to open %s\n" % args[1])
            return 1

    # Pick the opener matching the requested database type.  Each db
    # module was imported at the top with a None fallback, so attribute
    # access on a missing module raises AttributeError.
    dbopen = None
    for opt, arg in opts:
        if opt in ("-h", "--hash"):
            try:
                dbopen = bsddb.hashopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-b", "--btree"):
            try:
                dbopen = bsddb.btopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-r", "--recno"):
            try:
                dbopen = bsddb.rnopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-a", "--anydbm"):
            try:
                dbopen = anydbm.open
            except AttributeError:
                sys.stderr.write("anydbm module unavailable.\n")
                return 1
        elif opt in ("-g", "--gdbm"):
            try:
                dbopen = gdbm.open
            except AttributeError:
                sys.stderr.write("gdbm module unavailable.\n")
                return 1
        elif opt in ("-d", "--dbm"):
            try:
                dbopen = dbm.open
            except AttributeError:
                sys.stderr.write("dbm module unavailable.\n")
                return 1
    if dbopen is None:
        # No type flag given: default to a bsddb hash file.
        if bsddb is None:
            sys.stderr.write("bsddb module unavailable - ")
            sys.stderr.write("must specify dbtype.\n")
            return 1
        else:
            dbopen = bsddb.hashopen

    try:
        db = dbopen(dbfile, 'r')
    except bsddb.error:
        sys.stderr.write("Unable to open %s. " % dbfile)
        sys.stderr.write("Check for format or version mismatch.\n")
        return 1

    for k in db.keys():
        # 1==1 is just a spelled-out True: dump with pickle protocol 1.
        pickle.dump((k, db[k]), pfile, 1==1)

    db.close()
    pfile.close()

    return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main(sys.argv[1:]))
|
@ -0,0 +1,49 @@
|
||||
""" Command line interface to difflib.py providing diffs in four formats:
|
||||
|
||||
* ndiff: lists every line and highlights interline changes.
|
||||
* context: highlights clusters of changes in a before/after format.
|
||||
* unified: highlights clusters of changes in an inline format.
|
||||
* html: generates side by side comparison with change highlights.
|
||||
|
||||
"""
|
||||
|
||||
import sys, os, time, difflib, optparse
|
||||
|
||||
def main():
    """Command-line diff driver: print fromfile/tofile differences.

    -u unified, -n ndiff, -m HTML side-by-side, default context format;
    -l/--lines sets the number of context lines.
    """

    usage = "usage: %prog [options] fromfile tofile"
    parser = optparse.OptionParser(usage)
    parser.add_option("-c", action="store_true", default=False, help='Produce a context format diff (default)')
    parser.add_option("-u", action="store_true", default=False, help='Produce a unified format diff')
    parser.add_option("-m", action="store_true", default=False, help='Produce HTML side by side diff (can use -c and -l in conjunction)')
    parser.add_option("-n", action="store_true", default=False, help='Produce a ndiff format diff')
    parser.add_option("-l", "--lines", type="int", default=3, help='Set number of context lines (default 3)')
    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    if len(args) != 2:
        parser.error("need to specify both a fromfile and tofile")

    n = options.lines
    fromfile, tofile = args

    # Header timestamps come from the files' modification times.
    fromdate = time.ctime(os.stat(fromfile).st_mtime)
    todate = time.ctime(os.stat(tofile).st_mtime)
    # NOTE(review): the 'U' universal-newline handles are never closed;
    # the script relies on process exit to release them.
    fromlines = open(fromfile, 'U').readlines()
    tolines = open(tofile, 'U').readlines()

    if options.u:
        diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
    elif options.n:
        diff = difflib.ndiff(fromlines, tolines)
    elif options.m:
        diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
    else:
        diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)

    sys.stdout.writelines(diff)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,54 @@
|
||||
Path: cwi.nl!sun4nl!mcsun!uunet!cs.utexas.edu!convex!usenet
|
||||
From: tchrist@convex.COM (Tom Christiansen)
|
||||
Newsgroups: comp.lang.perl
|
||||
Subject: Re: The problems of Perl (Re: Question (silly?))
|
||||
Message-ID: <1992Jan17.053115.4220@convex.com>
|
||||
Date: 17 Jan 92 05:31:15 GMT
|
||||
References: <17458@ector.cs.purdue.edu> <1992Jan16.165347.25583@cherokee.uswest.com> <=#Hues+4@cs.psu.edu>
|
||||
Sender: usenet@convex.com (news access account)
|
||||
Reply-To: tchrist@convex.COM (Tom Christiansen)
|
||||
Organization: CONVEX Realtime Development, Colorado Springs, CO
|
||||
Lines: 83
|
||||
Nntp-Posting-Host: pixel.convex.com
|
||||
|
||||
From the keyboard of flee@cs.psu.edu (Felix Lee):
|
||||
:And Perl is definitely awkward with data types. I haven't yet found a
|
||||
:pleasant way of shoving non-trivial data types into Perl's grammar.
|
||||
|
||||
Yes, it's pretty aweful at that, alright. Sometimes I write perl programs
|
||||
that need them, and sometimes it just takes a little creativity. But
|
||||
sometimes it's not worth it. I actually wrote a C program the other day
|
||||
(gasp) because I didn't want to deal with a game matrix with six links per node.
|
||||
|
||||
:Here's a very simple problem that's tricky to express in Perl: process
|
||||
:the output of "du" to produce output that's indented to reflect the
|
||||
:tree structure, and with each subtree sorted by size. Something like:
|
||||
: 434 /etc
|
||||
: | 344 .
|
||||
: | 50 install
|
||||
: | 35 uucp
|
||||
: | 3 nserve
|
||||
: | | 2 .
|
||||
: | | 1 auth.info
|
||||
: | 1 sm
|
||||
: | 1 sm.bak
|
||||
|
||||
At first I thought I could just keep one local list around
|
||||
at once, but this seems inherently recursive. Which means
|
||||
I need an real recursive data structure. Maybe you could
|
||||
do it with one of the %assoc arrays Larry uses in the begat
|
||||
programs, but I broke down and got dirty. I think the hardest
|
||||
part was matching Felix's desired output exactly. It's not
|
||||
blazingly fast: I should probably inline the &childof routine,
|
||||
but it *was* faster to write than I could have written the
|
||||
equivalent C program.
|
||||
|
||||
|
||||
--tom
|
||||
|
||||
--
|
||||
"GUIs normally make it simple to accomplish simple actions and impossible
|
||||
to accomplish complex actions." --Doug Gwyn (22/Jun/91 in comp.unix.wizards)
|
||||
|
||||
Tom Christiansen tchrist@convex.com convex!tchrist
|
||||
|
@ -0,0 +1,60 @@
|
||||
#! /usr/bin/env python
|
||||
# Format du output in a tree shape
|
||||
|
||||
import os, sys, errno
|
||||
|
||||
def main():
    """Run du over the command-line arguments and print a sorted tree.

    Parses each du output line into (size, path components), folds it
    into a nested dict via store(), then prints via display().  A broken
    pipe while printing is ignored so output can be piped to head(1).
    """
    p = os.popen('du ' + ' '.join(sys.argv[1:]), 'r')
    total, d = None, {}
    for line in p.readlines():
        i = 0
        while line[i] in '0123456789': i = i+1
        # int() instead of eval(): the size field is a plain decimal
        # number, and eval() would execute arbitrary expressions coming
        # from du's output (e.g. via hostile file names).
        size = int(line[:i])
        while line[i] in ' \t': i = i+1
        filename = line[i:-1]
        comps = filename.split('/')
        if comps[0] == '': comps[0] = '/'
        if comps[len(comps)-1] == '': del comps[len(comps)-1]
        total, d = store(size, comps, total, d)
    # Close the pipe once all output is consumed (the original leaked it).
    p.close()
    try:
        display(total, d)
    except IOError as e:
        if e.errno != errno.EPIPE:
            raise
|
||||
|
||||
def store(size, comps, total, d):
    """Merge one du entry into the nested tree dict d.

    comps is the list of path components; an empty list means size is
    the total for the current level.  d maps a component name to a
    (subtotal, subtree) pair.  Returns the updated (total, d) pair.
    """
    if not comps:
        return size, d
    # `in` instead of the deprecated dict.has_key() (removed in Py3).
    if comps[0] not in d:
        d[comps[0]] = None, {}
    t1, d1 = d[comps[0]]
    d[comps[0]] = store(size, comps[1:], t1, d1)
    return total, d
|
||||
|
||||
def display(total, d):
    """Print the whole tree by delegating to show() with an empty prefix."""
    show(total, d, '')
|
||||
|
||||
def show(total, d, prefix):
    """Print one level of the size tree, largest entries first.

    total is this level's own size (may be None), d maps names to
    (size, subtree) pairs, and prefix is the indentation accumulated
    from the enclosing levels.
    """
    if not d: return
    list = []
    sum = 0
    for key in d.keys():
        tsub, dsub = d[key]    # dsub unused here; subtree fetched again below
        list.append((tsub, key))
        if tsub is not None: sum = sum + tsub
##    if sum < total:
##        list.append((total - sum, os.curdir))
    # Sort descending by size (tuples compare on size first).
    list.sort()
    list.reverse()
    # Right-justify sizes to the width of the largest one.
    width = len(repr(list[0][0]))
    for tsub, key in list:
        if tsub is None:
            psub = prefix
        else:
            print prefix + repr(tsub).rjust(width) + ' ' + key
            psub = prefix + ' '*(width-1) + '|' + ' '*(len(key)+1)
        if d.has_key(key):
            show(tsub, d[key][1], psub)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,56 @@
|
||||
#! /usr/bin/env python
|
||||
"""Create a TAGS file for Python programs, usable with GNU Emacs.
|
||||
|
||||
usage: eptags pyfiles...
|
||||
|
||||
The output TAGS file is usable with Emacs version 18, 19, 20.
|
||||
Tagged are:
|
||||
- functions (even inside other defs or classes)
|
||||
- classes
|
||||
|
||||
eptags warns about files it cannot open.
|
||||
eptags will not give warnings about duplicate tags.
|
||||
|
||||
BUGS:
|
||||
Because of tag duplication (methods with the same name in different
|
||||
classes), TAGS files are not very useful for most object-oriented
|
||||
python projects.
|
||||
"""
|
||||
import sys,re
|
||||
|
||||
expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*[:\(]'
|
||||
matcher = re.compile(expr)
|
||||
|
||||
def treat_file(filename, outfp):
    """Append tags found in file named 'filename' to the open file 'outfp'.

    Emits an Emacs TAGS section: a \\f header line with the filename and
    section size, followed by one entry per def/class match.  Unreadable
    files are reported on stderr and skipped.
    """
    try:
        fp = open(filename, 'r')
    except IOError:
        # Only catch the expected open() failure; the original bare
        # `except:` would also have hidden programming errors and
        # KeyboardInterrupt.
        sys.stderr.write('Cannot open %s\n'%filename)
        return
    charno = 0      # byte offset of the current line's start
    lineno = 0
    tags = []
    size = 0        # total size of the tag entries for the header line
    while 1:
        line = fp.readline()
        if not line:
            break
        lineno = lineno + 1
        m = matcher.search(line)
        if m:
            # \177 (DEL) separates the tag text from "lineno,charno".
            tag = m.group(0) + '\177%d,%d\n' % (lineno, charno)
            tags.append(tag)
            size = size + len(tag)
        charno = charno + len(line)
    # Close the input handle (the original leaked it).
    fp.close()
    outfp.write('\f\n%s,%d\n' % (filename,size))
    for tag in tags:
        outfp.write(tag)
|
||||
|
||||
def main():
    """Write a TAGS file in the current directory for every file argument."""
    outfp = open('TAGS', 'w')
    try:
        for filename in sys.argv[1:]:
            treat_file(filename, outfp)
    finally:
        # Close the output so the TAGS data is flushed even on error
        # (the original never closed it).
        outfp.close()
|
||||
|
||||
if __name__=="__main__":
|
||||
main()
|
@ -0,0 +1,117 @@
|
||||
#! /usr/bin/env python
|
||||
"""Find the maximum recursion limit that prevents interpreter termination.
|
||||
|
||||
This script finds the maximum safe recursion limit on a particular
|
||||
platform. If you need to change the recursion limit on your system,
|
||||
this script will tell you a safe upper bound. To use the new limit,
|
||||
call sys.setrecursionlimit().
|
||||
|
||||
This module implements several ways to create infinite recursion in
|
||||
Python. Different implementations end up pushing different numbers of
|
||||
C stack frames, depending on how many calls through Python's abstract
|
||||
C API occur.
|
||||
|
||||
After each round of tests, it prints a message:
|
||||
"Limit of NNNN is fine".
|
||||
|
||||
The highest printed value of "NNNN" is therefore the highest potentially
|
||||
safe limit for your system (which depends on the OS, architecture, but also
|
||||
the compilation flags). Please note that it is practically impossible to
|
||||
test all possible recursion paths in the interpreter, so the results of
|
||||
this test should not be trusted blindly -- although they give a good hint
|
||||
of which values are reasonable.
|
||||
|
||||
NOTE: When the C stack space allocated by your system is exceeded due
|
||||
to excessive recursion, exact behaviour depends on the platform, although
|
||||
the interpreter will always fail in a likely brutal way: either a
|
||||
segmentation fault, a MemoryError, or just a silent abort.
|
||||
|
||||
NB: A program that does not use __methods__ can set a higher limit.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import itertools
|
||||
|
||||
class RecursiveBlowup1:
    # Deliberately recurses forever: __init__ re-invokes itself with no
    # base case, so instantiation pushes frames until the limit is hit.
    def __init__(self):
        self.__init__()
|
||||
|
||||
def test_init():
    # Trigger unbounded recursion through object construction.
    return RecursiveBlowup1()
|
||||
|
||||
class RecursiveBlowup2:
    # __repr__ calls repr(self), recursing until the limit is hit.
    def __repr__(self):
        return repr(self)
|
||||
|
||||
def test_repr():
    # Trigger unbounded recursion through repr().
    return repr(RecursiveBlowup2())
|
||||
|
||||
class RecursiveBlowup4:
    # __add__ swaps the operands and adds again, recursing forever.
    def __add__(self, x):
        return x + self
|
||||
|
||||
def test_add():
    # Trigger unbounded recursion through the + operator.
    return RecursiveBlowup4() + RecursiveBlowup4()
|
||||
|
||||
class RecursiveBlowup5:
    # __getattr__ re-fetches the same missing attribute, recursing forever.
    def __getattr__(self, attr):
        return getattr(self, attr)
|
||||
|
||||
def test_getattr():
    # Trigger unbounded recursion through attribute lookup.
    return RecursiveBlowup5().attr
|
||||
|
||||
class RecursiveBlowup6:
    # __getitem__ indexes itself twice per call with no base case.
    def __getitem__(self, item):
        return self[item - 2] + self[item - 1]
|
||||
|
||||
def test_getitem():
    # Trigger unbounded recursion through item access.
    return RecursiveBlowup6()[5]
|
||||
|
||||
def test_recurse():
    # Plain self-call: the simplest possible infinite recursion.
    return test_recurse()
|
||||
|
||||
def test_cpickle(_cache={}):
    """Recurse inside cPickle by dumping an ever deeper nested list.

    The mutable default _cache is intentional: it persists across calls
    so depths already proven safe at a lower limit are skipped on the
    next call.
    """
    try:
        import cPickle
    except ImportError:
        print "cannot import cPickle, skipped!"
        return
    l = None
    for n in itertools.count():
        try:
            l = _cache[n]
            continue  # Already tried and it works, let's save some time
        except KeyError:
            # Deepen the nesting by 100 levels, then try to pickle it;
            # this either succeeds (and is cached) or blows the C stack.
            for i in range(100):
                l = [l]
            cPickle.dumps(l, protocol=-1)
        _cache[n] = l
|
||||
|
||||
def check_limit(n, test_func_name):
    """Set the recursion limit to n and run the named recursion test.

    The test is expected to hit the limit and raise; if it returns
    normally something is wrong and "Yikes!" is printed.
    """
    sys.setrecursionlimit(n)
    # Print the test's short name (without the "test_" prefix).
    if test_func_name.startswith("test_"):
        print test_func_name[5:]
    else:
        print test_func_name
    test_func = globals()[test_func_name]
    try:
        test_func()
    # AttributeError can be raised because of the way e.g. PyDict_GetItem()
    # silences all exceptions and returns NULL, which is usually interpreted
    # as "missing attribute".
    except (RuntimeError, AttributeError):
        pass
    else:
        print "Yikes!"
|
||||
|
||||
# Probe ever-larger recursion limits until the interpreter dies; the last
# "Limit of NNNN is fine" printed is the highest limit observed to be safe.
limit = 1000
while 1:
    check_limit(limit, "test_recurse")
    check_limit(limit, "test_add")
    check_limit(limit, "test_repr")
    check_limit(limit, "test_init")
    check_limit(limit, "test_getattr")
    check_limit(limit, "test_getitem")
    check_limit(limit, "test_cpickle")
    print "Limit of %d is fine" % limit
    limit = limit + 100
|
@ -0,0 +1,89 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""finddiv - a grep-like tool that looks for division operators.
|
||||
|
||||
Usage: finddiv [-l] file_or_directory ...
|
||||
|
||||
For directory arguments, all files in the directory whose name ends in
|
||||
.py are processed, and subdirectories are processed recursively.
|
||||
|
||||
This actually tokenizes the files to avoid false hits in comments or
|
||||
strings literals.
|
||||
|
||||
By default, this prints all lines containing a / or /= operator, in
|
||||
grep -n style. With the -l option specified, it prints the filename
|
||||
of files that contain at least one / or /= operator.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
import tokenize
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "lh")
|
||||
except getopt.error, msg:
|
||||
usage(msg)
|
||||
return 2
|
||||
if not args:
|
||||
usage("at least one file argument is required")
|
||||
return 2
|
||||
listnames = 0
|
||||
for o, a in opts:
|
||||
if o == "-h":
|
||||
print __doc__
|
||||
return
|
||||
if o == "-l":
|
||||
listnames = 1
|
||||
exit = None
|
||||
for filename in args:
|
||||
x = process(filename, listnames)
|
||||
exit = exit or x
|
||||
return exit
|
||||
|
||||
def usage(msg):
    """Report a usage error on stderr, followed by the usage summary."""
    argv0 = sys.argv[0]
    sys.stderr.write("%s: %s\n" % (argv0, msg))
    sys.stderr.write("Usage: %s [-l] file ...\n" % argv0)
    sys.stderr.write("Try `%s -h' for more information.\n" % argv0)
|
||||
|
||||
def process(filename, listnames):
    """Report / and /= operators in one file (or recurse for a directory).

    With listnames true, print just the filename on the first hit;
    otherwise print each offending line grep -n style.  Returns 1 if the
    file cannot be opened, else None.
    """
    if os.path.isdir(filename):
        return processdir(filename, listnames)
    try:
        fp = open(filename)
    except IOError, msg:
        sys.stderr.write("Can't open: %s\n" % msg)
        return 1
    # Tokenize so hits inside comments or string literals are ignored.
    g = tokenize.generate_tokens(fp.readline)
    lastrow = None
    for type, token, (row, col), end, line in g:
        if token in ("/", "/="):
            if listnames:
                print filename
                break
            # Print each source line at most once, however many hits it has.
            if row != lastrow:
                lastrow = row
                print "%s:%d:%s" % (filename, row, line),
    fp.close()
|
||||
|
||||
def processdir(dir, listnames):
    """Recursively process every .py file and subdirectory under dir.

    Entries are visited in case-insensitive name order.  Returns 1 if
    the directory cannot be listed, else the first truthy status a
    nested process() call produced.
    """
    try:
        names = os.listdir(dir)
    except os.error, msg:
        sys.stderr.write("Can't list directory: %s\n" % dir)
        return 1
    files = []
    for name in names:
        fn = os.path.join(dir, name)
        if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
            files.append(fn)
    # Case-insensitive sort (Python 2 comparator style).
    files.sort(lambda a, b: cmp(os.path.normcase(a), os.path.normcase(b)))
    exit = None
    for fn in files:
        x = process(fn, listnames)
        exit = exit or x
    return exit
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
@ -0,0 +1,43 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# findlinksto
|
||||
#
|
||||
# find symbolic links to a path matching a regular expression
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import getopt
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], '')
|
||||
if len(args) < 2:
|
||||
raise getopt.GetoptError('not enough arguments', None)
|
||||
except getopt.GetoptError, msg:
|
||||
sys.stdout = sys.stderr
|
||||
print msg
|
||||
print 'usage: findlinksto pattern directory ...'
|
||||
sys.exit(2)
|
||||
pat, dirs = args[0], args[1:]
|
||||
prog = re.compile(pat)
|
||||
for dirname in dirs:
|
||||
os.path.walk(dirname, visit, prog)
|
||||
|
||||
def visit(prog, dirname, names):
    """os.path.walk callback: print symlinks whose target matches prog.

    Prunes the walk under symlinked directories by clearing names
    in place.  Unreadable entries (non-links) are silently skipped.
    """
    if os.path.islink(dirname):
        names[:] = []
        return
    if os.path.ismount(dirname):
        # NOTE(review): this only announces crossing onto another mount;
        # it does not prune the walk there.
        print 'descend into', dirname
    for name in names:
        name = os.path.join(dirname, name)
        try:
            # os.readlink raises os.error for anything that isn't a symlink.
            linkto = os.readlink(name)
            if prog.search(linkto) is not None:
                print name, '->', linkto
        except os.error:
            pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""List all those Python files that require a coding directive
|
||||
|
||||
Usage: nocoding.py dir1 [dir2...]
|
||||
"""
|
||||
|
||||
__author__ = "Oleg Broytmann, Georg Brandl"
|
||||
|
||||
import sys, os, re, getopt
|
||||
|
||||
# our pysource module finds Python source files
try:
    import pysource
except ImportError:
    # emulate the module with a simple os.walk
    class pysource:
        # The real module exposes these predicate functions; the fallback
        # provides None so "-c" (can_be_compiled) silently degrades.
        has_python_ext = looks_like_python = can_be_compiled = None
        def walk_python_files(self, paths, *args, **kwargs):
            # Yield candidate Python files under each path.
            # NOTE(review): for a file argument this yields the boolean
            # path.endswith(".py") rather than the path itself, unlike the
            # directory branch -- looks like a latent bug in the fallback;
            # confirm against the real pysource module's contract.
            for path in paths:
                if os.path.isfile(path):
                    yield path.endswith(".py")
                elif os.path.isdir(path):
                    for root, dirs, files in os.walk(path):
                        for filename in files:
                            if filename.endswith(".py"):
                                yield os.path.join(root, filename)
    # Replace the class with a singleton instance so call sites look the
    # same whether or not the real module was importable.
    pysource = pysource()


    print >>sys.stderr, ("The pysource module is not available; "
                         "no sophisticated Python source file search will be done.")
|
||||
|
||||
|
||||
# Matches PEP 263 coding declarations, e.g. "# -*- coding: utf-8 -*-".
decl_re = re.compile(r"coding[=:]\s*([-\w.]+)")

def get_declaration(line):
    """Return the codec name declared in *line*, or '' if none is found."""
    found = decl_re.search(line)
    if found is None:
        return ''
    return found.group(1)
|
||||
|
||||
def has_correct_encoding(text, codec):
    """Return True if *text* (a byte string) decodes cleanly under *codec*."""
    try:
        unicode(text, codec)
    except UnicodeDecodeError:
        return False
    else:
        return True
|
||||
|
||||
def needs_declaration(fullpath):
    """Return True if the file at *fullpath* needs a coding declaration.

    Returns False when the file already declares an encoding (in line 1 or
    2, per PEP 263) or is pure ASCII; returns None if the file vanished.
    """
    try:
        infile = open(fullpath, 'rU')
    except IOError: # Oops, the file was removed - ignore it
        return None

    # PEP 263 only allows the declaration on the first two lines.
    line1 = infile.readline()
    line2 = infile.readline()

    if get_declaration(line1) or get_declaration(line2):
        # the file does have an encoding declaration, so trust it
        infile.close()
        return False

    # check the whole file for non-ASCII characters
    rest = infile.read()
    infile.close()

    if has_correct_encoding(line1+line2+rest, "ascii"):
        return False

    return True
|
||||
|
||||
|
||||
usage = """Usage: %s [-cd] paths...
    -c: recognize Python source files trying to compile them
    -d: debug output""" % sys.argv[0]

# --- top-level driver: parse options, then scan and report files ---

try:
    opts, args = getopt.getopt(sys.argv[1:], 'cd')
except getopt.error, msg:
    print >>sys.stderr, msg
    print >>sys.stderr, usage
    sys.exit(1)

# Default file filter: name/content heuristics (None with the fallback
# pysource emulation above).
is_python = pysource.looks_like_python
debug = False

for o, a in opts:
    if o == '-c':
        # Stronger check: only files that actually compile.
        is_python = pysource.can_be_compiled
    elif o == '-d':
        debug = True

if not args:
    print >>sys.stderr, usage
    sys.exit(1)

# Print every Python file that contains non-ASCII text but no declaration.
for fullpath in pysource.walk_python_files(args, is_python):
    if debug:
        print "Testing for coding: %s" % fullpath
    result = needs_declaration(fullpath)
    if result:
        print fullpath
|
314
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/fixcid.py
Normal file
314
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/fixcid.py
Normal file
@ -0,0 +1,314 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Perform massive identifier substitution on C source files.
|
||||
# This actually tokenizes the files (to some extent) so it can
|
||||
# avoid making substitutions inside strings or comments.
|
||||
# Inside strings, substitutions are never made; inside comments,
|
||||
# it is a user option (off by default).
|
||||
#
|
||||
# The substitutions are read from one or more files whose lines,
|
||||
# when not empty, after stripping comments starting with #,
|
||||
# must contain exactly two words separated by whitespace: the
|
||||
# old identifier and its replacement.
|
||||
#
|
||||
# The option -r reverses the sense of the substitutions (this may be
|
||||
# useful to undo a particular substitution).
|
||||
#
|
||||
# If the old identifier is prefixed with a '*' (with no intervening
|
||||
# whitespace), then it will not be substituted inside comments.
|
||||
#
|
||||
# Command line arguments are files or directories to be processed.
|
||||
# Directories are searched recursively for files whose name looks
|
||||
# like a C file (ends in .h or .c). The special filename '-' means
|
||||
# operate in filter mode: read stdin, write stdout.
|
||||
#
|
||||
# Symbolic links are always ignored (except as explicit directory
|
||||
# arguments).
|
||||
#
|
||||
# The original files are kept as back-up with a "~" suffix.
|
||||
#
|
||||
# Changes made are reported to stdout in a diff-like format.
|
||||
#
|
||||
# NB: by changing only the function fixline() you can turn this
|
||||
# into a program for different changes to C source files; by
|
||||
# changing the function wanted() you can make a different selection of
|
||||
# files.
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
from stat import *
|
||||
import getopt
|
||||
|
||||
err = sys.stderr.write
|
||||
dbg = err
|
||||
rep = sys.stdout.write
|
||||
|
||||
def usage():
    """Write the fixcid command-line help text to stderr."""
    progname = sys.argv[0]
    err('Usage: ' + progname +
        ' [-c] [-r] [-s file] ... file-or-directory ...\n')
    err('\n')
    err('-c : substitute inside comments\n')
    err('-r : reverse direction for following -s options\n')
    err('-s substfile : add a file of substitutions\n')
    err('\n')
    err('Each non-empty non-comment line in a substitution file must\n')
    err('contain exactly two words: an identifier and its replacement.\n')
    err('Comments start with a # character and end at end of line.\n')
    err('If an identifier is preceded with a *, it is not substituted\n')
    err('inside a comment even when -c is specified.\n')
|
||||
|
||||
def main():
    """fixcid entry point: parse options, then fix each file/directory.

    Exits with status 2 on usage errors, otherwise with 1 if any file
    failed and 0 on full success.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'crs:')
    except getopt.error, msg:
        err('Options error: ' + str(msg) + '\n')
        usage()
        sys.exit(2)
    bad = 0
    if not args: # No arguments
        usage()
        sys.exit(2)
    # Options take effect via module-level state (Docomments/Reverse/Dict).
    for opt, arg in opts:
        if opt == '-c':
            setdocomments()
        if opt == '-r':
            setreverse()
        if opt == '-s':
            addsubst(arg)
    for arg in args:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            # Symlinks are only followed when given as directory arguments.
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    sys.exit(bad)
|
||||
|
||||
# Change this regular expression to select a different set of files
Wanted = r'^[a-zA-Z0-9_]+\.[ch]$'
def wanted(name):
    """Return True if *name* looks like a C source or header file.

    Fixed: the original compared the result of re.match() (a match object
    or None) with an integer via ">= 0", which relies on Python 2's
    arbitrary cross-type ordering and raises TypeError on Python 3.
    """
    return re.match(Wanted, name) is not None
|
||||
|
||||
def recursedown(dirname):
    """Fix every wanted() file under *dirname*, depth-first.

    Returns 1 if any listing or fix failed, else 0. Symbolic links
    encountered during the walk are skipped.
    """
    dbg('recursedown(%r)\n' % (dirname,))
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error, msg:
        err(dirname + ': cannot list directory: ' + str(msg) + '\n')
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            # Defer subdirectories so files are handled first.
            subdirs.append(fullname)
        elif wanted(name):
            if fix(fullname): bad = 1
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad
|
||||
|
||||
def fix(filename):
    """Apply the loaded substitutions to one file (or stdin/stdout for '-').

    Returns 1 on failure, 0 on success (including "no changes needed").
    The file is first scanned without writing; on the first change the
    input is rewound and re-scanned while copying everything to a temp
    file, which finally replaces the original (backup kept as filename~).
    """
##  dbg('fix(%r)\n' % (filename,))
    if filename == '-':
        # Filter mode
        f = sys.stdin
        g = sys.stdout
    else:
        # File replacement mode
        try:
            f = open(filename, 'r')
        except IOError, msg:
            err(filename + ': cannot open: ' + str(msg) + '\n')
            return 1
        head, tail = os.path.split(filename)
        tempname = os.path.join(head, '@' + tail)
        # g stays None until the first change is found.
        g = None
    # If we find a match, we rewind the file and start over but
    # now copy everything to a temp file.
    lineno = 0
    initfixline()
    while 1:
        line = f.readline()
        if not line: break
        lineno = lineno + 1
        # Join backslash-continued lines so fixline() sees whole
        # logical lines.
        while line[-2:] == '\\\n':
            nextline = f.readline()
            if not nextline: break
            line = line + nextline
            lineno = lineno + 1
        newline = fixline(line)
        if newline != line:
            if g is None:
                # First change: open the temp file, rewind, and restart
                # the scan in copy mode.
                try:
                    g = open(tempname, 'w')
                except IOError, msg:
                    f.close()
                    err(tempname+': cannot create: '+
                        str(msg)+'\n')
                    return 1
                f.seek(0)
                lineno = 0
                initfixline()
                rep(filename + ':\n')
                continue # restart from the beginning
            # Report the change in a diff-like format.
            rep(repr(lineno) + '\n')
            rep('< ' + line)
            rep('> ' + newline)
        if g is not None:
            g.write(newline)

    # End of file
    if filename == '-': return 0 # Done in filter mode
    f.close()
    if not g: return 0 # No changes

    # Finishing touch -- move files

    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 07777)
    except os.error, msg:
        err(tempname + ': warning: chmod failed (' + str(msg) + ')\n')
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error, msg:
        err(filename + ': warning: backup failed (' + str(msg) + ')\n')
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error, msg:
        err(filename + ': rename failed (' + str(msg) + ')\n')
        return 1
    # Return success
    return 0
|
||||
|
||||
# Tokenizing ANSI C (partly)

# NOTE(review): these patterns use emacs-style escaped groups/alternation
# ("\(", "\|"), the syntax of the long-removed "regex" module. Under the
# "re" module they compile (with "\(" meaning a literal parenthesis), but
# they do not tokenize as originally intended -- this script appears to be
# only partially converted to "re". Confirm before relying on it.

Identifier = '\(struct \)?[a-zA-Z_][a-zA-Z0-9_]+'
String = '"\([^\n\\"]\|\\\\.\)*"'
Char = '\'\([^\n\\\']\|\\\\.\)*\''
CommentStart = '/\*'
CommentEnd = '\*/'

# C numeric literals, with optional unsigned/long suffixes.
Hexnumber = '0[xX][0-9a-fA-F]*[uUlL]*'
Octnumber = '0[0-7]*[uUlL]*'
Decnumber = '[1-9][0-9]*[uUlL]*'
Intnumber = Hexnumber + '\|' + Octnumber + '\|' + Decnumber
Exponent = '[eE][-+]?[0-9]+'
Pointfloat = '\([0-9]+\.[0-9]*\|\.[0-9]+\)\(' + Exponent + '\)?'
Expfloat = '[0-9]+' + Exponent
Floatnumber = Pointfloat + '\|' + Expfloat
Number = Floatnumber + '\|' + Intnumber

# Anything else is an operator -- don't list this explicitly because of '/*'

# Two scanner states: outside comments (strings/chars matter) and inside
# comments (only identifiers, numbers and the comment terminator).
OutsideComment = (Identifier, Number, String, Char, CommentStart)
OutsideCommentPattern = '(' + '|'.join(OutsideComment) + ')'
OutsideCommentProgram = re.compile(OutsideCommentPattern)

InsideComment = (Identifier, Number, CommentEnd)
InsideCommentPattern = '(' + '|'.join(InsideComment) + ')'
InsideCommentProgram = re.compile(InsideCommentPattern)
|
||||
|
||||
def initfixline():
    """Reset the scanner to its start-of-file state (outside any comment)."""
    global Program
    Program = OutsideCommentProgram
|
||||
|
||||
def fixline(line):
|
||||
global Program
|
||||
## print '-->', repr(line)
|
||||
i = 0
|
||||
while i < len(line):
|
||||
i = Program.search(line, i)
|
||||
if i < 0: break
|
||||
found = Program.group(0)
|
||||
## if Program is InsideCommentProgram: print '...',
|
||||
## else: print ' ',
|
||||
## print found
|
||||
if len(found) == 2:
|
||||
if found == '/*':
|
||||
Program = InsideCommentProgram
|
||||
elif found == '*/':
|
||||
Program = OutsideCommentProgram
|
||||
n = len(found)
|
||||
if Dict.has_key(found):
|
||||
subst = Dict[found]
|
||||
if Program is InsideCommentProgram:
|
||||
if not Docomments:
|
||||
print 'Found in comment:', found
|
||||
i = i + n
|
||||
continue
|
||||
if NotInComment.has_key(found):
|
||||
## print 'Ignored in comment:',
|
||||
## print found, '-->', subst
|
||||
## print 'Line:', line,
|
||||
subst = found
|
||||
## else:
|
||||
## print 'Substituting in comment:',
|
||||
## print found, '-->', subst
|
||||
## print 'Line:', line,
|
||||
line = line[:i] + subst + line[i+n:]
|
||||
n = len(subst)
|
||||
i = i + n
|
||||
return line
|
||||
|
||||
# Whether to substitute inside comments (-c option).
Docomments = 0
def setdocomments():
    """Enable substitution inside comments."""
    global Docomments
    Docomments = 1

# Whether -s files are read with key/value swapped (-r option).
Reverse = 0
def setreverse():
    """Toggle the direction of subsequently loaded substitutions."""
    global Reverse
    Reverse = (not Reverse)

# old identifier -> replacement, and the subset never substituted in comments.
Dict = {}
NotInComment = {}
|
||||
def addsubst(substfile):
|
||||
try:
|
||||
fp = open(substfile, 'r')
|
||||
except IOError, msg:
|
||||
err(substfile + ': cannot read substfile: ' + str(msg) + '\n')
|
||||
sys.exit(1)
|
||||
lineno = 0
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line: break
|
||||
lineno = lineno + 1
|
||||
try:
|
||||
i = line.index('#')
|
||||
except ValueError:
|
||||
i = -1 # Happens to delete trailing \n
|
||||
words = line[:i].split()
|
||||
if not words: continue
|
||||
if len(words) == 3 and words[0] == 'struct':
|
||||
words[:2] = [words[0] + ' ' + words[1]]
|
||||
elif len(words) <> 2:
|
||||
err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
|
||||
continue
|
||||
if Reverse:
|
||||
[value, key] = words
|
||||
else:
|
||||
[key, value] = words
|
||||
if value[0] == '*':
|
||||
value = value[1:]
|
||||
if key[0] == '*':
|
||||
key = key[1:]
|
||||
NotInComment[key] = value
|
||||
if Dict.has_key(key):
|
||||
err('%s:%r: warning: overriding: %r %r\n' % (substfile, lineno, key, value))
|
||||
err('%s:%r: warning: previous: %r\n' % (substfile, lineno, Dict[key]))
|
||||
Dict[key] = value
|
||||
fp.close()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
380
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/fixdiv.py
Normal file
380
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/fixdiv.py
Normal file
@ -0,0 +1,380 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""fixdiv - tool to fix division operators.
|
||||
|
||||
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
|
||||
This runs the script `yourscript.py' while writing warning messages
|
||||
about all uses of the classic division operator to the file
|
||||
`warnings'. The warnings look like this:
|
||||
|
||||
<file>:<line>: DeprecationWarning: classic <type> division
|
||||
|
||||
The warnings are written to stderr, so you must use `2>' for the I/O
|
||||
redirect. I know of no way to redirect stderr on Windows in a DOS
|
||||
box, so you will have to modify the script to set sys.stderr to some
|
||||
kind of log file if you want to do this on Windows.
|
||||
|
||||
The warnings are not limited to the script; modules imported by the
|
||||
script may also trigger warnings. In fact a useful technique is to
|
||||
write a test script specifically intended to exercise all code in a
|
||||
particular module or set of modules.
|
||||
|
||||
Then run `python fixdiv.py warnings'. This first reads the warnings,
|
||||
looking for classic division warnings, and sorts them by file name and
|
||||
line number. Then, for each file that received at least one warning,
|
||||
it parses the file and tries to match the warnings up to the division
|
||||
operators found in the source code. If it is successful, it writes
|
||||
its findings to stdout, preceded by a line of dashes and a line of the
|
||||
form:
|
||||
|
||||
Index: <file>
|
||||
|
||||
If the only findings found are suggestions to change a / operator into
|
||||
a // operator, the output is acceptable input for the Unix 'patch'
|
||||
program.
|
||||
|
||||
Here are the possible messages on stdout (N stands for a line number):
|
||||
|
||||
- A plain-diff-style change ('NcN', a line marked by '<', a line
|
||||
containing '---', and a line marked by '>'):
|
||||
|
||||
A / operator was found that should be changed to //. This is the
|
||||
recommendation when only int and/or long arguments were seen.
|
||||
|
||||
- 'True division / operator at line N' and a line marked by '=':
|
||||
|
||||
A / operator was found that can remain unchanged. This is the
|
||||
recommendation when only float and/or complex arguments were seen.
|
||||
|
||||
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
|
||||
|
||||
A / operator was found for which int or long as well as float or
|
||||
complex arguments were seen. This is highly unlikely; if it occurs,
|
||||
you may have to restructure the code to keep the classic semantics,
|
||||
or maybe you don't care about the classic semantics.
|
||||
|
||||
- 'No conclusive evidence on line N', line marked by '*':
|
||||
|
||||
A / operator was found for which no warnings were seen. This could
|
||||
be code that was never executed, or code that was only executed
|
||||
with user-defined objects as arguments. You will have to
|
||||
investigate further. Note that // can be overloaded separately from
|
||||
/, using __floordiv__. True division can also be separately
|
||||
overloaded, using __truediv__. Classic division should be the same
|
||||
as either of those. (XXX should I add a warning for division on
|
||||
user-defined objects, to disambiguate this case from code that was
|
||||
never executed?)
|
||||
|
||||
- 'Phantom ... warnings for line N', line marked by '*':
|
||||
|
||||
A warning was seen for a line not containing a / operator. The most
|
||||
likely cause is a warning about code executed by 'exec' or eval()
|
||||
(see note below), or an indirect invocation of the / operator, for
|
||||
example via the div() function in the operator module. It could
|
||||
also be caused by a change to the file between the time the test
|
||||
script was run to collect warnings and the time fixdiv was run.
|
||||
|
||||
- 'More than one / operator in line N'; or
|
||||
'More than one / operator per statement in lines N-N':
|
||||
|
||||
The scanner found more than one / operator on a single line, or in a
|
||||
statement split across multiple lines. Because the warnings
|
||||
framework doesn't (and can't) show the offset within the line, and
|
||||
the code generator doesn't always give the correct line number for
|
||||
operations in a multi-line statement, we can't be sure whether all
|
||||
operators in the statement were executed. To be on the safe side,
|
||||
by default a warning is issued about this case. In practice, these
|
||||
cases are usually safe, and the -m option suppresses these warning.
|
||||
|
||||
- 'Can't find the / operator in line N', line marked by '*':
|
||||
|
||||
This really shouldn't happen. It means that the tokenize module
|
||||
reported a '/' operator but the line it returns didn't contain a '/'
|
||||
character at the indicated position.
|
||||
|
||||
- 'Bad warning for line N: XYZ', line marked by '*':
|
||||
|
||||
This really shouldn't happen. It means that a 'classic XYZ
|
||||
division' warning was read with XYZ being something other than
|
||||
'int', 'long', 'float', or 'complex'.
|
||||
|
||||
Notes:
|
||||
|
||||
- The augmented assignment operator /= is handled the same way as the
|
||||
/ operator.
|
||||
|
||||
- This tool never looks at the // operator; no warnings are ever
|
||||
generated for use of this operator.
|
||||
|
||||
- This tool never looks at the / operator when a future division
|
||||
statement is in effect; no warnings are generated in this case, and
|
||||
because the tool only looks at files for which at least one classic
|
||||
division warning was seen, it will never look at files containing a
|
||||
future division statement.
|
||||
|
||||
- Warnings may be issued for code not read from a file, but executed
|
||||
using an exec statement or the eval() function. These may have
|
||||
<string> in the filename position, in which case the fixdiv script
|
||||
will attempt and fail to open a file named '<string>' and issue a
|
||||
warning about this failure; or these may be reported as 'Phantom'
|
||||
warnings (see above). You're on your own to deal with these. You
|
||||
could make all recommended changes and add a future division
|
||||
statement to all affected files, and then re-run the test script; it
|
||||
should not issue any warnings. If there are any, and you have a
|
||||
hard time tracking down where they are generated, you can use the
|
||||
-Werror option to force an error instead of a first warning,
|
||||
generating a traceback.
|
||||
|
||||
- The tool should be run from the same directory as that from which
|
||||
the original script was run, otherwise it won't be able to open
|
||||
files given by relative pathnames.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import getopt
|
||||
import re
|
||||
import tokenize
|
||||
|
||||
multi_ok = 0
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "hm")
|
||||
except getopt.error, msg:
|
||||
usage(msg)
|
||||
return 2
|
||||
for o, a in opts:
|
||||
if o == "-h":
|
||||
print __doc__
|
||||
return
|
||||
if o == "-m":
|
||||
global multi_ok
|
||||
multi_ok = 1
|
||||
if not args:
|
||||
usage("at least one file argument is required")
|
||||
return 2
|
||||
if args[1:]:
|
||||
sys.stderr.write("%s: extra file arguments ignored\n", sys.argv[0])
|
||||
warnings = readwarnings(args[0])
|
||||
if warnings is None:
|
||||
return 1
|
||||
files = warnings.keys()
|
||||
if not files:
|
||||
print "No classic division warnings read from", args[0]
|
||||
return
|
||||
files.sort()
|
||||
exit = None
|
||||
for filename in files:
|
||||
x = process(filename, warnings[filename])
|
||||
exit = exit or x
|
||||
return exit
|
||||
|
||||
def usage(msg):
    """Print the error *msg* plus a short usage synopsis to stderr."""
    sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
    sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
    sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
|
||||
|
||||
# Shape of the -Qwarnall output lines this tool consumes.
PATTERN = ("^(.+?):(\d+): DeprecationWarning: "
           "classic (int|long|float|complex) division$")

def readwarnings(warningsfile):
    """Parse *warningsfile* into {filename: [(lineno, operand_type), ...]}.

    Lines not matching PATTERN are skipped (with a note on stderr if they
    still mention "division"). Returns None if the file cannot be opened.
    """
    prog = re.compile(PATTERN)
    try:
        f = open(warningsfile)
    except IOError, msg:
        sys.stderr.write("can't open: %s\n" % msg)
        return
    warnings = {}
    while 1:
        line = f.readline()
        if not line:
            break
        m = prog.match(line)
        if not m:
            if line.find("division") >= 0:
                sys.stderr.write("Warning: ignored input " + line)
            continue
        filename, lineno, what = m.groups()
        list = warnings.get(filename)
        if list is None:
            warnings[filename] = list = []
        # intern() the small set of type names to share the strings.
        list.append((int(lineno), intern(what)))
    f.close()
    return warnings
|
||||
|
||||
def process(filename, list):
    """Match the division warnings in *list* against the source of *filename*.

    Tokenizes the file statement by statement, pairs each statement's
    '/'-operators with the warnings reported for its line range, and
    prints findings (patch hunks, ambiguity notes, phantom warnings) to
    stdout. Returns 1 if the file cannot be opened, else None.
    """
    print "-"*70
    assert list # if this fails, readwarnings() is broken
    try:
        fp = open(filename)
    except IOError, msg:
        sys.stderr.write("can't open: %s\n" % msg)
        return 1
    print "Index:", filename
    f = FileContext(fp)
    list.sort()
    index = 0 # list[:index] has been processed, list[index:] is still to do
    g = tokenize.generate_tokens(f.readline)
    while 1:
        # One logical line (statement) per scanline() call.
        startlineno, endlineno, slashes = lineinfo = scanline(g)
        if startlineno is None:
            break
        assert startlineno <= endlineno is not None
        # Warnings before this statement have no matching operator.
        orphans = []
        while index < len(list) and list[index][0] < startlineno:
            orphans.append(list[index])
            index += 1
        if orphans:
            reportphantomwarnings(orphans, f)
        # Warnings that fall inside this statement's line range.
        warnings = []
        while index < len(list) and list[index][0] <= endlineno:
            warnings.append(list[index])
            index += 1
        if not slashes and not warnings:
            pass
        elif slashes and not warnings:
            report(slashes, "No conclusive evidence")
        elif warnings and not slashes:
            reportphantomwarnings(warnings, f)
        else:
            if len(slashes) > 1:
                if not multi_ok:
                    # Collect the distinct rows holding '/' operators.
                    rows = []
                    lastrow = None
                    for (row, col), line in slashes:
                        if row == lastrow:
                            continue
                        rows.append(row)
                        lastrow = row
                    assert rows
                    if len(rows) == 1:
                        print "*** More than one / operator in line", rows[0]
                    else:
                        print "*** More than one / operator per statement",
                        print "in lines %d-%d" % (rows[0], rows[-1])
            # Classify the operand types seen in the warnings.
            intlong = []
            floatcomplex = []
            bad = []
            for lineno, what in warnings:
                if what in ("int", "long"):
                    intlong.append(what)
                elif what in ("float", "complex"):
                    floatcomplex.append(what)
                else:
                    bad.append(what)
            lastrow = None
            for (row, col), line in slashes:
                if row == lastrow:
                    continue
                lastrow = row
                line = chop(line)
                if line[col:col+1] != "/":
                    print "*** Can't find the / operator in line %d:" % row
                    print "*", line
                    continue
                if bad:
                    print "*** Bad warning for line %d:" % row, bad
                    print "*", line
                elif intlong and not floatcomplex:
                    # Integer-only: emit a plain-diff hunk changing / to //.
                    print "%dc%d" % (row, row)
                    print "<", line
                    print "---"
                    print ">", line[:col] + "/" + line[col:]
                elif floatcomplex and not intlong:
                    print "True division / operator at line %d:" % row
                    print "=", line
                elif intlong and floatcomplex:
                    print "*** Ambiguous / operator (%s, %s) at line %d:" % (
                        "|".join(intlong), "|".join(floatcomplex), row)
                    print "?", line
    fp.close()
|
||||
|
||||
def reportphantomwarnings(warnings, f):
|
||||
blocks = []
|
||||
lastrow = None
|
||||
lastblock = None
|
||||
for row, what in warnings:
|
||||
if row != lastrow:
|
||||
lastblock = [row]
|
||||
blocks.append(lastblock)
|
||||
lastblock.append(what)
|
||||
for block in blocks:
|
||||
row = block[0]
|
||||
whats = "/".join(block[1:])
|
||||
print "*** Phantom %s warnings for line %d:" % (whats, row)
|
||||
f.report(row, mark="*")
|
||||
|
||||
def report(slashes, message):
    """Print *message* once per distinct row appearing in *slashes*."""
    lastrow = None
    for (row, col), line in slashes:
        # Multiple slashes on one row produce a single report.
        if row != lastrow:
            print "*** %s on line %d:" % (message, row)
            print "*", chop(line)
            lastrow = row
|
||||
|
||||
class FileContext:
|
||||
def __init__(self, fp, window=5, lineno=1):
|
||||
self.fp = fp
|
||||
self.window = 5
|
||||
self.lineno = 1
|
||||
self.eoflookahead = 0
|
||||
self.lookahead = []
|
||||
self.buffer = []
|
||||
def fill(self):
|
||||
while len(self.lookahead) < self.window and not self.eoflookahead:
|
||||
line = self.fp.readline()
|
||||
if not line:
|
||||
self.eoflookahead = 1
|
||||
break
|
||||
self.lookahead.append(line)
|
||||
def readline(self):
|
||||
self.fill()
|
||||
if not self.lookahead:
|
||||
return ""
|
||||
line = self.lookahead.pop(0)
|
||||
self.buffer.append(line)
|
||||
self.lineno += 1
|
||||
return line
|
||||
def truncate(self):
|
||||
del self.buffer[-window:]
|
||||
def __getitem__(self, index):
|
||||
self.fill()
|
||||
bufstart = self.lineno - len(self.buffer)
|
||||
lookend = self.lineno + len(self.lookahead)
|
||||
if bufstart <= index < self.lineno:
|
||||
return self.buffer[index - bufstart]
|
||||
if self.lineno <= index < lookend:
|
||||
return self.lookahead[index - self.lineno]
|
||||
raise KeyError
|
||||
def report(self, first, last=None, mark="*"):
|
||||
if last is None:
|
||||
last = first
|
||||
for i in range(first, last+1):
|
||||
try:
|
||||
line = self[first]
|
||||
except KeyError:
|
||||
line = "<missing line>"
|
||||
print mark, chop(line)
|
||||
|
||||
def scanline(g):
    """Consume one logical line from token generator *g*.

    Returns (startlineno, endlineno, slashes) where slashes is a list of
    ((row, col), source_line) pairs, one per '/' or '/=' token seen.
    startlineno is None when the generator is exhausted.
    """
    slashes = []
    startlineno = endlineno = None
    for toktype, tokstring, start, end, srcline in g:
        endlineno = end[0]
        if startlineno is None:
            startlineno = endlineno
        if tokstring in ("/", "/="):
            slashes.append((start, srcline))
        if toktype == tokenize.NEWLINE:
            break
    return startlineno, endlineno, slashes
|
||||
|
||||
def chop(line):
    """Return *line* without its trailing newline, if it has one."""
    return line[:-1] if line.endswith("\n") else line
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
@ -0,0 +1,49 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Add some standard cpp magic to a header file
|
||||
|
||||
import sys
|
||||
|
||||
def main():
    """Apply include-guard magic to every header named on the command line."""
    args = sys.argv[1:]
    for filename in args:
        process(filename)
|
||||
|
||||
def process(filename):
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, msg:
|
||||
sys.stderr.write('%s: can\'t open: %s\n' % (filename, str(msg)))
|
||||
return
|
||||
data = f.read()
|
||||
f.close()
|
||||
if data[:2] <> '/*':
|
||||
sys.stderr.write('%s does not begin with C comment\n' % filename)
|
||||
return
|
||||
try:
|
||||
f = open(filename, 'w')
|
||||
except IOError, msg:
|
||||
sys.stderr.write('%s: can\'t write: %s\n' % (filename, str(msg)))
|
||||
return
|
||||
sys.stderr.write('Processing %s ...\n' % filename)
|
||||
magic = 'Py_'
|
||||
for c in filename:
|
||||
if ord(c)<=0x80 and c.isalnum():
|
||||
magic = magic + c.upper()
|
||||
else: magic = magic + '_'
|
||||
sys.stdout = f
|
||||
print '#ifndef', magic
|
||||
print '#define', magic
|
||||
print '#ifdef __cplusplus'
|
||||
print 'extern "C" {'
|
||||
print '#endif'
|
||||
print
|
||||
f.write(data)
|
||||
print
|
||||
print '#ifdef __cplusplus'
|
||||
print '}'
|
||||
print '#endif'
|
||||
print '#endif /*', '!'+magic, '*/'
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,113 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""(Ostensibly) fix copyright notices in files.
|
||||
|
||||
Actually, this sript will simply replace a block of text in a file from one
|
||||
string to another. It will only do this once though, i.e. not globally
|
||||
throughout the file. It writes a backup file and then does an os.rename()
|
||||
dance for atomicity.
|
||||
|
||||
Usage: fixnotices.py [options] [filenames]
|
||||
Options:
|
||||
-h / --help
|
||||
Print this message and exit
|
||||
|
||||
--oldnotice=file
|
||||
Use the notice in the file as the old (to be replaced) string, instead
|
||||
of the hard coded value in the script.
|
||||
|
||||
--newnotice=file
|
||||
Use the notice in the file as the new (replacement) string, instead of
|
||||
the hard coded value in the script.
|
||||
|
||||
--dry-run
|
||||
Don't actually make the changes, but print out the list of files that
|
||||
would change. When used with -v, a status will be printed for every
|
||||
file.
|
||||
|
||||
-v / --verbose
|
||||
Print a message for every file looked at, indicating whether the file
|
||||
is changed or not.
|
||||
"""
|
||||
|
||||
OLD_NOTICE = """/***********************************************************
|
||||
Copyright (c) 2000, BeOpen.com.
|
||||
Copyright (c) 1995-2000, Corporation for National Research Initiatives.
|
||||
Copyright (c) 1990-1995, Stichting Mathematisch Centrum.
|
||||
All rights reserved.
|
||||
|
||||
See the file "Misc/COPYRIGHT" for information on usage and
|
||||
redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
|
||||
******************************************************************/
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
|
||||
NEW_NOTICE = ""
|
||||
DRYRUN = 0
|
||||
VERBOSE = 0
|
||||
|
||||
|
||||
def usage(code, msg=''):
    """Print the module usage text (plus optional *msg*) and exit(*code*)."""
    print __doc__ % globals()
    if msg:
        print msg
    sys.exit(code)
|
||||
|
||||
|
||||
def main():
    """fixnotices entry point: parse options, then process each file.

    Options update the module-level configuration globals; --oldnotice and
    --newnotice replace the hard-coded notice texts from files.
    """
    global DRYRUN, OLD_NOTICE, NEW_NOTICE, VERBOSE
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hv',
                                   ['help', 'oldnotice=', 'newnotice=',
                                    'dry-run', 'verbose'])
    except getopt.error, msg:
        usage(1, msg)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-v', '--verbose'):
            VERBOSE = 1
        elif opt == '--dry-run':
            DRYRUN = 1
        elif opt == '--oldnotice':
            fp = open(arg)
            OLD_NOTICE = fp.read()
            fp.close()
        elif opt == '--newnotice':
            fp = open(arg)
            NEW_NOTICE = fp.read()
            fp.close()

    for arg in args:
        process(arg)
|
||||
|
||||
|
||||
def process(file):
|
||||
f = open(file)
|
||||
data = f.read()
|
||||
f.close()
|
||||
i = data.find(OLD_NOTICE)
|
||||
if i < 0:
|
||||
if VERBOSE:
|
||||
print 'no change:', file
|
||||
return
|
||||
elif DRYRUN or VERBOSE:
|
||||
print ' change:', file
|
||||
if DRYRUN:
|
||||
# Don't actually change the file
|
||||
return
|
||||
data = data[:i] + NEW_NOTICE + data[i+len(OLD_NOTICE):]
|
||||
new = file + ".new"
|
||||
backup = file + ".bak"
|
||||
f = open(new, "w")
|
||||
f.write(data)
|
||||
f.close()
|
||||
os.rename(file, backup)
|
||||
os.rename(new, file)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Fix Python script(s) to reference the interpreter via /usr/bin/env python.
|
||||
# Warning: this overwrites the file without making a backup.
|
||||
|
||||
import sys
|
||||
import re
|
||||
|
||||
|
||||
def main():
|
||||
for filename in sys.argv[1:]:
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, msg:
|
||||
print filename, ': can\'t open :', msg
|
||||
continue
|
||||
line = f.readline()
|
||||
if not re.match('^#! */usr/local/bin/python', line):
|
||||
print filename, ': not a /usr/local/bin/python script'
|
||||
f.close()
|
||||
continue
|
||||
rest = f.read()
|
||||
f.close()
|
||||
line = re.sub('/usr/local/bin/python',
|
||||
'/usr/bin/env python', line)
|
||||
print filename, ':', repr(line)
|
||||
f = open(filename, "w")
|
||||
f.write(line)
|
||||
f.write(rest)
|
||||
f.close()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,400 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""Mirror a remote ftp subtree into a local directory tree.
|
||||
|
||||
usage: ftpmirror [-v] [-q] [-i] [-m] [-n] [-r] [-s pat]
|
||||
[-l username [-p passwd [-a account]]]
|
||||
hostname[:port] [remotedir [localdir]]
|
||||
-v: verbose
|
||||
-q: quiet
|
||||
-i: interactive mode
|
||||
-m: macintosh server (NCSA telnet 2.4) (implies -n -s '*.o')
|
||||
-n: don't log in
|
||||
-r: remove local files/directories no longer pertinent
|
||||
-l username [-p passwd [-a account]]: login info (default .netrc or anonymous)
|
||||
-s pat: skip files matching pattern
|
||||
hostname: remote host w/ optional port separated by ':'
|
||||
remotedir: remote directory (default initial)
|
||||
localdir: local directory (default current)
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import getopt
|
||||
import ftplib
|
||||
import netrc
|
||||
from fnmatch import fnmatch
|
||||
|
||||
# Print usage message and exit
|
||||
def usage(*args):
|
||||
sys.stdout = sys.stderr
|
||||
for msg in args: print msg
|
||||
print __doc__
|
||||
sys.exit(2)
|
||||
|
||||
verbose = 1 # 0 for -q, 2 for -v
|
||||
interactive = 0
|
||||
mac = 0
|
||||
rmok = 0
|
||||
nologin = 0
|
||||
skippats = ['.', '..', '.mirrorinfo']
|
||||
|
||||
# Main program: parse command line and start processing
|
||||
def main():
    """Parse the command line, connect and log in to the FTP server,
    then start the recursive mirror via mirrorsubdir()."""
    global verbose, interactive, mac, rmok, nologin
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'a:bil:mnp:qrs:v')
    except getopt.error, msg:
        usage(msg)
    login = ''
    passwd = ''
    account = ''
    if not args: usage('hostname missing')
    host = args[0]
    port = 0
    if ':' in host:
        # "hostname:port" syntax; the port part must be numeric.
        host, port = host.split(':', 1)
        port = int(port)
    # Seed credentials from ~/.netrc when available; the explicit
    # -l/-p/-a options in the loop below override these.
    try:
        auth = netrc.netrc().authenticators(host)
        if auth is not None:
            login, account, passwd = auth
    except (netrc.NetrcParseError, IOError):
        # No usable .netrc: fall back to anonymous or option-supplied login.
        pass
    for o, a in opts:
        if o == '-l': login = a
        if o == '-p': passwd = a
        if o == '-a': account = a
        if o == '-v': verbose = verbose + 1
        if o == '-q': verbose = 0
        if o == '-i': interactive = 1
        if o == '-m': mac = 1; nologin = 1; skippats.append('*.o')
        if o == '-n': nologin = 1
        if o == '-r': rmok = 1
        if o == '-s': skippats.append(a)
    # Optional positional arguments: remote directory, then local directory.
    remotedir = ''
    localdir = ''
    if args[1:]:
        remotedir = args[1]
        if args[2:]:
            localdir = args[2]
            if args[3:]: usage('too many arguments')
    #
    f = ftplib.FTP()
    if verbose: print "Connecting to '%s%s'..." % (host,
                                                   (port and ":%d"%port or ""))
    f.connect(host,port)
    if not nologin:
        if verbose:
            print 'Logging in as %r...' % (login or 'anonymous')
        f.login(login, passwd, account)
    if verbose: print 'OK.'
    pwd = f.pwd()
    if verbose > 1: print 'PWD =', repr(pwd)
    if remotedir:
        # Change to the requested remote starting directory before mirroring.
        if verbose > 1: print 'cwd(%s)' % repr(remotedir)
        f.cwd(remotedir)
        if verbose > 1: print 'OK.'
        pwd = f.pwd()
        if verbose > 1: print 'PWD =', repr(pwd)
    #
    mirrorsubdir(f, localdir)
|
||||
|
||||
# Core logic: mirror one subdirectory (recursively)
|
||||
def mirrorsubdir(f, localdir):
    """Mirror the FTP server's current directory into *localdir*, recursing
    into subdirectories.

    A '.mirrorinfo' file in each local directory records, per filename, the
    listing fields seen at the last successful transfer; unchanged files are
    skipped on later runs.  Module flags (verbose, interactive, mac, rmok)
    control behavior.
    """
    pwd = f.pwd()
    if localdir and not os.path.isdir(localdir):
        if verbose: print 'Creating local directory', repr(localdir)
        try:
            makedir(localdir)
        except os.error, msg:
            print "Failed to establish local directory", repr(localdir)
            return
    # Load the per-directory mirror state.  NOTE(review): this eval()s a
    # local file -- fine for a trusted mirror tree, but not hardened input.
    infofilename = os.path.join(localdir, '.mirrorinfo')
    try:
        text = open(infofilename, 'r').read()
    except IOError, msg:
        text = '{}'
    try:
        info = eval(text)
    except (SyntaxError, NameError):
        print 'Bad mirror info in', repr(infofilename)
        info = {}
    subdirs = []
    listing = []
    if verbose: print 'Listing remote directory %r...' % (pwd,)
    f.retrlines('LIST', listing.append)
    filesfound = []
    for line in listing:
        if verbose > 1: print '-->', repr(line)
        if mac:
            # Mac listing has just filenames;
            # trailing / means subdirectory
            filename = line.strip()
            mode = '-'
            if filename[-1:] == '/':
                filename = filename[:-1]
                mode = 'd'
            infostuff = ''
        else:
            # Parse, assuming a UNIX listing
            words = line.split(None, 8)
            if len(words) < 6:
                if verbose > 1: print 'Skipping short line'
                continue
            filename = words[-1].lstrip()
            i = filename.find(" -> ")
            if i >= 0:
                # words[0] had better start with 'l'...
                if verbose > 1:
                    print 'Found symbolic link %r' % (filename,)
                linkto = filename[i+4:]
                filename = filename[:i]
            # infostuff = size/date fields used for change detection.
            infostuff = words[-5:-1]
            # NOTE(review): if mode starts with 'l' but the listing had no
            # " -> " marker, 'linkto' below is unbound -- confirm listings
            # always include the target for symlinks.
            mode = words[0]
        # Apply the skip patterns (-s and built-ins).
        skip = 0
        for pat in skippats:
            if fnmatch(filename, pat):
                if verbose > 1:
                    print 'Skip pattern', repr(pat),
                    print 'matches', repr(filename)
                skip = 1
                break
        if skip:
            continue
        if mode[0] == 'd':
            # Directories are mirrored after all plain files, below.
            if verbose > 1:
                print 'Remembering subdirectory', repr(filename)
            subdirs.append(filename)
            continue
        filesfound.append(filename)
        if info.has_key(filename) and info[filename] == infostuff:
            # Listing fields unchanged since last mirror: skip the download.
            if verbose > 1:
                print 'Already have this version of',repr(filename)
            continue
        fullname = os.path.join(localdir, filename)
        # Download to a '@'-prefixed temp name, rename into place when done.
        tempname = os.path.join(localdir, '@'+filename)
        if interactive:
            doit = askabout('file', filename, pwd)
            if not doit:
                if not info.has_key(filename):
                    info[filename] = 'Not retrieved'
                continue
        try:
            os.unlink(tempname)
        except os.error:
            pass
        if mode[0] == 'l':
            # Recreate remote symlinks locally instead of copying data.
            if verbose:
                print "Creating symlink %r -> %r" % (filename, linkto)
            try:
                os.symlink(linkto, tempname)
            except IOError, msg:
                print "Can't create %r: %s" % (tempname, msg)
                continue
        else:
            try:
                fp = open(tempname, 'wb')
            except IOError, msg:
                print "Can't create %r: %s" % (tempname, msg)
                continue
            if verbose:
                print 'Retrieving %r from %r as %r...' % (filename, pwd, fullname)
            if verbose:
                # Wrap the output file to print '#' progress marks per KB.
                fp1 = LoggingFile(fp, 1024, sys.stdout)
            else:
                fp1 = fp
            t0 = time.time()
            try:
                f.retrbinary('RETR ' + filename,
                             fp1.write, 8*1024)
            except ftplib.error_perm, msg:
                print msg
            t1 = time.time()
            bytes = fp.tell()
            fp.close()
            if fp1 != fp:
                fp1.close()
        # Replace any existing local file with the freshly fetched one.
        try:
            os.unlink(fullname)
        except os.error:
            pass # Ignore the error
        try:
            os.rename(tempname, fullname)
        except os.error, msg:
            print "Can't rename %r to %r: %s" % (tempname, fullname, msg)
            continue
        # Persist the new state immediately so an interrupt loses little.
        info[filename] = infostuff
        writedict(info, infofilename)
        if verbose and mode[0] != 'l':
            dt = t1 - t0
            kbytes = bytes / 1024.0
            print int(round(kbytes)),
            print 'Kbytes in',
            print int(round(dt)),
            print 'seconds',
            if t1 > t0:
                print '(~%d Kbytes/sec)' % \
                      int(round(kbytes/dt),)
            print
    #
    # Remove files from info that are no longer remote
    deletions = 0
    for filename in info.keys():
        if filename not in filesfound:
            if verbose:
                print "Removing obsolete info entry for",
                print repr(filename), "in", repr(localdir or ".")
            del info[filename]
            deletions = deletions + 1
    if deletions:
        writedict(info, infofilename)
    #
    # Remove local files that are no longer in the remote directory
    try:
        if not localdir: names = os.listdir(os.curdir)
        else: names = os.listdir(localdir)
    except os.error:
        names = []
    for name in names:
        # Keep dotfiles, tracked files and known subdirectories.
        if name[0] == '.' or info.has_key(name) or name in subdirs:
            continue
        skip = 0
        for pat in skippats:
            if fnmatch(name, pat):
                if verbose > 1:
                    print 'Skip pattern', repr(pat),
                    print 'matches', repr(name)
                skip = 1
                break
        if skip:
            continue
        fullname = os.path.join(localdir, name)
        if not rmok:
            # Without -r only report; never delete local data by default.
            if verbose:
                print 'Local file', repr(fullname),
                print 'is no longer pertinent'
            continue
        if verbose: print 'Removing local file/dir', repr(fullname)
        remove(fullname)
    #
    # Recursively mirror subdirectories
    for subdir in subdirs:
        if interactive:
            doit = askabout('subdirectory', subdir, pwd)
            if not doit: continue
        if verbose: print 'Processing subdirectory', repr(subdir)
        localsubdir = os.path.join(localdir, subdir)
        pwd = f.pwd()
        if verbose > 1:
            print 'Remote directory now:', repr(pwd)
            print 'Remote cwd', repr(subdir)
        try:
            f.cwd(subdir)
        except ftplib.error_perm, msg:
            print "Can't chdir to", repr(subdir), ":", repr(msg)
        else:
            if verbose: print 'Mirroring as', repr(localsubdir)
            mirrorsubdir(f, localsubdir)
            if verbose > 1: print 'Remote cwd ..'
            f.cwd('..')
            newpwd = f.pwd()
            # Sanity check: 'cd sub; cd ..' must return where we started,
            # otherwise further mirroring would target the wrong tree.
            if newpwd != pwd:
                print 'Ended up in wrong directory after cd + cd ..'
                print 'Giving up now.'
                break
            else:
                if verbose > 1: print 'OK.'
|
||||
|
||||
# Helper to remove a file or directory tree
|
||||
def remove(fullname):
|
||||
if os.path.isdir(fullname) and not os.path.islink(fullname):
|
||||
try:
|
||||
names = os.listdir(fullname)
|
||||
except os.error:
|
||||
names = []
|
||||
ok = 1
|
||||
for name in names:
|
||||
if not remove(os.path.join(fullname, name)):
|
||||
ok = 0
|
||||
if not ok:
|
||||
return 0
|
||||
try:
|
||||
os.rmdir(fullname)
|
||||
except os.error, msg:
|
||||
print "Can't remove local directory %r: %s" % (fullname, msg)
|
||||
return 0
|
||||
else:
|
||||
try:
|
||||
os.unlink(fullname)
|
||||
except os.error, msg:
|
||||
print "Can't remove local file %r: %s" % (fullname, msg)
|
||||
return 0
|
||||
return 1
|
||||
|
||||
# Wrapper around a file for writing to write a hash sign every block.
|
||||
class LoggingFile:
    """Write-through file wrapper that prints a '#' per *blocksize* bytes.

    Forwards every write() to the wrapped file *fp* while emitting one
    hash mark on *outfp* for each full block written, FTP-client style.
    close() only terminates the progress line; it does NOT close fp.
    """

    def __init__(self, fp, blocksize, outfp):
        self.fp = fp
        self.bytes = 0      # total bytes written so far
        self.hashes = 0     # number of '#' marks already emitted
        self.blocksize = blocksize
        self.outfp = outfp

    def write(self, data):
        """Forward *data* to the underlying file, updating progress marks."""
        self.bytes = self.bytes + len(data)
        due = int(self.bytes) / self.blocksize
        while due > self.hashes:
            self.outfp.write('#')
            self.outfp.flush()
            self.hashes = self.hashes + 1
        self.fp.write(data)

    def close(self):
        """End the progress line (the wrapped fp stays open)."""
        self.outfp.write('\n')
|
||||
|
||||
# Ask permission to download a file.
|
||||
def askabout(filetype, filename, pwd):
|
||||
prompt = 'Retrieve %s %s from %s ? [ny] ' % (filetype, filename, pwd)
|
||||
while 1:
|
||||
reply = raw_input(prompt).strip().lower()
|
||||
if reply in ['y', 'ye', 'yes']:
|
||||
return 1
|
||||
if reply in ['', 'n', 'no', 'nop', 'nope']:
|
||||
return 0
|
||||
print 'Please answer yes or no.'
|
||||
|
||||
# Create a directory if it doesn't exist. Recursively create the
|
||||
# parent directory as well if needed.
|
||||
def makedir(pathname):
    """Create *pathname*, recursively creating missing parent directories.

    Unlike os.makedirs(), this is a no-op when the directory (or any
    ancestor) already exists rather than raising an error.
    """
    if os.path.isdir(pathname):
        return
    dirname = os.path.dirname(pathname)
    if dirname: makedir(dirname)
    # Mode 0o777 (modulo the process umask) -- same default as os.mkdir.
    # Written with the 0o prefix (PEP 3127): value-identical to the old
    # 0777 literal but valid on Python 2.6+ and Python 3.
    os.mkdir(pathname, 0o777)
|
||||
|
||||
# Write a dictionary to a file in a way that can be read back using
|
||||
# rval() but is still somewhat readable (i.e. not a single long line).
|
||||
# Also creates a backup file.
|
||||
def writedict(dict, filename):
    """Write *dict* to *filename*, one 'key: value,' repr pair per line.

    The format can be read back with eval()/rval() yet stays somewhat
    human-readable.  The previous contents, if any, are preserved in a
    'filename~' backup; data is first written to an '@'-prefixed temp
    file in the same directory and then renamed into place.
    """
    dirname, basename = os.path.split(filename)
    tempname = os.path.join(dirname, '@' + basename)
    backup = os.path.join(dirname, basename + '~')
    try:
        os.unlink(backup)
    except os.error:
        pass
    lines = ['{\n']
    for key, value in dict.items():
        lines.append('%r: %r,\n' % (key, value))
    lines.append('}\n')
    fp = open(tempname, 'w')
    fp.writelines(lines)
    fp.close()
    try:
        os.rename(filename, backup)
    except os.error:
        # First write: no old file to back up.
        pass
    os.rename(tempname, filename)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,23 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
import sys, webbrowser
|
||||
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
if not args:
|
||||
print "Usage: %s querystring" % sys.argv[0]
|
||||
return
|
||||
list = []
|
||||
for arg in args:
|
||||
if '+' in arg:
|
||||
arg = arg.replace('+', '%2B')
|
||||
if ' ' in arg:
|
||||
arg = '"%s"' % arg
|
||||
arg = arg.replace(' ', '+')
|
||||
list.append(arg)
|
||||
s = '+'.join(list)
|
||||
url = "http://www.google.com/search?q=%s" % s
|
||||
webbrowser.open(url)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,79 @@
|
||||
#! /usr/bin/env python2.3
|
||||
|
||||
"""Transform gprof(1) output into useful HTML."""
|
||||
|
||||
import re, os, sys, cgi, webbrowser
|
||||
|
||||
header = """\
|
||||
<html>
|
||||
<head>
|
||||
<title>gprof output (%s)</title>
|
||||
</head>
|
||||
<body>
|
||||
<pre>
|
||||
"""
|
||||
|
||||
trailer = """\
|
||||
</pre>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
def add_escapes(input):
    """Lazily yield the lines of *input* with HTML special chars escaped."""
    return (cgi.escape(line) for line in input)
|
||||
|
||||
def main():
    """Convert gprof output (default file 'gprof.out') into annotated HTML
    and open the result in a browser.

    Works through the four gprof report sections in order, sharing one
    iterator over the input so each loop resumes where the previous one
    stopped: preamble, flat profile, call-graph preamble, call graph,
    then the trailing index.
    """
    filename = "gprof.out"
    if sys.argv[1:]:
        filename = sys.argv[1]
    outputfilename = filename + ".html"
    input = add_escapes(file(filename))
    output = file(outputfilename, "w")
    output.write(header % filename)
    # Copy the preamble up to and including the flat-profile header line.
    for line in input:
        output.write(line)
        if line.startswith(" time"):
            break
    # Flat profile: remember every function name and link it to its
    # call-graph entry.
    labels = {}
    for line in input:
        m = re.match(r"(.* )(\w+)\n", line)
        if not m:
            output.write(line)
            break
        stuff, fname = m.group(1, 2)
        labels[fname] = fname
        output.write('%s<a name="flat:%s" href="#call:%s">%s</a>\n' %
                     (stuff, fname, fname, fname))
    # Copy text up to the call-graph header line.
    for line in input:
        output.write(line)
        if line.startswith("index % time"):
            break
    # Call graph: primary lines (starting with "[") get anchors; others
    # just link back to the primary entry.
    for line in input:
        m = re.match(r"(.* )(\w+)(( <cycle.*>)? \[\d+\])\n", line)
        if not m:
            output.write(line)
            if line.startswith("Index by function name"):
                break
            continue
        prefix, fname, suffix = m.group(1, 2, 3)
        if fname not in labels:
            output.write(line)
            continue
        if line.startswith("["):
            output.write('%s<a name="call:%s" href="#flat:%s">%s</a>%s\n' %
                         (prefix, fname, fname, fname, suffix))
        else:
            output.write('%s<a href="#call:%s">%s</a>%s\n' %
                         (prefix, fname, fname, suffix))
    # Index section: hyperlink every known function name word-by-word.
    for line in input:
        for part in re.findall(r"(\w+(?:\.c)?|\W+)", line):
            if part in labels:
                part = '<a href="#call:%s">%s</a>' % (part, part)
            output.write(part)
    output.write(trailer)
    output.close()
    webbrowser.open("file:" + os.path.abspath(outputfilename))
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
175
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/h2py.py
Normal file
175
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/h2py.py
Normal file
@ -0,0 +1,175 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Read #define's and translate to Python code.
|
||||
# Handle #include statements.
|
||||
# Handle #define macros with one argument.
|
||||
# Anything that isn't recognized or doesn't translate into valid
|
||||
# Python is ignored.
|
||||
|
||||
# Without filename arguments, acts as a filter.
|
||||
# If one or more filenames are given, output is written to corresponding
|
||||
# filenames in the local directory, translated to all uppercase, with
|
||||
# the extension replaced by ".py".
|
||||
|
||||
# By passing one or more options of the form "-i regular_expression"
|
||||
# you can specify additional strings to be ignored. This is useful
|
||||
# e.g. to ignore casts to u_long: simply specify "-i '(u_long)'".
|
||||
|
||||
# XXX To do:
|
||||
# - turn trailing C comments into Python comments
|
||||
# - turn C Boolean operators "&& || !" into Python "and or not"
|
||||
# - what to do about #if(def)?
|
||||
# - what to do about macros with multiple parameters?
|
||||
|
||||
import sys, re, getopt, os
|
||||
|
||||
p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
|
||||
|
||||
p_macro = re.compile(
|
||||
'^[\t ]*#[\t ]*define[\t ]+'
|
||||
'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
|
||||
|
||||
p_include = re.compile('^[\t ]*#[\t ]*include[\t ]+<([a-zA-Z0-9_/\.]+)')
|
||||
|
||||
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
|
||||
p_cpp_comment = re.compile('//.*')
|
||||
|
||||
ignores = [p_comment, p_cpp_comment]
|
||||
|
||||
p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")
|
||||
|
||||
p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")
|
||||
|
||||
filedict = {}
|
||||
importable = {}
|
||||
|
||||
try:
|
||||
searchdirs=os.environ['include'].split(';')
|
||||
except KeyError:
|
||||
try:
|
||||
searchdirs=os.environ['INCLUDE'].split(';')
|
||||
except KeyError:
|
||||
try:
|
||||
if sys.platform.find("beos") == 0:
|
||||
searchdirs=os.environ['BEINCLUDES'].split(';')
|
||||
elif sys.platform.startswith("atheos"):
|
||||
searchdirs=os.environ['C_INCLUDE_PATH'].split(':')
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
searchdirs=['/usr/include']
|
||||
|
||||
def main():
    """Translate each C header named on the command line (or stdin with
    no arguments / '-') into a Python module of constants.

    For a file argument FOO.h the output goes to FOO.py in the current
    directory; -i options add extra regular expressions to ignore.
    """
    global filedict
    opts, args = getopt.getopt(sys.argv[1:], 'i:')
    for o, a in opts:
        if o == '-i':
            ignores.append(re.compile(a))
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            sys.stdout.write('# Generated by h2py from stdin\n')
            process(sys.stdin, sys.stdout)
        else:
            fp = open(filename, 'r')
            # Derive the output module name: basename, extension stripped,
            # upper-cased (e.g. sys/stat.h -> STAT.py).
            outfile = os.path.basename(filename)
            i = outfile.rfind('.')
            if i > 0: outfile = outfile[:i]
            modname = outfile.upper()
            outfile = modname + '.py'
            outfp = open(outfile, 'w')
            outfp.write('# Generated by h2py from %s\n' % filename)
            # Reset per-file include tracking; if the input lives under a
            # search directory, record it as importable so later headers
            # can 'from MODNAME import *' instead of re-expanding it.
            filedict = {}
            for dir in searchdirs:
                if filename[:len(dir)] == dir:
                    filedict[filename[len(dir)+1:]] = None  # no '/' trailing
                    importable[filename[len(dir)+1:]] = modname
                    break
            process(fp, outfp)
            outfp.close()
            fp.close()
|
||||
|
||||
def pytify(body):
    """Massage the right-hand side of a C #define into a Python expression.

    Strips ignored patterns (comments, -i expressions), turns character
    literals into ord(...) calls, and rewrites hex constants that exceed
    sys.maxint as their negative two's-complement value so the generated
    Python matches the C compiler's (word-sized, signed) interpretation.
    """
    # replace ignored patterns by spaces
    for p in ignores:
        body = p.sub(' ', body)
    # replace char literals by ord(...)
    body = p_char.sub("ord('\\1')", body)
    # Compute negative hexadecimal constants
    start = 0
    UMAX = 2*(sys.maxint+1)
    while 1:
        m = p_hex.search(body, start)
        if not m: break
        s,e = m.span()
        val = long(body[slice(*m.span(1))], 16)
        if val > sys.maxint:
            # Fold into the signed range, e.g. 0xffffffff -> -1 on 32 bits.
            val -= UMAX
        body = body[:s] + "(" + str(val) + ")" + body[e:]
        # Resume just past the opening parenthesis we inserted.
        start = s + 1
    return body
|
||||
|
||||
def process(fp, outfp, env = {}):
    """Translate the C header read from *fp*, writing Python to *outfp*.

    Recognizes simple #define constants, one-argument #define macros and
    #include <...> lines; anything that does not exec cleanly as Python
    is reported on stderr and skipped.  *env* accumulates the namespace
    the generated statements are exec'd in, and is passed down through
    recursive #include expansion.
    NOTE(review): the mutable default for *env* means state persists
    across top-level calls within one run -- presumably intentional so
    later headers see earlier definitions; confirm before changing.
    """
    lineno = 0
    while 1:
        line = fp.readline()
        if not line: break
        lineno = lineno + 1
        match = p_define.match(line)
        if match:
            # gobble up continuation lines
            while line[-2:] == '\\\n':
                nextline = fp.readline()
                if not nextline: break
                lineno = lineno + 1
                line = line + nextline
            name = match.group(1)
            body = line[match.end():]
            body = pytify(body)
            ok = 0
            stmt = '%s = %s\n' % (name, body.strip())
            # Only emit statements that actually execute as Python.
            try:
                exec stmt in env
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_macro.match(line)
        if match:
            # One-argument function-like macro -> one-line Python def.
            macro, arg = match.group(1, 2)
            body = line[match.end():]
            body = pytify(body)
            stmt = 'def %s(%s): return %s\n' % (macro, arg, body)
            try:
                exec stmt in env
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_include.match(line)
        if match:
            regs = match.regs
            a, b = regs[1]
            filename = line[a:b]
            if importable.has_key(filename):
                # Header already translated to a module: just import it.
                outfp.write('from %s import *\n' % importable[filename])
            elif not filedict.has_key(filename):
                # First time we see this include: expand it inline.
                filedict[filename] = None
                inclfp = None
                for dir in searchdirs:
                    try:
                        inclfp = open(dir + '/' + filename)
                        break
                    except IOError:
                        pass
                if inclfp:
                    outfp.write(
                        '\n# Included from %s\n' % filename)
                    process(inclfp, outfp, env)
                else:
                    sys.stderr.write('Warning - could not find file %s\n' %
                                     filename)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
|
||||
"""
|
||||
Run a Python script under hotshot's control.
|
||||
|
||||
Adapted from a posting on python-dev by Walter D<>rwald
|
||||
|
||||
usage %prog [ %prog args ] filename [ filename args ]
|
||||
|
||||
Any arguments after the filename are used as sys.argv for the filename.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import optparse
|
||||
import os
|
||||
import hotshot
|
||||
import hotshot.stats
|
||||
|
||||
PROFILE = "hotshot.prof"
|
||||
|
||||
def run_hotshot(filename, profile, args):
    """Run the script *filename* under hotshot, writing data to *profile*.

    *args* becomes the profiled script's sys.argv[1:], and the script's
    own directory is prepended to sys.path so its local imports work.
    The statistics report is printed to stderr.  Returns 0.
    """
    prof = hotshot.Profile(profile)
    sys.path.insert(0, os.path.dirname(filename))
    sys.argv = [filename] + args
    prof.run("execfile(%r)" % filename)
    prof.close()
    stats = hotshot.stats.load(profile)
    stats.sort_stats("time", "calls")

    # print_stats uses unadorned print statements, so the only way
    # to force output to stderr is to reassign sys.stdout temporarily.
    # Fix: restore sys.stdout in a finally clause so a failure inside
    # print_stats() no longer leaves stdout pointing at stderr.
    save_stdout = sys.stdout
    sys.stdout = sys.stderr
    try:
        stats.print_stats()
    finally:
        sys.stdout = save_stdout

    return 0
|
||||
|
||||
def main(args):
    """Parse options and hand the target script to run_hotshot().

    Returns the process exit status: 1 on a usage error, otherwise
    whatever run_hotshot() returns.
    """
    parser = optparse.OptionParser(__doc__)
    # Stop option parsing at the script name so the profiled script's own
    # options are passed through untouched.
    parser.disable_interspersed_args()
    parser.add_option("-p", "--profile", action="store", default=PROFILE,
                      dest="profile", help='Specify profile file to use')
    (options, args) = parser.parse_args(args)

    if len(args) == 0:
        # Fix: print_help()'s argument is an output *file*, not a message;
        # the original passed a string, which fails with an AttributeError.
        # Report the error on stderr and print the help text there too.
        sys.stderr.write("missing script to execute\n")
        parser.print_help(sys.stderr)
        return 1

    filename = args[0]
    return run_hotshot(filename, options.profile, args[1:])
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main(sys.argv[1:]))
|
@ -0,0 +1,5 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
from idlelib.PyShell import main
|
||||
if __name__ == '__main__':
|
||||
main()
|
112
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/ifdef.py
Normal file
112
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/ifdef.py
Normal file
@ -0,0 +1,112 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Selectively preprocess #ifdef / #ifndef statements.
|
||||
# Usage:
|
||||
# ifdef [-Dname] ... [-Uname] ... [file] ...
|
||||
#
|
||||
# This scans the file(s), looking for #ifdef and #ifndef preprocessor
|
||||
# commands that test for one of the names mentioned in the -D and -U
|
||||
# options. On standard output it writes a copy of the input file(s)
|
||||
# minus those code sections that are suppressed by the selected
|
||||
# combination of defined/undefined symbols. The #if(n)def/#else/#else
|
||||
# lines themselfs (if the #if(n)def tests for one of the mentioned
|
||||
# names) are removed as well.
|
||||
|
||||
# Features: Arbitrary nesting of recognized and unrecognized
|
||||
# preprocesor statements works correctly. Unrecognized #if* commands
|
||||
# are left in place, so it will never remove too much, only too
|
||||
# little. It does accept whitespace around the '#' character.
|
||||
|
||||
# Restrictions: There should be no comments or other symbols on the
|
||||
# #if(n)def lines. The effect of #define/#undef commands in the input
|
||||
# file or in included files is not taken into account. Tests using
|
||||
# #if and the defined() pseudo function are not recognized. The #elif
|
||||
# command is not recognized. Improperly nesting is not detected.
|
||||
# Lines that look like preprocessor commands but which are actually
|
||||
# part of comments or string literals will be mistaken for
|
||||
# preprocessor commands.
|
||||
|
||||
import sys
|
||||
import getopt
|
||||
|
||||
defs = []
|
||||
undefs = []
|
||||
|
||||
def main():
    """Collect -D/-U symbol options, then filter each input file to stdout.

    With no file arguments (or an argument of '-') standard input is
    filtered instead.
    """
    opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
    for opt, value in opts:
        if opt == '-D':
            defs.append(value)
        if opt == '-U':
            undefs.append(value)
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            process(sys.stdin, sys.stdout)
        else:
            fp = open(filename, 'r')
            process(fp, sys.stdout)
            fp.close()
|
||||
|
||||
def process(fpi, fpo):
    """Copy *fpi* to *fpo*, resolving #ifdef/#ifndef tests against the
    module-level defs/undefs lists.

    State: *ok* says whether lines are currently being emitted; *stack*
    holds one (saved_ok, ko, word) entry per open conditional, where
    ko is 1/0 for a resolved test (1 = the #else branch is the live one
    after flipping) and -1 for an unrecognized symbol that must be
    passed through verbatim.
    """
    keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
    ok = 1
    stack = []
    while 1:
        line = fpi.readline()
        if not line: break
        # Join backslash-continued lines before examining them.
        while line[-2:] == '\\\n':
            nextline = fpi.readline()
            if not nextline: break
            line = line + nextline
        tmp = line.strip()
        if tmp[:1] != '#':
            # Not a preprocessor line: copy it when in an emitting region.
            if ok: fpo.write(line)
            continue
        tmp = tmp[1:].strip()
        words = tmp.split()
        keyword = words[0]
        if keyword not in keywords:
            if ok: fpo.write(line)
            continue
        if keyword in ('ifdef', 'ifndef') and len(words) == 2:
            # ko == the suppression sense: for #ifdef a defined symbol
            # keeps the body, for #ifndef it suppresses it.
            if keyword == 'ifdef':
                ko = 1
            else:
                ko = 0
            word = words[1]
            if word in defs:
                stack.append((ok, ko, word))
                if not ko: ok = 0
            elif word in undefs:
                stack.append((ok, not ko, word))
                if ko: ok = 0
            else:
                # Symbol not mentioned in -D/-U: leave the directive in.
                stack.append((ok, -1, word))
                if ok: fpo.write(line)
        elif keyword == 'if':
            # Plain #if is never interpreted; passed through unchanged.
            stack.append((ok, -1, ''))
            if ok: fpo.write(line)
        elif keyword == 'else' and stack:
            s_ok, s_ko, s_word = stack[-1]
            if s_ko < 0:
                # Unrecognized conditional: keep the #else line itself.
                if ok: fpo.write(line)
            else:
                # Flip which branch is live for a resolved conditional.
                s_ko = not s_ko
                ok = s_ok
                if not s_ko: ok = 0
                stack[-1] = s_ok, s_ko, s_word
        elif keyword == 'endif' and stack:
            s_ok, s_ko, s_word = stack[-1]
            if s_ko < 0:
                if ok: fpo.write(line)
            del stack[-1]
            # Restore the emit state from before this conditional opened.
            ok = s_ok
        else:
            sys.stderr.write('Unknown keyword %s\n' % keyword)
    if stack:
        # Leftover entries mean unbalanced #if(n)def/#endif in the input.
        sys.stderr.write('stack: %s\n' % stack)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,24 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"Replace LF with CRLF in argument files. Print names of changed files."
|
||||
|
||||
import sys, re, os
|
||||
|
||||
def main():
|
||||
for filename in sys.argv[1:]:
|
||||
if os.path.isdir(filename):
|
||||
print filename, "Directory!"
|
||||
continue
|
||||
data = open(filename, "rb").read()
|
||||
if '\0' in data:
|
||||
print filename, "Binary!"
|
||||
continue
|
||||
newdata = re.sub("\r?\n", "\r\n", data)
|
||||
if newdata != data:
|
||||
print filename
|
||||
f = open(filename, "wb")
|
||||
f.write(newdata)
|
||||
f.close()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,80 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# linktree
|
||||
#
|
||||
# Make a copy of a directory tree with symbolic links to all files in the
|
||||
# original tree.
|
||||
# All symbolic links go to a special symbolic link at the top, so you
|
||||
# can easily fix things if the original source tree moves.
|
||||
# See also "mkreal".
|
||||
#
|
||||
# usage: mklinks oldtree newtree
|
||||
|
||||
import sys, os
|
||||
|
||||
LINK = '.LINK' # Name of special symlink at the top.
|
||||
|
||||
debug = 0
|
||||
|
||||
def main():
    """Parse 'oldtree newtree [linkto]' arguments, create the new tree's
    root and its special top-level symlink, then delegate the recursive
    population to linknames().

    Returns a shell-style status: 0 on success, 1 on errors, 2 on usage.
    """
    if not 3 <= len(sys.argv) <= 4:
        print 'usage:', sys.argv[0], 'oldtree newtree [linkto]'
        return 2
    oldtree, newtree = sys.argv[1], sys.argv[2]
    if len(sys.argv) > 3:
        # Explicit link name given: creating it may legitimately fail
        # (e.g. it already exists from a previous run).
        link = sys.argv[3]
        link_may_fail = 1
    else:
        link = LINK
        link_may_fail = 0
    if not os.path.isdir(oldtree):
        print oldtree + ': not a directory'
        return 1
    try:
        os.mkdir(newtree, 0777)
    except os.error, msg:
        print newtree + ': cannot mkdir:', msg
        return 1
    # The one real symlink back to the original tree; every file link in
    # the copy is expressed relative to it.
    linkname = os.path.join(newtree, link)
    try:
        os.symlink(os.path.join(os.pardir, oldtree), linkname)
    except os.error, msg:
        if not link_may_fail:
            print linkname + ': cannot symlink:', msg
            return 1
        else:
            print linkname + ': warning: cannot symlink:', msg
    linknames(oldtree, newtree, link)
    return 0
|
||||
|
||||
def linknames(old, new, link):
|
||||
if debug: print 'linknames', (old, new, link)
|
||||
try:
|
||||
names = os.listdir(old)
|
||||
except os.error, msg:
|
||||
print old + ': warning: cannot listdir:', msg
|
||||
return
|
||||
for name in names:
|
||||
if name not in (os.curdir, os.pardir):
|
||||
oldname = os.path.join(old, name)
|
||||
linkname = os.path.join(link, name)
|
||||
newname = os.path.join(new, name)
|
||||
if debug > 1: print oldname, newname, linkname
|
||||
if os.path.isdir(oldname) and \
|
||||
not os.path.islink(oldname):
|
||||
try:
|
||||
os.mkdir(newname, 0777)
|
||||
ok = 1
|
||||
except:
|
||||
print newname + \
|
||||
': warning: cannot mkdir:', msg
|
||||
ok = 0
|
||||
if ok:
|
||||
linkname = os.path.join(os.pardir,
|
||||
linkname)
|
||||
linknames(oldname, newname, linkname)
|
||||
else:
|
||||
os.symlink(linkname, newname)
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
28
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/lll.py
Normal file
28
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/lll.py
Normal file
@ -0,0 +1,28 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Find symbolic links and show where they point to.
|
||||
# Arguments are directories to search; default is current directory.
|
||||
# No recursion.
|
||||
# (This is a totally different program from "findsymlinks.py"!)
|
||||
|
||||
import sys, os
|
||||
|
||||
def lll(dirname):
|
||||
for name in os.listdir(dirname):
|
||||
if name not in (os.curdir, os.pardir):
|
||||
full = os.path.join(dirname, name)
|
||||
if os.path.islink(full):
|
||||
print name, '->', os.readlink(full)
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
if not args: args = [os.curdir]
|
||||
first = 1
|
||||
for arg in args:
|
||||
if len(args) > 1:
|
||||
if not first: print
|
||||
first = 0
|
||||
print arg + ':'
|
||||
lll(arg)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,185 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""Consolidate a bunch of CVS or RCS logs read from stdin.
|
||||
|
||||
Input should be the output of a CVS or RCS logging command, e.g.
|
||||
|
||||
cvs log -rrelease14:
|
||||
|
||||
which dumps all log messages from release1.4 upwards (assuming that
|
||||
release 1.4 was tagged with tag 'release14'). Note the trailing
|
||||
colon!
|
||||
|
||||
This collects all the revision records and outputs them sorted by date
|
||||
rather than by file, collapsing duplicate revision record, i.e.,
|
||||
records with the same message for different files.
|
||||
|
||||
The -t option causes it to truncate (discard) the last revision log
|
||||
entry; this is useful when using something like the above cvs log
|
||||
command, which shows the revisions including the given tag, while you
|
||||
probably want everything *since* that tag.
|
||||
|
||||
The -r option reverses the output (oldest first; the default is oldest
|
||||
last).
|
||||
|
||||
The -b tag option restricts the output to *only* checkin messages
|
||||
belonging to the given branch tag. The form -b HEAD restricts the
|
||||
output to checkin messages belonging to the CVS head (trunk). (It
|
||||
produces some output if tag is a non-branch tag, but this output is
|
||||
not very useful.)
|
||||
|
||||
-h prints this message and exits.
|
||||
|
||||
XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7
|
||||
from their output.
|
||||
"""
|
||||
|
||||
import sys, errno, getopt, re
|
||||
|
||||
sep1 = '='*77 + '\n' # file separator
|
||||
sep2 = '-'*28 + '\n' # revision separator
|
||||
|
||||
def main():
|
||||
"""Main program"""
|
||||
truncate_last = 0
|
||||
reverse = 0
|
||||
branch = None
|
||||
opts, args = getopt.getopt(sys.argv[1:], "trb:h")
|
||||
for o, a in opts:
|
||||
if o == '-t':
|
||||
truncate_last = 1
|
||||
elif o == '-r':
|
||||
reverse = 1
|
||||
elif o == '-b':
|
||||
branch = a
|
||||
elif o == '-h':
|
||||
print __doc__
|
||||
sys.exit(0)
|
||||
database = []
|
||||
while 1:
|
||||
chunk = read_chunk(sys.stdin)
|
||||
if not chunk:
|
||||
break
|
||||
records = digest_chunk(chunk, branch)
|
||||
if truncate_last:
|
||||
del records[-1]
|
||||
database[len(database):] = records
|
||||
database.sort()
|
||||
if not reverse:
|
||||
database.reverse()
|
||||
format_output(database)
|
||||
|
||||
def read_chunk(fp):
|
||||
"""Read a chunk -- data for one file, ending with sep1.
|
||||
|
||||
Split the chunk in parts separated by sep2.
|
||||
|
||||
"""
|
||||
chunk = []
|
||||
lines = []
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line:
|
||||
break
|
||||
if line == sep1:
|
||||
if lines:
|
||||
chunk.append(lines)
|
||||
break
|
||||
if line == sep2:
|
||||
if lines:
|
||||
chunk.append(lines)
|
||||
lines = []
|
||||
else:
|
||||
lines.append(line)
|
||||
return chunk
|
||||
|
||||
def digest_chunk(chunk, branch=None):
|
||||
"""Digest a chunk -- extract working file name and revisions"""
|
||||
lines = chunk[0]
|
||||
key = 'Working file:'
|
||||
keylen = len(key)
|
||||
for line in lines:
|
||||
if line[:keylen] == key:
|
||||
working_file = line[keylen:].strip()
|
||||
break
|
||||
else:
|
||||
working_file = None
|
||||
if branch is None:
|
||||
pass
|
||||
elif branch == "HEAD":
|
||||
branch = re.compile(r"^\d+\.\d+$")
|
||||
else:
|
||||
revisions = {}
|
||||
key = 'symbolic names:\n'
|
||||
found = 0
|
||||
for line in lines:
|
||||
if line == key:
|
||||
found = 1
|
||||
elif found:
|
||||
if line[0] in '\t ':
|
||||
tag, rev = line.split()
|
||||
if tag[-1] == ':':
|
||||
tag = tag[:-1]
|
||||
revisions[tag] = rev
|
||||
else:
|
||||
found = 0
|
||||
rev = revisions.get(branch)
|
||||
branch = re.compile(r"^<>$") # <> to force a mismatch by default
|
||||
if rev:
|
||||
if rev.find('.0.') >= 0:
|
||||
rev = rev.replace('.0.', '.')
|
||||
branch = re.compile(r"^" + re.escape(rev) + r"\.\d+$")
|
||||
records = []
|
||||
for lines in chunk[1:]:
|
||||
revline = lines[0]
|
||||
dateline = lines[1]
|
||||
text = lines[2:]
|
||||
words = dateline.split()
|
||||
author = None
|
||||
if len(words) >= 3 and words[0] == 'date:':
|
||||
dateword = words[1]
|
||||
timeword = words[2]
|
||||
if timeword[-1:] == ';':
|
||||
timeword = timeword[:-1]
|
||||
date = dateword + ' ' + timeword
|
||||
if len(words) >= 5 and words[3] == 'author:':
|
||||
author = words[4]
|
||||
if author[-1:] == ';':
|
||||
author = author[:-1]
|
||||
else:
|
||||
date = None
|
||||
text.insert(0, revline)
|
||||
words = revline.split()
|
||||
if len(words) >= 2 and words[0] == 'revision':
|
||||
rev = words[1]
|
||||
else:
|
||||
# No 'revision' line -- weird...
|
||||
rev = None
|
||||
text.insert(0, revline)
|
||||
if branch:
|
||||
if rev is None or not branch.match(rev):
|
||||
continue
|
||||
records.append((date, working_file, rev, author, text))
|
||||
return records
|
||||
|
||||
def format_output(database):
|
||||
prevtext = None
|
||||
prev = []
|
||||
database.append((None, None, None, None, None)) # Sentinel
|
||||
for (date, working_file, rev, author, text) in database:
|
||||
if text != prevtext:
|
||||
if prev:
|
||||
print sep2,
|
||||
for (p_date, p_working_file, p_rev, p_author) in prev:
|
||||
print p_date, p_author, p_working_file, p_rev
|
||||
sys.stdout.writelines(prevtext)
|
||||
prev = []
|
||||
prev.append((date, working_file, rev, author))
|
||||
prevtext = text
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
main()
|
||||
except IOError, e:
|
||||
if e.errno != errno.EPIPE:
|
||||
raise
|
@ -0,0 +1,237 @@
|
||||
"""mailerdaemon - classes to parse mailer-daemon messages"""
|
||||
|
||||
import rfc822
|
||||
import calendar
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
|
||||
Unparseable = 'mailerdaemon.Unparseable'
|
||||
|
||||
class ErrorMessage(rfc822.Message):
|
||||
def __init__(self, fp):
|
||||
rfc822.Message.__init__(self, fp)
|
||||
self.sub = ''
|
||||
|
||||
def is_warning(self):
|
||||
sub = self.getheader('Subject')
|
||||
if not sub:
|
||||
return 0
|
||||
sub = sub.lower()
|
||||
if sub.startswith('waiting mail'): return 1
|
||||
if 'warning' in sub: return 1
|
||||
self.sub = sub
|
||||
return 0
|
||||
|
||||
def get_errors(self):
|
||||
for p in EMPARSERS:
|
||||
self.rewindbody()
|
||||
try:
|
||||
return p(self.fp, self.sub)
|
||||
except Unparseable:
|
||||
pass
|
||||
raise Unparseable
|
||||
|
||||
# List of re's or tuples of re's.
|
||||
# If a re, it should contain at least a group (?P<email>...) which
|
||||
# should refer to the email address. The re can also contain a group
|
||||
# (?P<reason>...) which should refer to the reason (error message).
|
||||
# If no reason is present, the emparse_list_reason list is used to
|
||||
# find a reason.
|
||||
# If a tuple, the tuple should contain 2 re's. The first re finds a
|
||||
# location, the second re is repeated one or more times to find
|
||||
# multiple email addresses. The second re is matched (not searched)
|
||||
# where the previous match ended.
|
||||
# The re's are compiled using the re module.
|
||||
emparse_list_list = [
|
||||
'error: (?P<reason>unresolvable): (?P<email>.+)',
|
||||
('----- The following addresses had permanent fatal errors -----\n',
|
||||
'(?P<email>[^ \n].*)\n( .*\n)?'),
|
||||
'remote execution.*\n.*rmail (?P<email>.+)',
|
||||
('The following recipients did not receive your message:\n\n',
|
||||
' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
|
||||
'------- Failure Reasons --------\n\n(?P<reason>.*)\n(?P<email>.*)',
|
||||
'^<(?P<email>.*)>:\n(?P<reason>.*)',
|
||||
'^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
|
||||
'^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
|
||||
'^Original-Recipient: rfc822;(?P<email>.*)',
|
||||
'^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
|
||||
'^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
|
||||
'^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
|
||||
'^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
|
||||
'^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
|
||||
]
|
||||
# compile the re's in the list and store them in-place.
|
||||
for i in range(len(emparse_list_list)):
|
||||
x = emparse_list_list[i]
|
||||
if type(x) is type(''):
|
||||
x = re.compile(x, re.MULTILINE)
|
||||
else:
|
||||
xl = []
|
||||
for x in x:
|
||||
xl.append(re.compile(x, re.MULTILINE))
|
||||
x = tuple(xl)
|
||||
del xl
|
||||
emparse_list_list[i] = x
|
||||
del x
|
||||
del i
|
||||
|
||||
# list of re's used to find reasons (error messages).
|
||||
# if a string, "<>" is replaced by a copy of the email address.
|
||||
# The expressions are searched for in order. After the first match,
|
||||
# no more expressions are searched for. So, order is important.
|
||||
emparse_list_reason = [
|
||||
r'^5\d{2} <>\.\.\. (?P<reason>.*)',
|
||||
'<>\.\.\. (?P<reason>.*)',
|
||||
re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
|
||||
re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
|
||||
re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
|
||||
]
|
||||
emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
|
||||
def emparse_list(fp, sub):
|
||||
data = fp.read()
|
||||
res = emparse_list_from.search(data)
|
||||
if res is None:
|
||||
from_index = len(data)
|
||||
else:
|
||||
from_index = res.start(0)
|
||||
errors = []
|
||||
emails = []
|
||||
reason = None
|
||||
for regexp in emparse_list_list:
|
||||
if type(regexp) is type(()):
|
||||
res = regexp[0].search(data, 0, from_index)
|
||||
if res is not None:
|
||||
try:
|
||||
reason = res.group('reason')
|
||||
except IndexError:
|
||||
pass
|
||||
while 1:
|
||||
res = regexp[1].match(data, res.end(0), from_index)
|
||||
if res is None:
|
||||
break
|
||||
emails.append(res.group('email'))
|
||||
break
|
||||
else:
|
||||
res = regexp.search(data, 0, from_index)
|
||||
if res is not None:
|
||||
emails.append(res.group('email'))
|
||||
try:
|
||||
reason = res.group('reason')
|
||||
except IndexError:
|
||||
pass
|
||||
break
|
||||
if not emails:
|
||||
raise Unparseable
|
||||
if not reason:
|
||||
reason = sub
|
||||
if reason[:15] == 'returned mail: ':
|
||||
reason = reason[15:]
|
||||
for regexp in emparse_list_reason:
|
||||
if type(regexp) is type(''):
|
||||
for i in range(len(emails)-1,-1,-1):
|
||||
email = emails[i]
|
||||
exp = re.compile(re.escape(email).join(regexp.split('<>')), re.MULTILINE)
|
||||
res = exp.search(data)
|
||||
if res is not None:
|
||||
errors.append(' '.join((email.strip()+': '+res.group('reason')).split()))
|
||||
del emails[i]
|
||||
continue
|
||||
res = regexp.search(data)
|
||||
if res is not None:
|
||||
reason = res.group('reason')
|
||||
break
|
||||
for email in emails:
|
||||
errors.append(' '.join((email.strip()+': '+reason).split()))
|
||||
return errors
|
||||
|
||||
EMPARSERS = [emparse_list, ]
|
||||
|
||||
def sort_numeric(a, b):
|
||||
a = int(a)
|
||||
b = int(b)
|
||||
if a < b: return -1
|
||||
elif a > b: return 1
|
||||
else: return 0
|
||||
|
||||
def parsedir(dir, modify):
|
||||
os.chdir(dir)
|
||||
pat = re.compile('^[0-9]*$')
|
||||
errordict = {}
|
||||
errorfirst = {}
|
||||
errorlast = {}
|
||||
nok = nwarn = nbad = 0
|
||||
|
||||
# find all numeric file names and sort them
|
||||
files = filter(lambda fn, pat=pat: pat.match(fn) is not None, os.listdir('.'))
|
||||
files.sort(sort_numeric)
|
||||
|
||||
for fn in files:
|
||||
# Lets try to parse the file.
|
||||
fp = open(fn)
|
||||
m = ErrorMessage(fp)
|
||||
sender = m.getaddr('From')
|
||||
print '%s\t%-40s\t'%(fn, sender[1]),
|
||||
|
||||
if m.is_warning():
|
||||
fp.close()
|
||||
print 'warning only'
|
||||
nwarn = nwarn + 1
|
||||
if modify:
|
||||
os.rename(fn, ','+fn)
|
||||
## os.unlink(fn)
|
||||
continue
|
||||
|
||||
try:
|
||||
errors = m.get_errors()
|
||||
except Unparseable:
|
||||
print '** Not parseable'
|
||||
nbad = nbad + 1
|
||||
fp.close()
|
||||
continue
|
||||
print len(errors), 'errors'
|
||||
|
||||
# Remember them
|
||||
for e in errors:
|
||||
try:
|
||||
mm, dd = m.getdate('date')[1:1+2]
|
||||
date = '%s %02d' % (calendar.month_abbr[mm], dd)
|
||||
except:
|
||||
date = '??????'
|
||||
if not errordict.has_key(e):
|
||||
errordict[e] = 1
|
||||
errorfirst[e] = '%s (%s)' % (fn, date)
|
||||
else:
|
||||
errordict[e] = errordict[e] + 1
|
||||
errorlast[e] = '%s (%s)' % (fn, date)
|
||||
|
||||
fp.close()
|
||||
nok = nok + 1
|
||||
if modify:
|
||||
os.rename(fn, ','+fn)
|
||||
## os.unlink(fn)
|
||||
|
||||
print '--------------'
|
||||
print nok, 'files parsed,',nwarn,'files warning-only,',
|
||||
print nbad,'files unparseable'
|
||||
print '--------------'
|
||||
list = []
|
||||
for e in errordict.keys():
|
||||
list.append((errordict[e], errorfirst[e], errorlast[e], e))
|
||||
list.sort()
|
||||
for num, first, last, e in list:
|
||||
print '%d %s - %s\t%s' % (num, first, last, e)
|
||||
|
||||
def main():
|
||||
modify = 0
|
||||
if len(sys.argv) > 1 and sys.argv[1] == '-d':
|
||||
modify = 1
|
||||
del sys.argv[1]
|
||||
if len(sys.argv) > 1:
|
||||
for folder in sys.argv[1:]:
|
||||
parsedir(folder, modify)
|
||||
else:
|
||||
parsedir('/ufs/jack/Mail/errorsinbox', modify)
|
||||
|
||||
if __name__ == '__main__' or sys.argv[0] == __name__:
|
||||
main()
|
@ -0,0 +1,90 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""Python utility to print MD5 checksums of argument files.
|
||||
"""
|
||||
|
||||
|
||||
bufsize = 8096
|
||||
fnfilter = None
|
||||
rmode = 'rb'
|
||||
|
||||
usage = """
|
||||
usage: sum5 [-b] [-t] [-l] [-s bufsize] [file ...]
|
||||
-b : read files in binary mode (default)
|
||||
-t : read files in text mode (you almost certainly don't want this!)
|
||||
-l : print last pathname component only
|
||||
-s bufsize: read buffer size (default %d)
|
||||
file ... : files to sum; '-' or no files means stdin
|
||||
""" % bufsize
|
||||
|
||||
import sys
|
||||
import os
|
||||
import getopt
|
||||
import md5
|
||||
|
||||
def sum(*files):
|
||||
sts = 0
|
||||
if files and isinstance(files[-1], file):
|
||||
out, files = files[-1], files[:-1]
|
||||
else:
|
||||
out = sys.stdout
|
||||
if len(files) == 1 and not isinstance(files[0], str):
|
||||
files = files[0]
|
||||
for f in files:
|
||||
if isinstance(f, str):
|
||||
if f == '-':
|
||||
sts = printsumfp(sys.stdin, '<stdin>', out) or sts
|
||||
else:
|
||||
sts = printsum(f, out) or sts
|
||||
else:
|
||||
sts = sum(f, out) or sts
|
||||
return sts
|
||||
|
||||
def printsum(filename, out=sys.stdout):
|
||||
try:
|
||||
fp = open(filename, rmode)
|
||||
except IOError, msg:
|
||||
sys.stderr.write('%s: Can\'t open: %s\n' % (filename, msg))
|
||||
return 1
|
||||
if fnfilter:
|
||||
filename = fnfilter(filename)
|
||||
sts = printsumfp(fp, filename, out)
|
||||
fp.close()
|
||||
return sts
|
||||
|
||||
def printsumfp(fp, filename, out=sys.stdout):
|
||||
m = md5.new()
|
||||
try:
|
||||
while 1:
|
||||
data = fp.read(bufsize)
|
||||
if not data:
|
||||
break
|
||||
m.update(data)
|
||||
except IOError, msg:
|
||||
sys.stderr.write('%s: I/O error: %s\n' % (filename, msg))
|
||||
return 1
|
||||
out.write('%s %s\n' % (m.hexdigest(), filename))
|
||||
return 0
|
||||
|
||||
def main(args = sys.argv[1:], out=sys.stdout):
|
||||
global fnfilter, rmode, bufsize
|
||||
try:
|
||||
opts, args = getopt.getopt(args, 'blts:')
|
||||
except getopt.error, msg:
|
||||
sys.stderr.write('%s: %s\n%s' % (sys.argv[0], msg, usage))
|
||||
return 2
|
||||
for o, a in opts:
|
||||
if o == '-l':
|
||||
fnfilter = os.path.basename
|
||||
elif o == '-b':
|
||||
rmode = 'rb'
|
||||
elif o == '-t':
|
||||
rmode = 'r'
|
||||
elif o == '-s':
|
||||
bufsize = int(a)
|
||||
if not args:
|
||||
args = ['-']
|
||||
return sum(args, out)
|
||||
|
||||
if __name__ == '__main__' or __name__ == sys.argv[0]:
|
||||
sys.exit(main(sys.argv[1:], sys.stdout))
|
171
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/methfix.py
Normal file
171
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/methfix.py
Normal file
@ -0,0 +1,171 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Fix Python source files to avoid using
|
||||
# def method(self, (arg1, ..., argn)):
|
||||
# instead of the more rational
|
||||
# def method(self, arg1, ..., argn):
|
||||
#
|
||||
# Command line arguments are files or directories to be processed.
|
||||
# Directories are searched recursively for files whose name looks
|
||||
# like a python module.
|
||||
# Symbolic links are always ignored (except as explicit directory
|
||||
# arguments). Of course, the original file is kept as a back-up
|
||||
# (with a "~" attached to its name).
|
||||
# It complains about binaries (files containing null bytes)
|
||||
# and about files that are ostensibly not Python files: if the first
|
||||
# line starts with '#!' and does not contain the string 'python'.
|
||||
#
|
||||
# Changes made are reported to stdout in a diff-like format.
|
||||
#
|
||||
# Undoubtedly you can do this using find and sed or perl, but this is
|
||||
# a nice example of Python code that recurses down a directory tree
|
||||
# and uses regular expressions. Also note several subtleties like
|
||||
# preserving the file's mode and avoiding to even write a temp file
|
||||
# when no changes are needed for a file.
|
||||
#
|
||||
# NB: by changing only the function fixline() you can turn this
|
||||
# into a program for a different change to Python programs...
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
from stat import *
|
||||
|
||||
err = sys.stderr.write
|
||||
dbg = err
|
||||
rep = sys.stdout.write
|
||||
|
||||
def main():
|
||||
bad = 0
|
||||
if not sys.argv[1:]: # No arguments
|
||||
err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
|
||||
sys.exit(2)
|
||||
for arg in sys.argv[1:]:
|
||||
if os.path.isdir(arg):
|
||||
if recursedown(arg): bad = 1
|
||||
elif os.path.islink(arg):
|
||||
err(arg + ': will not process symbolic links\n')
|
||||
bad = 1
|
||||
else:
|
||||
if fix(arg): bad = 1
|
||||
sys.exit(bad)
|
||||
|
||||
ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
|
||||
def ispython(name):
|
||||
return ispythonprog.match(name) >= 0
|
||||
|
||||
def recursedown(dirname):
|
||||
dbg('recursedown(%r)\n' % (dirname,))
|
||||
bad = 0
|
||||
try:
|
||||
names = os.listdir(dirname)
|
||||
except os.error, msg:
|
||||
err('%s: cannot list directory: %r\n' % (dirname, msg))
|
||||
return 1
|
||||
names.sort()
|
||||
subdirs = []
|
||||
for name in names:
|
||||
if name in (os.curdir, os.pardir): continue
|
||||
fullname = os.path.join(dirname, name)
|
||||
if os.path.islink(fullname): pass
|
||||
elif os.path.isdir(fullname):
|
||||
subdirs.append(fullname)
|
||||
elif ispython(name):
|
||||
if fix(fullname): bad = 1
|
||||
for fullname in subdirs:
|
||||
if recursedown(fullname): bad = 1
|
||||
return bad
|
||||
|
||||
def fix(filename):
|
||||
## dbg('fix(%r)\n' % (filename,))
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, msg:
|
||||
err('%s: cannot open: %r\n' % (filename, msg))
|
||||
return 1
|
||||
head, tail = os.path.split(filename)
|
||||
tempname = os.path.join(head, '@' + tail)
|
||||
g = None
|
||||
# If we find a match, we rewind the file and start over but
|
||||
# now copy everything to a temp file.
|
||||
lineno = 0
|
||||
while 1:
|
||||
line = f.readline()
|
||||
if not line: break
|
||||
lineno = lineno + 1
|
||||
if g is None and '\0' in line:
|
||||
# Check for binary files
|
||||
err(filename + ': contains null bytes; not fixed\n')
|
||||
f.close()
|
||||
return 1
|
||||
if lineno == 1 and g is None and line[:2] == '#!':
|
||||
# Check for non-Python scripts
|
||||
words = line[2:].split()
|
||||
if words and re.search('[pP]ython', words[0]) < 0:
|
||||
msg = filename + ': ' + words[0]
|
||||
msg = msg + ' script; not fixed\n'
|
||||
err(msg)
|
||||
f.close()
|
||||
return 1
|
||||
while line[-2:] == '\\\n':
|
||||
nextline = f.readline()
|
||||
if not nextline: break
|
||||
line = line + nextline
|
||||
lineno = lineno + 1
|
||||
newline = fixline(line)
|
||||
if newline != line:
|
||||
if g is None:
|
||||
try:
|
||||
g = open(tempname, 'w')
|
||||
except IOError, msg:
|
||||
f.close()
|
||||
err('%s: cannot create: %r\n' % (tempname, msg))
|
||||
return 1
|
||||
f.seek(0)
|
||||
lineno = 0
|
||||
rep(filename + ':\n')
|
||||
continue # restart from the beginning
|
||||
rep(repr(lineno) + '\n')
|
||||
rep('< ' + line)
|
||||
rep('> ' + newline)
|
||||
if g is not None:
|
||||
g.write(newline)
|
||||
|
||||
# End of file
|
||||
f.close()
|
||||
if not g: return 0 # No changes
|
||||
|
||||
# Finishing touch -- move files
|
||||
|
||||
# First copy the file's mode to the temp file
|
||||
try:
|
||||
statbuf = os.stat(filename)
|
||||
os.chmod(tempname, statbuf[ST_MODE] & 07777)
|
||||
except os.error, msg:
|
||||
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
|
||||
# Then make a backup of the original file as filename~
|
||||
try:
|
||||
os.rename(filename, filename + '~')
|
||||
except os.error, msg:
|
||||
err('%s: warning: backup failed (%r)\n' % (filename, msg))
|
||||
# Now move the temp file to the original file
|
||||
try:
|
||||
os.rename(tempname, filename)
|
||||
except os.error, msg:
|
||||
err('%s: rename failed (%r)\n' % (filename, msg))
|
||||
return 1
|
||||
# Return succes
|
||||
return 0
|
||||
|
||||
|
||||
fixpat = '^[ \t]+def +[a-zA-Z0-9_]+ *( *self *, *(( *(.*) *)) *) *:'
|
||||
fixprog = re.compile(fixpat)
|
||||
|
||||
def fixline(line):
|
||||
if fixprog.match(line) >= 0:
|
||||
(a, b), (c, d) = fixprog.regs[1:3]
|
||||
line = line[:a] + line[c:d] + line[b:]
|
||||
return line
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,66 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# mkreal
|
||||
#
|
||||
# turn a symlink to a directory into a real directory
|
||||
|
||||
import sys
|
||||
import os
|
||||
from stat import *
|
||||
|
||||
join = os.path.join
|
||||
|
||||
error = 'mkreal error'
|
||||
|
||||
BUFSIZE = 32*1024
|
||||
|
||||
def mkrealfile(name):
|
||||
st = os.stat(name) # Get the mode
|
||||
mode = S_IMODE(st[ST_MODE])
|
||||
linkto = os.readlink(name) # Make sure again it's a symlink
|
||||
f_in = open(name, 'r') # This ensures it's a file
|
||||
os.unlink(name)
|
||||
f_out = open(name, 'w')
|
||||
while 1:
|
||||
buf = f_in.read(BUFSIZE)
|
||||
if not buf: break
|
||||
f_out.write(buf)
|
||||
del f_out # Flush data to disk before changing mode
|
||||
os.chmod(name, mode)
|
||||
|
||||
def mkrealdir(name):
|
||||
st = os.stat(name) # Get the mode
|
||||
mode = S_IMODE(st[ST_MODE])
|
||||
linkto = os.readlink(name)
|
||||
files = os.listdir(name)
|
||||
os.unlink(name)
|
||||
os.mkdir(name, mode)
|
||||
os.chmod(name, mode)
|
||||
linkto = join(os.pardir, linkto)
|
||||
#
|
||||
for filename in files:
|
||||
if filename not in (os.curdir, os.pardir):
|
||||
os.symlink(join(linkto, filename), join(name, filename))
|
||||
|
||||
def main():
|
||||
sys.stdout = sys.stderr
|
||||
progname = os.path.basename(sys.argv[0])
|
||||
if progname == '-c': progname = 'mkreal'
|
||||
args = sys.argv[1:]
|
||||
if not args:
|
||||
print 'usage:', progname, 'path ...'
|
||||
sys.exit(2)
|
||||
status = 0
|
||||
for name in args:
|
||||
if not os.path.islink(name):
|
||||
print progname+':', name+':', 'not a symlink'
|
||||
status = 1
|
||||
else:
|
||||
if os.path.isdir(name):
|
||||
mkrealdir(name)
|
||||
else:
|
||||
mkrealfile(name)
|
||||
sys.exit(status)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
133
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/ndiff.py
Normal file
133
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/ndiff.py
Normal file
@ -0,0 +1,133 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Module ndiff version 1.7.0
|
||||
# Released to the public domain 08-Dec-2000,
|
||||
# by Tim Peters (tim.one@home.com).
|
||||
|
||||
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
|
||||
|
||||
# ndiff.py is now simply a front-end to the difflib.ndiff() function.
|
||||
# Originally, it contained the difflib.SequenceMatcher class as well.
|
||||
# This completes the raiding of reusable code from this formerly
|
||||
# self-contained script.
|
||||
|
||||
"""ndiff [-q] file1 file2
|
||||
or
|
||||
ndiff (-r1 | -r2) < ndiff_output > file1_or_file2
|
||||
|
||||
Print a human-friendly file difference report to stdout. Both inter-
|
||||
and intra-line differences are noted. In the second form, recreate file1
|
||||
(-r1) or file2 (-r2) on stdout, from an ndiff report on stdin.
|
||||
|
||||
In the first form, if -q ("quiet") is not specified, the first two lines
|
||||
of output are
|
||||
|
||||
-: file1
|
||||
+: file2
|
||||
|
||||
Each remaining line begins with a two-letter code:
|
||||
|
||||
"- " line unique to file1
|
||||
"+ " line unique to file2
|
||||
" " line common to both files
|
||||
"? " line not present in either input file
|
||||
|
||||
Lines beginning with "? " attempt to guide the eye to intraline
|
||||
differences, and were not present in either input file. These lines can be
|
||||
confusing if the source files contain tab characters.
|
||||
|
||||
The first file can be recovered by retaining only lines that begin with
|
||||
" " or "- ", and deleting those 2-character prefixes; use ndiff with -r1.
|
||||
|
||||
The second file can be recovered similarly, but by retaining only " " and
|
||||
"+ " lines; use ndiff with -r2; or, on Unix, the second file can be
|
||||
recovered by piping the output through
|
||||
|
||||
sed -n '/^[+ ] /s/^..//p'
|
||||
"""
|
||||
|
||||
__version__ = 1, 7, 0
|
||||
|
||||
import difflib, sys
|
||||
|
||||
def fail(msg):
|
||||
out = sys.stderr.write
|
||||
out(msg + "\n\n")
|
||||
out(__doc__)
|
||||
return 0
|
||||
|
||||
# open a file & return the file object; gripe and return 0 if it
|
||||
# couldn't be opened
|
||||
def fopen(fname):
|
||||
try:
|
||||
return open(fname, 'U')
|
||||
except IOError, detail:
|
||||
return fail("couldn't open " + fname + ": " + str(detail))
|
||||
|
||||
# open two files & spray the diff to stdout; return false iff a problem
|
||||
def fcompare(f1name, f2name):
|
||||
f1 = fopen(f1name)
|
||||
f2 = fopen(f2name)
|
||||
if not f1 or not f2:
|
||||
return 0
|
||||
|
||||
a = f1.readlines(); f1.close()
|
||||
b = f2.readlines(); f2.close()
|
||||
for line in difflib.ndiff(a, b):
|
||||
print line,
|
||||
|
||||
return 1
|
||||
|
||||
# crack args (sys.argv[1:] is normal) & compare;
|
||||
# return false iff a problem
|
||||
|
||||
def main(args):
|
||||
import getopt
|
||||
try:
|
||||
opts, args = getopt.getopt(args, "qr:")
|
||||
except getopt.error, detail:
|
||||
return fail(str(detail))
|
||||
noisy = 1
|
||||
qseen = rseen = 0
|
||||
for opt, val in opts:
|
||||
if opt == "-q":
|
||||
qseen = 1
|
||||
noisy = 0
|
||||
elif opt == "-r":
|
||||
rseen = 1
|
||||
whichfile = val
|
||||
if qseen and rseen:
|
||||
return fail("can't specify both -q and -r")
|
||||
if rseen:
|
||||
if args:
|
||||
return fail("no args allowed with -r option")
|
||||
if whichfile in ("1", "2"):
|
||||
restore(whichfile)
|
||||
return 1
|
||||
return fail("-r value must be 1 or 2")
|
||||
if len(args) != 2:
|
||||
return fail("need 2 filename args")
|
||||
f1name, f2name = args
|
||||
if noisy:
|
||||
print '-:', f1name
|
||||
print '+:', f2name
|
||||
return fcompare(f1name, f2name)
|
||||
|
||||
# read ndiff output from stdin, and print file1 (which=='1') or
|
||||
# file2 (which=='2') to stdout
|
||||
|
||||
def restore(which):
|
||||
restored = difflib.restore(sys.stdin.readlines(), which)
|
||||
sys.stdout.writelines(restored)
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = sys.argv[1:]
|
||||
if "-profile" in args:
|
||||
import profile, pstats
|
||||
args.remove("-profile")
|
||||
statf = "ndiff.pro"
|
||||
profile.run("main(args)", statf)
|
||||
stats = pstats.Stats(statf)
|
||||
stats.strip_dirs().sort_stats('time').print_stats()
|
||||
else:
|
||||
main(args)
|
103
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/nm2def.py
Normal file
103
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/nm2def.py
Normal file
@ -0,0 +1,103 @@
|
||||
#! /usr/bin/env python
|
||||
"""nm2def.py
|
||||
|
||||
Helpers to extract symbols from Unix libs and auto-generate
|
||||
Windows definition files from them. Depends on nm(1). Tested
|
||||
on Linux and Solaris only (-p option to nm is for Solaris only).
|
||||
|
||||
By Marc-Andre Lemburg, Aug 1998.
|
||||
|
||||
Additional notes: the output of nm is supposed to look like this:
|
||||
|
||||
acceler.o:
|
||||
000001fd T PyGrammar_AddAccelerators
|
||||
U PyGrammar_FindDFA
|
||||
00000237 T PyGrammar_RemoveAccelerators
|
||||
U _IO_stderr_
|
||||
U exit
|
||||
U fprintf
|
||||
U free
|
||||
U malloc
|
||||
U printf
|
||||
|
||||
grammar1.o:
|
||||
00000000 T PyGrammar_FindDFA
|
||||
00000034 T PyGrammar_LabelRepr
|
||||
U _PyParser_TokenNames
|
||||
U abort
|
||||
U printf
|
||||
U sprintf
|
||||
|
||||
...
|
||||
|
||||
Even if this isn't the default output of your nm, there is generally an
|
||||
option to produce this format (since it is the original v7 Unix format).
|
||||
|
||||
"""
|
||||
import os, sys
|
||||
|
||||
PYTHONLIB = 'libpython'+sys.version[:3]+'.a'
|
||||
PC_PYTHONLIB = 'Python'+sys.version[0]+sys.version[2]+'.dll'
|
||||
NM = 'nm -p -g %s' # For Linux, use "nm -g %s"
|
||||
|
||||
def symbols(lib=PYTHONLIB,types=('T','C','D')):
|
||||
|
||||
lines = os.popen(NM % lib).readlines()
|
||||
lines = [s.strip() for s in lines]
|
||||
symbols = {}
|
||||
for line in lines:
|
||||
if len(line) == 0 or ':' in line:
|
||||
continue
|
||||
items = line.split()
|
||||
if len(items) != 3:
|
||||
continue
|
||||
address, type, name = items
|
||||
if type not in types:
|
||||
continue
|
||||
symbols[name] = address,type
|
||||
return symbols
|
||||
|
||||
def export_list(symbols):
|
||||
|
||||
data = []
|
||||
code = []
|
||||
for name,(addr,type) in symbols.items():
|
||||
if type in ('C','D'):
|
||||
data.append('\t'+name)
|
||||
else:
|
||||
code.append('\t'+name)
|
||||
data.sort()
|
||||
data.append('')
|
||||
code.sort()
|
||||
return ' DATA\n'.join(data)+'\n'+'\n'.join(code)
|
||||
|
||||
# Definition file template
|
||||
DEF_TEMPLATE = """\
|
||||
EXPORTS
|
||||
%s
|
||||
"""
|
||||
|
||||
# Special symbols that have to be included even though they don't
|
||||
# pass the filter
|
||||
SPECIALS = (
|
||||
)
|
||||
|
||||
def filter_Python(symbols,specials=SPECIALS):
|
||||
|
||||
for name in symbols.keys():
|
||||
if name[:2] == 'Py' or name[:3] == '_Py':
|
||||
pass
|
||||
elif name not in specials:
|
||||
del symbols[name]
|
||||
|
||||
def main():
|
||||
|
||||
s = symbols(PYTHONLIB)
|
||||
filter_Python(s)
|
||||
exports = export_list(s)
|
||||
f = sys.stdout # open('PC/python_nt.def','w')
|
||||
f.write(DEF_TEMPLATE % (exports))
|
||||
f.close()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,215 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# objgraph
|
||||
#
|
||||
# Read "nm -o" input (on IRIX: "nm -Bo") of a set of libraries or modules
|
||||
# and print various interesting listings, such as:
|
||||
#
|
||||
# - which names are used but not defined in the set (and used where),
|
||||
# - which names are defined in the set (and where),
|
||||
# - which modules use which other modules,
|
||||
# - which modules are used by which other modules.
|
||||
#
|
||||
# Usage: objgraph [-cdu] [file] ...
|
||||
# -c: print callers per objectfile
|
||||
# -d: print callees per objectfile
|
||||
# -u: print usage of undefined symbols
|
||||
# If none of -cdu is specified, all are assumed.
|
||||
# Use "nm -o" to generate the input (on IRIX: "nm -Bo"),
|
||||
# e.g.: nm -o /lib/libc.a | objgraph
|
||||
|
||||
|
||||
import sys
|
||||
import os
|
||||
import getopt
|
||||
import re
|
||||
|
||||
# Types of symbols.
#
# nm(1) type letters: entries in 'definitions' are treated as global
# definitions, 'externals' as undefined references, and anything in
# 'ignore' is silently skipped; any other letter triggers a warning in
# readinput().
definitions = 'TRGDSBAEC'
externals = 'UV'
ignore = 'Nntrgdsbavuc'

# Regular expression to parse "nm -o" output.
#
# Groups: 1 = object file name (before the colon), 2 = the one-letter
# symbol type, 3 = the symbol name.
matcher = re.compile('(.*):\t?........ (.) (.*)$')
|
||||
|
||||
# Store "item" in "dict" under "key".
|
||||
# The dictionary maps keys to lists of items.
|
||||
# If there is no list for the key yet, it is created.
|
||||
#
|
||||
# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
    """Append *item* to the list dict[key], creating the list on first use."""
    # setdefault performs the test-and-create in one step; unlike the
    # original dict.has_key() test it also works on Python 3, where
    # has_key was removed.
    dict.setdefault(key, []).append(item)
|
||||
|
||||
# Return a flattened version of a list of strings: the concatenation
|
||||
# of its elements with intervening spaces.
|
||||
#
|
||||
# Return a flattened version of a list of strings: the concatenation
# of its elements with intervening spaces.
#
def flat(list):
    """Return the strings in *list* joined by single spaces."""
    # ' '.join produces exactly what the original accumulate-then-slice
    # loop did: '' for an empty list, no leading/trailing space otherwise.
    return ' '.join(list)
|
||||
|
||||
# Global variables mapping defined/undefined names to files and back.
#
# file2undef: object file -> names it references but does not define
# def2file:   defined name -> object files that define it
# file2def:   object file -> names it defines
# undef2file: referenced name -> object files that reference it
file2undef = {}
def2file = {}
file2def = {}
undef2file = {}
|
||||
|
||||
# Read one input file and merge the data into the tables.
|
||||
# Argument is an open file.
|
||||
#
|
||||
def readinput(fp):
    """Read "nm -o" style output from open file *fp* and merge every
    symbol into the four global tables."""
    while 1:
        s = fp.readline()
        if not s:
            break
        # If you get any output from this line,
        # it is probably caused by an unexpected input line:
        if matcher.search(s) < 0: s; continue # Shouldn't happen
        # NOTE(review): "search(s) < 0" and "matcher.regs" are idioms from
        # the pre-re "regex" module; re patterns have no .regs attribute
        # and match objects cannot be ordered -- confirm this script still
        # runs on the interpreter it is intended for.
        (ra, rb), (r1a, r1b), (r2a, r2b), (r3a, r3b) = matcher.regs[:4]
        # Slice out file name, symbol name, and type letter by group span.
        fn, name, type = s[r1a:r1b], s[r3a:r3b], s[r2a:r2b]
        if type in definitions:
            store(def2file, name, fn)
            store(file2def, fn, name)
        elif type in externals:
            store(file2undef, fn, name)
            store(undef2file, name, fn)
        elif not type in ignore:
            print fn + ':' + name + ': unknown type ' + type
|
||||
|
||||
# Print all names that were undefined in some module and where they are
|
||||
# defined.
|
||||
#
|
||||
def printcallee():
    """For every object file, list the external names it references and
    where each is defined (or '*undefined' when nowhere)."""
    flist = file2undef.keys()
    flist.sort()
    for filename in flist:
        print filename + ':'
        elist = file2undef[filename]
        elist.sort()
        for ext in elist:
            # One or two tabs so the definition column roughly lines up.
            if len(ext) >= 8:
                tabs = '\t'
            else:
                tabs = '\t\t'
            if not def2file.has_key(ext):
                print '\t' + ext + tabs + ' *undefined'
            else:
                print '\t' + ext + tabs + flat(def2file[ext])
|
||||
|
||||
# Print for each module the names of the other modules that use it.
|
||||
#
|
||||
def printcaller():
    """For every object file, list the object files that reference one of
    its definitions, or report it as unused."""
    files = file2def.keys()
    files.sort()
    for filename in files:
        callers = []
        for label in file2def[filename]:
            if undef2file.has_key(label):
                callers = callers + undef2file[label]
        if callers:
            callers.sort()
            print filename + ':'
            # Suppress consecutive duplicates in the sorted caller list.
            lastfn = ''
            for fn in callers:
                if fn <> lastfn:
                    print '\t' + fn
                lastfn = fn
        else:
            print filename + ': unused'
|
||||
|
||||
# Print undefined names and where they are used.
|
||||
#
|
||||
def printundef():
    """List every name that is referenced somewhere but defined nowhere,
    together with the files that reference it."""
    undefs = {}
    for filename in file2undef.keys():
        for ext in file2undef[filename]:
            if not def2file.has_key(ext):
                store(undefs, ext, filename)
    elist = undefs.keys()
    elist.sort()
    for ext in elist:
        print ext + ':'
        flist = undefs[ext]
        flist.sort()
        for filename in flist:
            print '\t' + filename
|
||||
|
||||
# Print warning messages about names defined in more than one file.
|
||||
#
|
||||
def warndups():
    """Warn on stderr about names defined in more than one file."""
    # Temporarily point stdout at stderr so the print statements below
    # come out as warnings rather than report output.
    savestdout = sys.stdout
    sys.stdout = sys.stderr
    names = def2file.keys()
    names.sort()
    for name in names:
        if len(def2file[name]) > 1:
            print 'warning:', name, 'multiply defined:',
            print flat(def2file[name])
    sys.stdout = savestdout
|
||||
|
||||
# Main program
|
||||
#
|
||||
def main():
    """Parse the -cdu options, read the input files, and print the
    requested reports; return the process exit status."""
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'cdu')
    except getopt.error:
        sys.stdout = sys.stderr
        print 'Usage:', os.path.basename(sys.argv[0]),
        print '[-cdu] [file] ...'
        print '-c: print callers per objectfile'
        print '-d: print callees per objectfile'
        print '-u: print usage of undefined symbols'
        print 'If none of -cdu is specified, all are assumed.'
        print 'Use "nm -o" to generate the input (on IRIX: "nm -Bo"),'
        print 'e.g.: nm -o /lib/libc.a | objgraph'
        return 1
    optu = optc = optd = 0
    for opt, void in optlist:
        if opt == '-u':
            optu = 1
        elif opt == '-c':
            optc = 1
        elif opt == '-d':
            optd = 1
    # No selection means all reports.
    if optu == optc == optd == 0:
        optu = optc = optd = 1
    # No file arguments means read standard input.
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            readinput(sys.stdin)
        else:
            readinput(open(filename, 'r'))
    #
    warndups()
    #
    # Print section banners only when more than one report was requested.
    more = (optu + optc + optd > 1)
    if optd:
        if more:
            print '---------------All callees------------------'
        printcallee()
    if optu:
        if more:
            print '---------------Undefined callees------------'
        printundef()
    if optc:
        if more:
            print '---------------All Callers------------------'
        printcaller()
    return 0

# Call the main program.
# Use its return value as exit status.
# Catch interrupts to avoid stack trace.
#
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        sys.exit(1)
|
@ -0,0 +1,64 @@
|
||||
#!/usr/bin/env python
|
||||
""" Utility for parsing HTML entity definitions available from:
|
||||
|
||||
http://www.w3.org/ as e.g.
|
||||
http://www.w3.org/TR/REC-html40/HTMLlat1.ent
|
||||
|
||||
Input is read from stdin, output is written to stdout in form of a
|
||||
Python snippet defining a dictionary "entitydefs" mapping literal
|
||||
entity name to character or numeric entity.
|
||||
|
||||
Marc-Andre Lemburg, mal@lemburg.com, 1999.
|
||||
Use as you like. NO WARRANTIES.
|
||||
|
||||
"""
|
||||
import re,sys
|
||||
import TextTools
|
||||
|
||||
# Matches one SGML entity declaration, e.g.
#   <!ENTITY nbsp   CDATA "&#160;" -- no-break space -->
# Groups: (entity name, replacement text, trailing comment).
entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')

def parse(text,pos=0,endpos=None):
    """Scan text[pos:endpos] for entity declarations.

    Returns a dict mapping entity name -> (charcode, comment).
    """
    # BUG FIX: the original unconditionally reset pos to 0, silently
    # ignoring the caller-supplied starting offset.
    if endpos is None:
        endpos = len(text)
    d = {}
    while 1:
        m = entityRE.search(text,pos,endpos)
        if not m:
            break
        name,charcode,comment = m.groups()
        d[name] = charcode,comment
        # Continue scanning after this declaration.
        pos = m.end()
    return d
|
||||
|
||||
def writefile(f,defs):
    """Write *defs* (name -> (charcode, comment)) to open file *f* as a
    Python snippet defining an 'entitydefs' dictionary."""
    f.write("entitydefs = {\n")
    items = defs.items()
    items.sort()
    for name,(charcode,comment) in items:
        if charcode[:2] == '&#':
            code = int(charcode[2:-1])
            if code < 256:
                # Emit small numeric entities as a source-level octal
                # escape, e.g. '\240' (the backslash is literal here).
                charcode = "'\%o'" % code
            else:
                charcode = repr(charcode)
        else:
            charcode = repr(charcode)
        # NOTE(review): TextTools.collapse presumably normalizes the
        # whitespace inside the comment -- confirm against mx.TextTools.
        comment = TextTools.collapse(comment)
        f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
    f.write('\n}\n')
|
||||
|
||||
if __name__ == '__main__':
    # Optional argv[1] = input entity file and argv[2] = output snippet;
    # default to standard input / standard output.
    if len(sys.argv) > 1:
        infile = open(sys.argv[1])
    else:
        infile = sys.stdin
    if len(sys.argv) > 2:
        outfile = open(sys.argv[2],'w')
    else:
        outfile = sys.stdout
    text = infile.read()
    defs = parse(text)
    writefile(outfile,defs)
|
@ -0,0 +1,158 @@
|
||||
import re
|
||||
import sys
|
||||
import shutil
|
||||
import os.path
|
||||
import subprocess
|
||||
|
||||
import reindent
|
||||
import untabify
|
||||
|
||||
|
||||
def n_files_str(count):
    """Return 'N file(s)' with the proper plurality on 'file'."""
    suffix = "" if count == 1 else "s"
    return "{} file{}".format(count, suffix)
|
||||
|
||||
|
||||
def status(message, modal=False, info=None):
    """Decorator to output status info to stdout."""
    # After the wrapped call:
    #   info=callable -> print info(result)
    #   modal=True    -> print "yes"/"NO" from the result's truth value
    #   neither       -> print "done"
    def decorated_fxn(fxn):
        def call_fxn(*args, **kwargs):
            sys.stdout.write(message + ' ... ')
            sys.stdout.flush()
            result = fxn(*args, **kwargs)
            if not modal and not info:
                print "done"
            elif info:
                print info(result)
            else:
                print "yes" if result else "NO"
            return result
        return call_fxn
    return decorated_fxn
|
||||
|
||||
|
||||
@status("Getting the list of files that have been added/changed",
|
||||
info=lambda x: n_files_str(len(x)))
|
||||
def changed_files():
|
||||
"""Get the list of changed or added files from the VCS."""
|
||||
if os.path.isdir('.hg'):
|
||||
vcs = 'hg'
|
||||
cmd = 'hg status --added --modified --no-status'
|
||||
elif os.path.isdir('.svn'):
|
||||
vcs = 'svn'
|
||||
cmd = 'svn status --quiet --non-interactive --ignore-externals'
|
||||
else:
|
||||
sys.exit('need a checkout to get modified files')
|
||||
|
||||
st = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
|
||||
try:
|
||||
st.wait()
|
||||
if vcs == 'hg':
|
||||
return [x.decode().rstrip() for x in st.stdout]
|
||||
else:
|
||||
output = (x.decode().rstrip().rsplit(None, 1)[-1]
|
||||
for x in st.stdout if x[0] in 'AM')
|
||||
return set(path for path in output if os.path.isfile(path))
|
||||
finally:
|
||||
st.stdout.close()
|
||||
|
||||
|
||||
def report_modified_files(file_paths):
    """Return a summary naming *file_paths*: just the count when empty,
    otherwise the count followed by one indented line per path."""
    count = len(file_paths)
    if not count:
        return n_files_str(count)
    lines = ["{}:".format(n_files_str(count))]
    lines.extend(" {}".format(path) for path in file_paths)
    return "\n".join(lines)
|
||||
|
||||
|
||||
@status("Fixing whitespace", info=report_modified_files)
|
||||
def normalize_whitespace(file_paths):
|
||||
"""Make sure that the whitespace for .py files have been normalized."""
|
||||
reindent.makebackup = False # No need to create backups.
|
||||
fixed = []
|
||||
for path in (x for x in file_paths if x.endswith('.py')):
|
||||
if reindent.check(path):
|
||||
fixed.append(path)
|
||||
return fixed
|
||||
|
||||
|
||||
@status("Fixing C file whitespace", info=report_modified_files)
|
||||
def normalize_c_whitespace(file_paths):
|
||||
"""Report if any C files """
|
||||
fixed = []
|
||||
for path in file_paths:
|
||||
with open(path, 'r') as f:
|
||||
if '\t' not in f.read():
|
||||
continue
|
||||
untabify.process(path, 8, verbose=False)
|
||||
fixed.append(path)
|
||||
return fixed
|
||||
|
||||
|
||||
# Trailing whitespace before the (possibly \r\n) line ending, on bytes.
ws_re = re.compile(br'\s+(\r?\n)$')

@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
    """Strip trailing whitespace in *file_paths*, backing each changed
    file up as path + '.bak'; return the list of changed paths."""
    fixed = []
    for path in file_paths:
        try:
            with open(path, 'rb') as f:
                lines = f.readlines()
            new_lines = [ws_re.sub(br'\1', line) for line in lines]
            if new_lines != lines:
                shutil.copyfile(path, path + '.bak')
                with open(path, 'wb') as f:
                    f.writelines(new_lines)
                fixed.append(path)
        except Exception as err:
            # Best-effort: report and keep going with the other files.
            print 'Cannot fix %s: %s' % (path, err)
    return fixed
|
||||
|
||||
|
||||
@status("Docs modified", modal=True)
|
||||
def docs_modified(file_paths):
|
||||
"""Report if any file in the Doc directory has been changed."""
|
||||
return bool(file_paths)
|
||||
|
||||
|
||||
@status("Misc/ACKS updated", modal=True)
|
||||
def credit_given(file_paths):
|
||||
"""Check if Misc/ACKS has been changed."""
|
||||
return 'Misc/ACKS' in file_paths
|
||||
|
||||
|
||||
@status("Misc/NEWS updated", modal=True)
|
||||
def reported_news(file_paths):
|
||||
"""Check if Misc/NEWS has been changed."""
|
||||
return 'Misc/NEWS' in file_paths
|
||||
|
||||
|
||||
def main():
    """Run every patch check over the files changed in this checkout and
    print the results."""
    file_paths = changed_files()
    python_files = [fn for fn in file_paths if fn.endswith('.py')]
    c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
    doc_files = [fn for fn in file_paths if fn.startswith('Doc')]
    special_files = {'Misc/ACKS', 'Misc/NEWS'} & set(file_paths)
    # PEP 8 whitespace rules enforcement.
    normalize_whitespace(python_files)
    # C rules enforcement.
    normalize_c_whitespace(c_files)
    # Doc whitespace enforcement.
    normalize_docs_whitespace(doc_files)
    # Docs updated.
    docs_modified(doc_files)
    # Misc/ACKS changed.
    credit_given(special_files)
    # Misc/NEWS changed.
    reported_news(special_files)

    # Test suite run and passed.
    print
    print "Did you run the test suite?"


if __name__ == '__main__':
    main()
|
149
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pathfix.py
Normal file
149
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pathfix.py
Normal file
@ -0,0 +1,149 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Change the #! line occurring in Python scripts. The new interpreter
|
||||
# pathname must be given with a -i option.
|
||||
#
|
||||
# Command line arguments are files or directories to be processed.
|
||||
# Directories are searched recursively for files whose name looks
|
||||
# like a python module.
|
||||
# Symbolic links are always ignored (except as explicit directory
|
||||
# arguments). Of course, the original file is kept as a back-up
|
||||
# (with a "~" attached to its name).
|
||||
#
|
||||
# Undoubtedly you can do this using find and sed or perl, but this is
|
||||
# a nice example of Python code that recurses down a directory tree
|
||||
# and uses regular expressions. Also note several subtleties like
|
||||
# preserving the file's mode and avoiding to even write a temp file
|
||||
# when no changes are needed for a file.
|
||||
#
|
||||
# NB: by changing only the function fixfile() you can turn this
|
||||
# into a program for a different change to Python programs...
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
from stat import *
|
||||
import getopt
|
||||
|
||||
# Shorthands for the output channels; debug output shares stderr.
err = sys.stderr.write
dbg = err
rep = sys.stdout.write

# Interpreter path substituted into #! lines; set from the -i option.
new_interpreter = None
|
||||
|
||||
def main():
    """Parse the command line, then fix every named file (recursing into
    directories); exit with a non-zero status on any failure."""
    global new_interpreter
    usage = ('usage: %s -i /interpreter file-or-directory ...\n' %
             sys.argv[0])
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:')
    except getopt.error, msg:
        err(msg + '\n')
        err(usage)
        sys.exit(2)
    for o, a in opts:
        if o == '-i':
            new_interpreter = a
    # The replacement interpreter must be an absolute path.
    if not new_interpreter or new_interpreter[0] != '/' or not args:
        err('-i option or file-or-directory missing\n')
        err(usage)
        sys.exit(2)
    bad = 0
    for arg in args:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    sys.exit(bad)
|
||||
|
||||
# Matches bare module file names like "spam.py" (no path separators).
ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    """Return true if *name* looks like a Python source file name."""
    # bool(match) replaces the legacy "match(name) >= 0" comparison, which
    # relied on Python 2's ability to order None/match objects against
    # integers (a TypeError on Python 3).  Truth value is unchanged.
    return bool(ispythonprog.match(name))
|
||||
|
||||
def recursedown(dirname):
    """Fix every Python file below *dirname*, depth-first; symbolic links
    are skipped.  Returns 1 when anything failed, else 0."""
    dbg('recursedown(%r)\n' % (dirname,))
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error, msg:
        err('%s: cannot list directory: %r\n' % (dirname, msg))
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        # Symlinks are ignored; directories are queued so files in this
        # directory are processed before recursing.
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            subdirs.append(fullname)
        elif ispython(name):
            if fix(fullname): bad = 1
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad
|
||||
|
||||
def fix(filename):
    """Rewrite *filename*'s #! line via fixline(), keeping the original
    as filename~.  Returns 1 on failure; a falsy value otherwise (None
    when no change was needed, 0 on success)."""
##  dbg('fix(%r)\n' % (filename,))
    try:
        f = open(filename, 'r')
    except IOError, msg:
        err('%s: cannot open: %r\n' % (filename, msg))
        return 1
    line = f.readline()
    fixed = fixline(line)
    # Avoid writing a temp file when the first line needs no change.
    if line == fixed:
        rep(filename+': no change\n')
        f.close()
        return
    head, tail = os.path.split(filename)
    # Write the fixed copy next to the original as "@<name>".
    tempname = os.path.join(head, '@' + tail)
    try:
        g = open(tempname, 'w')
    except IOError, msg:
        f.close()
        err('%s: cannot create: %r\n' % (tempname, msg))
        return 1
    rep(filename + ': updating\n')
    g.write(fixed)
    BUFSIZE = 8*1024
    while 1:
        buf = f.read(BUFSIZE)
        if not buf: break
        g.write(buf)
    g.close()
    f.close()

    # Finishing touch -- move files

    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 07777)
    except os.error, msg:
        err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error, msg:
        err('%s: warning: backup failed (%r)\n' % (filename, msg))
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error, msg:
        err('%s: rename failed (%r)\n' % (filename, msg))
        return 1
    # Return success
    return 0
|
||||
|
||||
def fixline(line):
    """Return *line* with the interpreter path replaced when it is a
    Python #! line; any other line is returned unchanged."""
    is_shebang = line.startswith('#!')
    if not is_shebang or "python" not in line:
        return line
    return '#! %s\n' % new_interpreter
|
||||
|
||||
# Script entry point.
if __name__ == '__main__':
    main()
|
167
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pdeps.py
Normal file
167
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pdeps.py
Normal file
@ -0,0 +1,167 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# pdeps
|
||||
#
|
||||
# Find dependencies between a bunch of Python modules.
|
||||
#
|
||||
# Usage:
|
||||
# pdeps file1.py file2.py ...
|
||||
#
|
||||
# Output:
|
||||
# Four tables separated by lines like '--- Closure ---':
|
||||
# 1) Direct dependencies, listing which module imports which other modules
|
||||
# 2) The inverse of (1)
|
||||
# 3) Indirect dependencies, or the closure of the above
|
||||
# 4) The inverse of (3)
|
||||
#
|
||||
# To do:
|
||||
# - command line options to select output type
|
||||
# - option to automatically scan the Python library for referenced modules
|
||||
# - option to limit output to particular modules
|
||||
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
|
||||
|
||||
# Main program
|
||||
#
|
||||
def main():
    """Build the dependency table from the files on the command line and
    print the four reports; return the process exit status."""
    args = sys.argv[1:]
    if not args:
        print 'usage: pdeps file.py file.py ...'
        return 2
    #
    table = {}
    for arg in args:
        process(arg, table)
    #
    print '--- Uses ---'
    printresults(table)
    #
    print '--- Used By ---'
    inv = inverse(table)
    printresults(inv)
    #
    print '--- Closure of Uses ---'
    reach = closure(table)
    printresults(reach)
    #
    print '--- Closure of Used By ---'
    invreach = inverse(reach)
    printresults(invreach)
    #
    return 0
|
||||
|
||||
|
||||
# Compiled regular expressions to search for import statements
#
# NOTE(review): the two names appear swapped relative to their patterns --
# m_import matches "from X ..." lines and m_from matches "import X" lines.
# process() treats them interchangeably (both capture the module part), so
# behavior is unaffected; renaming them would change the module interface.
m_import = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
m_from = re.compile('^[ \t]*import[ \t]+([^#]+)')
|
||||
|
||||
|
||||
# Collect data from one file
|
||||
#
|
||||
def process(filename, table):
    """Scan *filename* for import statements and record the imported
    module names in table[modname]."""
    fp = open(filename, 'r')
    # NOTE(review): fp is never closed; harmless for a short-lived script.
    mod = os.path.basename(filename)
    if mod[-3:] == '.py':
        mod = mod[:-3]
    # 'list' (shadowing the builtin) is this module's dependency list.
    table[mod] = list = []
    while 1:
        line = fp.readline()
        if not line: break
        # Join backslash-continued lines before matching.
        while line[-1:] == '\\':
            nextline = fp.readline()
            if not nextline: break
            line = line[:-1] + nextline
        # NOTE(review): "match(line) >= 0" and ".regs" are idioms from the
        # legacy regex module; with re, match() returns None or a match
        # object and patterns have no .regs -- confirm on the target
        # interpreter.
        if m_import.match(line) >= 0:
            (a, b), (a1, b1) = m_import.regs[:2]
        elif m_from.match(line) >= 0:
            (a, b), (a1, b1) = m_from.regs[:2]
        else: continue
        # The captured group may be a comma-separated list of modules.
        words = line[a1:b1].split(',')
        # print '#', line, words
        for word in words:
            word = word.strip()
            if word not in list:
                list.append(word)
|
||||
|
||||
|
||||
# Compute closure (this is in fact totally general)
|
||||
#
|
||||
# Compute closure (this is in fact totally general)
#
def closure(table):
    """Return the transitive closure of *table* (module -> dependency
    list): each module's list is extended with every module reachable
    through its dependencies.  *table* itself is not modified."""
    modules = table.keys()
    # Start from a shallow copy of every dependency list.
    reach = {}
    for mod in modules:
        reach[mod] = table[mod][:]
    # Propagate indirect dependencies until a full pass adds nothing.
    changed = 1
    while changed:
        changed = 0
        for mod in modules:
            # reach[mod] may grow while being scanned; entries appended
            # here are picked up later in the same pass.
            for dep in reach[mod]:
                if dep in modules:
                    for indirect in reach[dep]:
                        if indirect not in reach[mod]:
                            reach[mod].append(indirect)
                            changed = 1
    return reach
|
||||
|
||||
|
||||
# Invert a table (this is again totally general).
|
||||
# All keys of the original table are made keys of the inverse,
|
||||
# so there may be empty lists in the inverse.
|
||||
#
|
||||
# Invert a table (this is again totally general).
# All keys of the original table are made keys of the inverse,
# so there may be empty lists in the inverse.
#
def inverse(table):
    """Return the inverse of *table* (key -> list of items): each item
    maps back to the keys that listed it; every original key is present,
    possibly with an empty list."""
    inv = {}
    for key in table.keys():
        # "key not in inv" replaces the original "not inv.has_key(key)";
        # dict.has_key was removed in Python 3 and the membership test is
        # equivalent on Python 2.
        if key not in inv:
            inv[key] = []
        for item in table[key]:
            # Inline of the sibling store() helper: append to the item's
            # list, creating it on first use.
            inv.setdefault(item, []).append(key)
    return inv
|
||||
|
||||
|
||||
# Store "item" in "dict" under "key".
|
||||
# The dictionary maps keys to lists of items.
|
||||
# If there is no list for the key yet, it is created.
|
||||
#
|
||||
# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
    """Append *item* to the list dict[key], creating the list on first use."""
    # setdefault performs the test-and-create in one step; unlike the
    # original dict.has_key() test it also works on Python 3.
    dict.setdefault(key, []).append(item)
|
||||
|
||||
|
||||
# Tabulate results neatly
|
||||
#
|
||||
def printresults(table):
    """Print each module with its sorted dependency list, aligned in
    columns; '(*)' marks a module that depends on itself."""
    modules = table.keys()
    # Width of the widest module name, for column alignment.
    maxlen = 0
    for mod in modules: maxlen = max(maxlen, len(mod))
    modules.sort()
    for mod in modules:
        list = table[mod]
        list.sort()
        print mod.ljust(maxlen), ':',
        if mod in list:
            print '(*)',
        for ref in list:
            print ref,
        print
|
||||
|
||||
|
||||
# Call main and honor exit status
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit quietly with status 1 instead of a traceback on ^C.
        sys.exit(1)
|
@ -0,0 +1,147 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile
|
||||
|
||||
Read the given picklefile as a series of key/value pairs and write to a new
|
||||
database. If the database already exists, any contents are deleted. The
|
||||
optional flags indicate the type of the output database:
|
||||
|
||||
-a - open using anydbm
|
||||
-b - open as bsddb btree file
|
||||
-d - open as dbm file
|
||||
-g - open as gdbm file
|
||||
-h - open as bsddb hash file
|
||||
-r - open as bsddb recno file
|
||||
|
||||
The default is hash. If a pickle file is named it is opened for read
|
||||
access. If no pickle file is named, the pickle input is read from standard
|
||||
input.
|
||||
|
||||
Note that recno databases can only contain integer keys, so you can't dump a
|
||||
hash or btree database using db2pickle.py and reconstitute it to a recno
|
||||
database with %(prog)s unless your keys are integers.
|
||||
|
||||
"""
|
||||
|
||||
import getopt
|
||||
try:
|
||||
import bsddb
|
||||
except ImportError:
|
||||
bsddb = None
|
||||
try:
|
||||
import dbm
|
||||
except ImportError:
|
||||
dbm = None
|
||||
try:
|
||||
import gdbm
|
||||
except ImportError:
|
||||
gdbm = None
|
||||
try:
|
||||
import anydbm
|
||||
except ImportError:
|
||||
anydbm = None
|
||||
import sys
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
|
||||
prog = sys.argv[0]
|
||||
|
||||
def usage():
    # The module docstring contains %(prog)s placeholders, filled here
    # from the module globals (prog is set from sys.argv[0] above).
    sys.stderr.write(__doc__ % globals())
|
||||
|
||||
def main(args):
    """Load a pickle stream of (key, value) pairs into a database file.

    *args* is the argument list (sys.argv[1:]).  Returns the exit
    status: 0 on success, 1 on usage or I/O errors.
    """
    try:
        opts, args = getopt.getopt(args, "hbrdag",
                                   ["hash", "btree", "recno", "dbm", "anydbm",
                                    "gdbm"])
    except getopt.error:
        usage()
        return 1

    # One positional argument: read the pickle from stdin; two: from the
    # named file.
    if len(args) == 0 or len(args) > 2:
        usage()
        return 1
    elif len(args) == 1:
        pfile = sys.stdin
        dbfile = args[0]
    else:
        try:
            pfile = open(args[0], 'rb')
        except IOError:
            sys.stderr.write("Unable to open %s\n" % args[0])
            return 1
        dbfile = args[1]

    # Pick the database opener.  The AttributeError handlers below fire
    # when the corresponding module failed to import above and is bound
    # to None.
    dbopen = None
    for opt, arg in opts:
        if opt in ("-h", "--hash"):
            try:
                dbopen = bsddb.hashopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-b", "--btree"):
            try:
                dbopen = bsddb.btopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-r", "--recno"):
            try:
                dbopen = bsddb.rnopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-a", "--anydbm"):
            try:
                dbopen = anydbm.open
            except AttributeError:
                sys.stderr.write("anydbm module unavailable.\n")
                return 1
        elif opt in ("-g", "--gdbm"):
            try:
                dbopen = gdbm.open
            except AttributeError:
                sys.stderr.write("gdbm module unavailable.\n")
                return 1
    # Default database type is a bsddb hash file.
    if dbopen is None:
        if bsddb is None:
            sys.stderr.write("bsddb module unavailable - ")
            sys.stderr.write("must specify dbtype.\n")
            return 1
        else:
            dbopen = bsddb.hashopen

    # Open/create the database, emptying any existing contents.
    try:
        db = dbopen(dbfile, 'c')
    except bsddb.error:
        sys.stderr.write("Unable to open %s. " % dbfile)
        sys.stderr.write("Check for format or version mismatch.\n")
        return 1
    else:
        for k in db.keys():
            del db[k]

    # Copy pickled (key, value) pairs until the stream is exhausted.
    while 1:
        try:
            (key, val) = pickle.load(pfile)
        except EOFError:
            break
        db[key] = val

    db.close()
    pfile.close()

    return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main(sys.argv[1:]))
|
542
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pindent.py
Normal file
542
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pindent.py
Normal file
@ -0,0 +1,542 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# This file contains a class and a main program that perform three
|
||||
# related (though complimentary) formatting operations on Python
|
||||
# programs. When called as "pindent -c", it takes a valid Python
|
||||
# program as input and outputs a version augmented with block-closing
|
||||
# comments. When called as "pindent -d", it assumes its input is a
|
||||
# Python program with block-closing comments and outputs a commentless
|
||||
# version. When called as "pindent -r" it assumes its input is a
|
||||
# Python program with block-closing comments but with its indentation
|
||||
# messed up, and outputs a properly indented version.
|
||||
|
||||
# A "block-closing comment" is a comment of the form '# end <keyword>'
|
||||
# where <keyword> is the keyword that opened the block. If the
|
||||
# opening keyword is 'def' or 'class', the function or class name may
|
||||
# be repeated in the block-closing comment as well. Here is an
|
||||
# example of a program fully augmented with block-closing comments:
|
||||
|
||||
# def foobar(a, b):
|
||||
# if a == b:
|
||||
# a = a+1
|
||||
# elif a < b:
|
||||
# b = b-1
|
||||
# if b > a: a = a-1
|
||||
# # end if
|
||||
# else:
|
||||
# print 'oops!'
|
||||
# # end if
|
||||
# # end def foobar
|
||||
|
||||
# Note that only the last part of an if...elif...else... block needs a
|
||||
# block-closing comment; the same is true for other compound
|
||||
# statements (e.g. try...except). Also note that "short-form" blocks
|
||||
# like the second 'if' in the example must be closed as well;
|
||||
# otherwise the 'else' in the example would be ambiguous (remember
|
||||
# that indentation is not significant when interpreting block-closing
|
||||
# comments).
|
||||
|
||||
# The operations are idempotent (i.e. applied to their own output
|
||||
# they yield an identical result). Running first "pindent -c" and
|
||||
# then "pindent -r" on a valid Python program produces a program that
|
||||
# is semantically identical to the input (though its indentation may
|
||||
# be different). Running "pindent -e" on that output produces a
|
||||
# program that only differs from the original in indentation.
|
||||
|
||||
# Other options:
|
||||
# -s stepsize: set the indentation step size (default 8)
|
||||
# -t tabsize : set the number of spaces a tab character is worth (default 8)
|
||||
# -e : expand TABs into spaces
|
||||
# file ... : input file(s) (default standard input)
|
||||
# The results always go to standard output
|
||||
|
||||
# Caveats:
|
||||
# - comments ending in a backslash will be mistaken for continued lines
|
||||
# - continuations using backslash are always left unchanged
|
||||
# - continuations inside parentheses are not extra indented by -r
|
||||
# but must be indented for -c to work correctly (this breaks
|
||||
# idempotency!)
|
||||
# - continued lines inside triple-quoted strings are totally garbled
|
||||
|
||||
# Secret feature:
|
||||
# - On input, a block may also be closed with an "end statement" --
|
||||
# this is a block-closing comment without the '#' sign.
|
||||
|
||||
# Possible improvements:
|
||||
# - check syntax based on transitions in 'next' table
|
||||
# - better error reporting
|
||||
# - better error recovery
|
||||
# - check identifier after class/def
|
||||
|
||||
# The following wishes need a more complete tokenization of the source:
|
||||
# - Don't get fooled by comments ending in backslash
|
||||
# - reindent continuation lines indicated by backslash
|
||||
# - handle continuation lines inside parentheses/braces/brackets
|
||||
# - handle triple quoted strings spanning lines
|
||||
# - realign comments
|
||||
# - optionally do much more thorough reformatting, a la C indent
|
||||
|
||||
# Defaults
STEPSIZE = 8     # indentation step size (-s option)
TABSIZE = 8      # spaces one tab is worth (-t option)
EXPANDTABS = 0   # expand tabs to spaces on output (-e option)

import re
import sys

# For each block-opening keyword, the keywords that may continue or close
# its block at the same level ('end' is the block-closing comment).
# NB: 'next' shadows the next() builtin; kept for compatibility with the
# rest of this script.
next = {}
next['if'] = next['elif'] = 'elif', 'else', 'end'
next['while'] = next['for'] = 'else', 'end'
next['try'] = 'except', 'finally'
next['except'] = 'except', 'else', 'finally', 'end'
next['else'] = next['finally'] = next['def'] = next['class'] = 'end'
next['end'] = ()
# Keywords that open a new block.
start = 'if', 'while', 'for', 'try', 'with', 'def', 'class'
|
||||
|
||||
class PythonIndenter:
|
||||
|
||||
    def __init__(self, fpi = sys.stdin, fpo = sys.stdout,
                 indentsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
        """Wrap the input/output streams and pre-compile the line
        classifier regular expressions."""
        self.fpi = fpi                  # input stream
        self.fpo = fpo                  # output stream
        self.indentsize = indentsize    # spaces per indentation level
        self.tabsize = tabsize          # width of one tab stop
        self.lineno = 0                 # current input line, for errors
        self.expandtabs = expandtabs    # expand tabs on output?
        self._write = fpo.write
        # A block keyword line, with an optional following identifier
        # (the def/class name).
        self.kwprog = re.compile(
            r'^\s*(?P<kw>[a-z]+)'
            r'(\s+(?P<id>[a-zA-Z_]\w*))?'
            r'[^\w]')
        # A block-closing comment -- or, '#' being optional, a bare
        # "end <keyword>" statement.
        self.endprog = re.compile(
            r'^\s*#?\s*end\s+(?P<kw>[a-z]+)'
            r'(\s+(?P<id>[a-zA-Z_]\w*))?'
            r'[^\w]')
        # Leading whitespace of a line.
        self.wsprog = re.compile(r'^[ \t]*')
    # end def __init__
|
||||
|
||||
    def write(self, line):
        """Write *line* to the output, expanding tabs when requested."""
        if self.expandtabs:
            self._write(line.expandtabs(self.tabsize))
        else:
            self._write(line)
        # end if
    # end def write
|
||||
|
||||
    def readline(self):
        """Return the next input line ('' at EOF), tracking self.lineno."""
        line = self.fpi.readline()
        if line: self.lineno = self.lineno + 1
        # end if
        return line
    # end def readline
|
||||
|
||||
    def error(self, fmt, *args):
        """Report a %-formatted error on stderr and echo a marker line
        into the output stream."""
        if args: fmt = fmt % args
        # end if
        sys.stderr.write('Error at line %d: %s\n' % (self.lineno, fmt))
        self.write('### %s ###\n' % fmt)
    # end def error
|
||||
|
||||
    def getline(self):
        """Return one logical line, joining backslash-continued lines."""
        line = self.readline()
        while line[-2:] == '\\\n':
            line2 = self.readline()
            if not line2: break
            # end if
            line = line + line2
        # end while
        return line
    # end def getline
|
||||
|
||||
def putline(self, line, indent = None):
|
||||
if indent is None:
|
||||
self.write(line)
|
||||
return
|
||||
# end if
|
||||
tabs, spaces = divmod(indent*self.indentsize, self.tabsize)
|
||||
i = 0
|
||||
m = self.wsprog.match(line)
|
||||
if m: i = m.end()
|
||||
# end if
|
||||
self.write('\t'*tabs + ' '*spaces + line[i:])
|
||||
# end def putline
|
||||
|
||||
def reformat(self):
|
||||
stack = []
|
||||
while 1:
|
||||
line = self.getline()
|
||||
if not line: break # EOF
|
||||
# end if
|
||||
m = self.endprog.match(line)
|
||||
if m:
|
||||
kw = 'end'
|
||||
kw2 = m.group('kw')
|
||||
if not stack:
|
||||
self.error('unexpected end')
|
||||
elif stack[-1][0] != kw2:
|
||||
self.error('unmatched end')
|
||||
# end if
|
||||
del stack[-1:]
|
||||
self.putline(line, len(stack))
|
||||
continue
|
||||
# end if
|
||||
m = self.kwprog.match(line)
|
||||
if m:
|
||||
kw = m.group('kw')
|
||||
if kw in start:
|
||||
self.putline(line, len(stack))
|
||||
stack.append((kw, kw))
|
||||
continue
|
||||
# end if
|
||||
if next.has_key(kw) and stack:
|
||||
self.putline(line, len(stack)-1)
|
||||
kwa, kwb = stack[-1]
|
||||
stack[-1] = kwa, kw
|
||||
continue
|
||||
# end if
|
||||
# end if
|
||||
self.putline(line, len(stack))
|
||||
# end while
|
||||
if stack:
|
||||
self.error('unterminated keywords')
|
||||
for kwa, kwb in stack:
|
||||
self.write('\t%s\n' % kwa)
|
||||
# end for
|
||||
# end if
|
||||
# end def reformat
|
||||
|
||||
def delete(self):
|
||||
begin_counter = 0
|
||||
end_counter = 0
|
||||
while 1:
|
||||
line = self.getline()
|
||||
if not line: break # EOF
|
||||
# end if
|
||||
m = self.endprog.match(line)
|
||||
if m:
|
||||
end_counter = end_counter + 1
|
||||
continue
|
||||
# end if
|
||||
m = self.kwprog.match(line)
|
||||
if m:
|
||||
kw = m.group('kw')
|
||||
if kw in start:
|
||||
begin_counter = begin_counter + 1
|
||||
# end if
|
||||
# end if
|
||||
self.putline(line)
|
||||
# end while
|
||||
if begin_counter - end_counter < 0:
|
||||
sys.stderr.write('Warning: input contained more end tags than expected\n')
|
||||
elif begin_counter - end_counter > 0:
|
||||
sys.stderr.write('Warning: input contained less end tags than expected\n')
|
||||
# end if
|
||||
# end def delete
|
||||
|
||||
def complete(self):
|
||||
self.indentsize = 1
|
||||
stack = []
|
||||
todo = []
|
||||
thisid = ''
|
||||
current, firstkw, lastkw, topid = 0, '', '', ''
|
||||
while 1:
|
||||
line = self.getline()
|
||||
i = 0
|
||||
m = self.wsprog.match(line)
|
||||
if m: i = m.end()
|
||||
# end if
|
||||
m = self.endprog.match(line)
|
||||
if m:
|
||||
thiskw = 'end'
|
||||
endkw = m.group('kw')
|
||||
thisid = m.group('id')
|
||||
else:
|
||||
m = self.kwprog.match(line)
|
||||
if m:
|
||||
thiskw = m.group('kw')
|
||||
if not next.has_key(thiskw):
|
||||
thiskw = ''
|
||||
# end if
|
||||
if thiskw in ('def', 'class'):
|
||||
thisid = m.group('id')
|
||||
else:
|
||||
thisid = ''
|
||||
# end if
|
||||
elif line[i:i+1] in ('\n', '#'):
|
||||
todo.append(line)
|
||||
continue
|
||||
else:
|
||||
thiskw = ''
|
||||
# end if
|
||||
# end if
|
||||
indent = len(line[:i].expandtabs(self.tabsize))
|
||||
while indent < current:
|
||||
if firstkw:
|
||||
if topid:
|
||||
s = '# end %s %s\n' % (
|
||||
firstkw, topid)
|
||||
else:
|
||||
s = '# end %s\n' % firstkw
|
||||
# end if
|
||||
self.putline(s, current)
|
||||
firstkw = lastkw = ''
|
||||
# end if
|
||||
current, firstkw, lastkw, topid = stack[-1]
|
||||
del stack[-1]
|
||||
# end while
|
||||
if indent == current and firstkw:
|
||||
if thiskw == 'end':
|
||||
if endkw != firstkw:
|
||||
self.error('mismatched end')
|
||||
# end if
|
||||
firstkw = lastkw = ''
|
||||
elif not thiskw or thiskw in start:
|
||||
if topid:
|
||||
s = '# end %s %s\n' % (
|
||||
firstkw, topid)
|
||||
else:
|
||||
s = '# end %s\n' % firstkw
|
||||
# end if
|
||||
self.putline(s, current)
|
||||
firstkw = lastkw = topid = ''
|
||||
# end if
|
||||
# end if
|
||||
if indent > current:
|
||||
stack.append((current, firstkw, lastkw, topid))
|
||||
if thiskw and thiskw not in start:
|
||||
# error
|
||||
thiskw = ''
|
||||
# end if
|
||||
current, firstkw, lastkw, topid = \
|
||||
indent, thiskw, thiskw, thisid
|
||||
# end if
|
||||
if thiskw:
|
||||
if thiskw in start:
|
||||
firstkw = lastkw = thiskw
|
||||
topid = thisid
|
||||
else:
|
||||
lastkw = thiskw
|
||||
# end if
|
||||
# end if
|
||||
for l in todo: self.write(l)
|
||||
# end for
|
||||
todo = []
|
||||
if not line: break
|
||||
# end if
|
||||
self.write(line)
|
||||
# end while
|
||||
# end def complete
|
||||
|
||||
# end class PythonIndenter
|
||||
|
||||
# Simplified user interface
|
||||
# - xxx_filter(input, output): read and write file objects
|
||||
# - xxx_string(s): take and return string object
|
||||
# - xxx_file(filename): process file in place, return true iff changed
|
||||
|
||||
def complete_filter(input = sys.stdin, output = sys.stdout,
        stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Filter interface: read from input, write to output, inserting
    '# end' directives (see PythonIndenter.complete)."""
    PythonIndenter(input, output, stepsize, tabsize, expandtabs).complete()
# end def complete_filter
|
||||
|
||||
def delete_filter(input= sys.stdin, output = sys.stdout,
        stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Filter interface: read from input, write to output, stripping
    '# end' directives (see PythonIndenter.delete)."""
    PythonIndenter(input, output, stepsize, tabsize, expandtabs).delete()
# end def delete_filter
|
||||
|
||||
def reformat_filter(input = sys.stdin, output = sys.stdout,
        stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Filter interface: read from input, write to output, recomputing
    indentation from the '# end' directives (see PythonIndenter.reformat)."""
    PythonIndenter(input, output, stepsize, tabsize, expandtabs).reformat()
# end def reformat_filter
|
||||
|
||||
class StringReader:
    """Minimal read-only file-like object over an in-memory string."""

    def __init__(self, buf):
        self.buf = buf
        self.pos = 0
        self.len = len(buf)
    # end def __init__

    def read(self, n = 0):
        """Return up to n characters; everything remaining if n <= 0."""
        remaining = self.len - self.pos
        if n <= 0 or n > remaining:
            n = remaining
        # end if
        begin = self.pos
        self.pos = begin + n
        return self.buf[begin : self.pos]
    # end def read

    def readline(self):
        """Return the next line including its newline ('' at EOF).
        A final line without a newline is returned whole."""
        nl = self.buf.find('\n', self.pos)
        return self.read(nl + 1 - self.pos)
    # end def readline

    def readlines(self):
        """Return a list of all remaining lines."""
        result = []
        while True:
            line = self.readline()
            if not line:
                break
            # end if
            result.append(line)
        # end while
        return result
    # end def readlines
    # seek/tell etc. are left as an exercise for the reader
# end class StringReader
|
||||
|
||||
class StringWriter:
    """Minimal write-only file-like object accumulating into a string."""

    def __init__(self):
        self.buf = ''
    # end def __init__

    def write(self, s):
        self.buf += s
    # end def write

    def getvalue(self):
        """Return everything written so far."""
        return self.buf
    # end def getvalue
# end class StringWriter
|
||||
|
||||
def complete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Return *source* with '# end' directives inserted."""
    out = StringWriter()
    PythonIndenter(StringReader(source), out,
                   stepsize, tabsize, expandtabs).complete()
    return out.getvalue()
# end def complete_string
|
||||
|
||||
def delete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Return *source* with '# end' directives removed."""
    out = StringWriter()
    PythonIndenter(StringReader(source), out,
                   stepsize, tabsize, expandtabs).delete()
    return out.getvalue()
# end def delete_string
|
||||
|
||||
def reformat_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Return *source* reindented according to its '# end' directives."""
    out = StringWriter()
    PythonIndenter(StringReader(source), out,
                   stepsize, tabsize, expandtabs).reformat()
    return out.getvalue()
# end def reformat_string
|
||||
|
||||
def complete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Insert '# end' directives into *filename* in place.

    The original is kept as filename~.  Returns 1 if the file was
    changed, 0 if it was already a fixed point.
    """
    f = open(filename, 'r')
    try:
        source = f.read()
    finally:
        f.close()   # don't rely on GC to release the handle
    # end try
    result = complete_string(source, stepsize, tabsize, expandtabs)
    if source == result: return 0
    # end if
    import os
    try: os.rename(filename, filename + '~')
    except os.error: pass
    # end try
    f = open(filename, 'w')
    try:
        f.write(result)
    finally:
        f.close()
    # end try
    return 1
# end def complete_file
|
||||
|
||||
def delete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Remove '# end' directives from *filename* in place.

    The original is kept as filename~.  Returns 1 if the file was
    changed, 0 if it was already a fixed point.
    """
    f = open(filename, 'r')
    try:
        source = f.read()
    finally:
        f.close()   # don't rely on GC to release the handle
    # end try
    result = delete_string(source, stepsize, tabsize, expandtabs)
    if source == result: return 0
    # end if
    import os
    try: os.rename(filename, filename + '~')
    except os.error: pass
    # end try
    f = open(filename, 'w')
    try:
        f.write(result)
    finally:
        f.close()
    # end try
    return 1
# end def delete_file
|
||||
|
||||
def reformat_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Reindent *filename* in place using its '# end' directives.

    The original is kept as filename~.  Returns 1 if the file was
    changed, 0 if it was already a fixed point.
    """
    f = open(filename, 'r')
    try:
        source = f.read()
    finally:
        f.close()   # don't rely on GC to release the handle
    # end try
    result = reformat_string(source, stepsize, tabsize, expandtabs)
    if source == result: return 0
    # end if
    import os
    try: os.rename(filename, filename + '~')
    except os.error: pass
    # end try
    f = open(filename, 'w')
    try:
        f.write(result)
    finally:
        f.close()
    # end try
    return 1
# end def reformat_file
|
||||
|
||||
# Test program when called as a script
|
||||
|
||||
# Command-line help text; the %(...)d fields are filled in from the
# module-level defaults via vars().  ('defailt' typo fixed.)
usage = """
usage: pindent (-c|-d|-r) [-s stepsize] [-t tabsize] [-e] [file] ...
-c : complete a correctly indented program (add #end directives)
-d : delete #end directives
-r : reformat a completed program (use #end directives)
-s stepsize: indentation step (default %(STEPSIZE)d)
-t tabsize : the worth in spaces of a tab (default %(TABSIZE)d)
-e : expand TABs into spaces (default OFF)
[file] ... : files are changed in place, with backups in file~
If no files are specified or a single - is given,
the program acts as a filter (reads stdin, writes stdout).
""" % vars()
|
||||
|
||||
def error_both(op1, op2):
    # Report mutually exclusive action options (op1 is the flag just
    # seen, op2 the action already selected, e.g. 'delete' -> '-d'),
    # print the usage message, and exit with status 2.
    sys.stderr.write('Error: You can not specify both '+op1+' and -'+op2[0]+' at the same time\n')
    sys.stderr.write(usage)
    sys.exit(2)
# end def error_both
|
||||
|
||||
def test():
|
||||
import getopt
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'cdrs:t:e')
|
||||
except getopt.error, msg:
|
||||
sys.stderr.write('Error: %s\n' % msg)
|
||||
sys.stderr.write(usage)
|
||||
sys.exit(2)
|
||||
# end try
|
||||
action = None
|
||||
stepsize = STEPSIZE
|
||||
tabsize = TABSIZE
|
||||
expandtabs = EXPANDTABS
|
||||
for o, a in opts:
|
||||
if o == '-c':
|
||||
if action: error_both(o, action)
|
||||
# end if
|
||||
action = 'complete'
|
||||
elif o == '-d':
|
||||
if action: error_both(o, action)
|
||||
# end if
|
||||
action = 'delete'
|
||||
elif o == '-r':
|
||||
if action: error_both(o, action)
|
||||
# end if
|
||||
action = 'reformat'
|
||||
elif o == '-s':
|
||||
stepsize = int(a)
|
||||
elif o == '-t':
|
||||
tabsize = int(a)
|
||||
elif o == '-e':
|
||||
expandtabs = 1
|
||||
# end if
|
||||
# end for
|
||||
if not action:
|
||||
sys.stderr.write(
|
||||
'You must specify -c(omplete), -d(elete) or -r(eformat)\n')
|
||||
sys.stderr.write(usage)
|
||||
sys.exit(2)
|
||||
# end if
|
||||
if not args or args == ['-']:
|
||||
action = eval(action + '_filter')
|
||||
action(sys.stdin, sys.stdout, stepsize, tabsize, expandtabs)
|
||||
else:
|
||||
action = eval(action + '_file')
|
||||
for filename in args:
|
||||
action(filename, stepsize, tabsize, expandtabs)
|
||||
# end for
|
||||
# end if
|
||||
# end def test
|
||||
|
||||
if __name__ == '__main__':
|
||||
test()
|
||||
# end if
|
@ -0,0 +1,53 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# ptags
|
||||
#
|
||||
# Create a tags file for Python programs, usable with vi.
|
||||
# Tagged are:
|
||||
# - functions (even inside other defs or classes)
|
||||
# - classes
|
||||
# - filenames
|
||||
# Warns about files it cannot open.
|
||||
# No warnings about duplicate tags.
|
||||
|
||||
import sys, re, os
|
||||
|
||||
tags = [] # Modified global variable!
|
||||
|
||||
def main():
    """Collect tags from each file named on the command line and, if
    any were found, write a sorted vi-style 'tags' file in the current
    directory."""
    for filename in sys.argv[1:]:
        treat_file(filename)
    if tags:
        tags.sort()
        fp = open('tags', 'w')
        try:
            fp.writelines(tags)
        finally:
            fp.close()  # original leaked the handle; close explicitly
|
||||
|
||||
|
||||
# Matches a 'def' or 'class' statement at any indentation; group 2 is
# the defined name, used as the tag.
expr = '^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
matcher = re.compile(expr)
|
||||
|
||||
def treat_file(filename):
    """Append tag lines for *filename* (one for the file itself, one
    per def/class found in it) to the global 'tags' list.  Warns and
    skips files that cannot be opened."""
    try:
        fp = open(filename, 'r')
    except IOError:  # was a bare except; narrowed to open() failures
        sys.stderr.write('Cannot open %s\n' % filename)
        return
    try:
        base = os.path.basename(filename)
        if base[-3:] == '.py':
            base = base[:-3]
        # File-name tag pointing at line 1.
        tags.append(base + '\t' + filename + '\t' + '1\n')
        for line in fp:  # was a manual while/readline loop
            m = matcher.match(line)
            if m:
                content = m.group(0)
                name = m.group(2)
                # vi search-pattern tag for the definition line.
                tags.append(name + '\t' + filename + '\t/^' + content + '/\n')
    finally:
        fp.close()  # original never closed the file

if __name__ == '__main__':
    main()
|
@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env python
# Thin launcher for the stdlib documentation tool's command-line
# interface (equivalent to 'python -m pydoc').

import pydoc

if __name__ == '__main__':
    pydoc.cli()
|
@ -0,0 +1,7 @@
|
||||
# Note: this file must not be named pydoc.pyw, lest it just end up
# importing itself (Python began allowing import of .pyw files
# between 2.2a1 and 2.2a2).
import pydoc

if __name__ == '__main__':
    # Launch the Tk GUI front end to pydoc.
    pydoc.gui()
|
@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""\
|
||||
List python source files.
|
||||
|
||||
There are three functions to check whether a file is a Python source, listed
|
||||
here with increasing complexity:
|
||||
|
||||
- has_python_ext() checks whether a file name ends in '.py[w]'.
|
||||
- look_like_python() checks whether the file is not binary and either has
|
||||
the '.py[w]' extension or the first line contains the word 'python'.
|
||||
- can_be_compiled() checks whether the file can be compiled by compile().
|
||||
|
||||
The file also must be of appropriate size - not bigger than a megabyte.
|
||||
|
||||
walk_python_files() recursively lists all Python files under the given directories.
|
||||
"""
|
||||
__author__ = "Oleg Broytmann, Georg Brandl"
|
||||
|
||||
__all__ = ["has_python_ext", "looks_like_python", "can_be_compiled", "walk_python_files"]
|
||||
|
||||
|
||||
import os, re
|
||||
|
||||
# Control characters that should not occur in text files; \x09-\x0D
# (TAB through CR) are deliberately allowed.
binary_re = re.compile('[\x00-\x08\x0E-\x1F\x7F]')

# Set to True to get diagnostic output from print_debug().
debug = False
|
||||
|
||||
def print_debug(msg):
|
||||
if debug: print msg
|
||||
|
||||
|
||||
def _open(fullpath):
    """Open *fullpath* for reading, returning None (after a debug
    message) instead of raising when the file is unstat-able,
    unreadable, or larger than 1 MB."""
    try:
        size = os.stat(fullpath).st_size
    except OSError, err: # Permission denied - ignore the file
        print_debug("%s: permission denied: %s" % (fullpath, err))
        return None

    if size > 1024*1024: # too big
        print_debug("%s: the file is too big: %d bytes" % (fullpath, size))
        return None

    try:
        return open(fullpath, 'rU')  # universal-newline text mode
    except IOError, err: # Access denied, or a special file - ignore it
        print_debug("%s: access denied: %s" % (fullpath, err))
        return None
|
||||
|
||||
def has_python_ext(fullpath):
    """True if the path ends in '.py' or '.pyw'."""
    return fullpath.endswith((".py", ".pyw"))
|
||||
|
||||
def looks_like_python(fullpath):
    """Heuristic check: the file is readable, not binary, and either
    has a Python extension or mentions 'python' on its first line."""
    infile = _open(fullpath)
    if infile is None:
        return False

    line = infile.readline()
    infile.close()

    if binary_re.search(line):
        # file appears to be binary
        print_debug("%s: appears to be binary" % fullpath)
        return False

    if fullpath.endswith((".py", ".pyw")):
        return True

    # disguised Python script (e.g. CGI)
    return "python" in line
|
||||
|
||||
def can_be_compiled(fullpath):
|
||||
infile = _open(fullpath)
|
||||
if infile is None:
|
||||
return False
|
||||
|
||||
code = infile.read()
|
||||
infile.close()
|
||||
|
||||
try:
|
||||
compile(code, fullpath, "exec")
|
||||
except Exception, err:
|
||||
print_debug("%s: cannot compile: %s" % (fullpath, err))
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def walk_python_files(paths, is_python=looks_like_python, exclude_dirs=None):
    """\
    Recursively yield all Python source files below the given paths.

    paths: a list of files and/or directories to be checked.
    is_python: a function that takes a file name and checks whether it is a
               Python source file
    exclude_dirs: a list of directory base names that should be excluded in
                  the search
    """
    if exclude_dirs is None:
        exclude_dirs = []

    for path in paths:
        print_debug("testing: %s" % path)
        if os.path.isfile(path):
            if is_python(path):
                yield path
        elif os.path.isdir(path):
            print_debug(" it is a directory")
            for dirpath, dirnames, filenames in os.walk(path):
                # Prune excluded names in place so os.walk skips them.
                dirnames[:] = [d for d in dirnames if d not in exclude_dirs]
                for filename in filenames:
                    fullpath = os.path.join(dirpath, filename)
                    print_debug("testing: %s" % fullpath)
                    if is_python(fullpath):
                        yield fullpath
        else:
            print_debug(" unknown type")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Two simple examples/tests: first the cheap heuristic, then the
    # expensive compile-based check, over the current directory.
    for fullpath in walk_python_files(['.']):
        print fullpath
    print "----------"
    for fullpath in walk_python_files(['.'], is_python=can_be_compiled):
        print fullpath
|
171
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/redemo.py
Normal file
171
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/redemo.py
Normal file
@ -0,0 +1,171 @@
|
||||
"""Basic regular expression demostration facility (Perl style syntax)."""
|
||||
|
||||
from Tkinter import *
|
||||
import re
|
||||
|
||||
class ReDemo:
    """Tk application for interactively trying regular expressions:
    a pattern entry, option checkboxes, a search-text box, and a list
    showing the groups of the current match(es)."""

    def __init__(self, master):
        self.master = master

        # Pattern entry row.
        self.promptdisplay = Label(self.master, anchor=W,
                text="Enter a Perl-style regular expression:")
        self.promptdisplay.pack(side=TOP, fill=X)

        self.regexdisplay = Entry(self.master)
        self.regexdisplay.pack(fill=X)
        self.regexdisplay.focus_set()

        self.addoptions()

        # Status line for compile errors / match count.
        self.statusdisplay = Label(self.master, text="", anchor=W)
        self.statusdisplay.pack(side=TOP, fill=X)

        self.labeldisplay = Label(self.master, anchor=W,
                text="Enter a string to search:")
        self.labeldisplay.pack(fill=X)
        # NOTE(review): duplicate pack() call kept verbatim -- it is
        # harmless but looks unintended.
        self.labeldisplay.pack(fill=X)

        # Radio buttons: highlight only the first match, or all.
        self.showframe = Frame(master)
        self.showframe.pack(fill=X, anchor=W)

        self.showvar = StringVar(master)
        self.showvar.set("first")

        self.showfirstradio = Radiobutton(self.showframe,
                text="Highlight first match",
                variable=self.showvar,
                value="first",
                command=self.recompile)
        self.showfirstradio.pack(side=LEFT)

        self.showallradio = Radiobutton(self.showframe,
                text="Highlight all matches",
                variable=self.showvar,
                value="all",
                command=self.recompile)
        self.showallradio.pack(side=LEFT)

        # Search-text box; matches are highlighted with the 'hit' tag.
        self.stringdisplay = Text(self.master, width=60, height=4)
        self.stringdisplay.pack(fill=BOTH, expand=1)
        self.stringdisplay.tag_configure("hit", background="yellow")

        self.grouplabel = Label(self.master, text="Groups:", anchor=W)
        self.grouplabel.pack(fill=X)

        self.grouplist = Listbox(self.master)
        self.grouplist.pack(expand=1, fill=BOTH)

        # Re-evaluate on every keystroke in either widget.
        self.regexdisplay.bind('<Key>', self.recompile)
        self.stringdisplay.bind('<Key>', self.reevaluate)

        self.compiled = None
        self.recompile()

        # Rotate the bindtags so the widget-class binding runs before
        # our <Key> handler -- presumably so the handler sees the
        # widget contents *after* the keystroke is inserted.
        btags = self.regexdisplay.bindtags()
        self.regexdisplay.bindtags(btags[1:] + btags[:1])

        btags = self.stringdisplay.bindtags()
        self.stringdisplay.bindtags(btags[1:] + btags[:1])

    def addoptions(self):
        # Build one Checkbutton per re compile flag, three per row.
        # Each button's onvalue is the flag's integer value, so
        # getflags() can simply OR the variables together.
        self.frames = []
        self.boxes = []
        self.vars = []
        for name in ('IGNORECASE',
                     'LOCALE',
                     'MULTILINE',
                     'DOTALL',
                     'VERBOSE'):
            if len(self.boxes) % 3 == 0:
                frame = Frame(self.master)
                frame.pack(fill=X)
                self.frames.append(frame)
            val = getattr(re, name)
            var = IntVar()
            box = Checkbutton(frame,
                    variable=var, text=name,
                    offvalue=0, onvalue=val,
                    command=self.recompile)
            box.pack(side=LEFT)
            self.boxes.append(box)
            self.vars.append(var)

    def getflags(self):
        # OR together the selected option flags.
        flags = 0
        for var in self.vars:
            flags = flags | var.get()
        # NOTE(review): no-op statement below, kept verbatim.
        flags = flags
        return flags

    def recompile(self, event=None):
        # Try to compile the current pattern; on failure show the
        # error in the status line and clear self.compiled.
        try:
            self.compiled = re.compile(self.regexdisplay.get(),
                                       self.getflags())
            bg = self.promptdisplay['background']
            self.statusdisplay.config(text="", background=bg)
        except re.error, msg:
            self.compiled = None
            self.statusdisplay.config(
                    text="re.error: %s" % str(msg),
                    background="red")
        self.reevaluate()

    def reevaluate(self, event=None):
        # Re-run the compiled pattern over the search text, highlight
        # the match(es), and list the groups of each match.
        try:
            self.stringdisplay.tag_remove("hit", "1.0", END)
        except TclError:
            pass
        try:
            self.stringdisplay.tag_remove("hit0", "1.0", END)
        except TclError:
            pass
        self.grouplist.delete(0, END)
        if not self.compiled:
            return
        self.stringdisplay.tag_configure("hit", background="yellow")
        self.stringdisplay.tag_configure("hit0", background="orange")
        text = self.stringdisplay.get("1.0", END)
        last = 0
        nmatches = 0
        while last <= len(text):
            m = self.compiled.search(text, last)
            if m is None:
                break
            first, last = m.span()
            if last == first:
                # Zero-width match: advance one char to avoid looping
                # forever, and mark it with the 'hit0' tag instead.
                last = first+1
                tag = "hit0"
            else:
                tag = "hit"
            pfirst = "1.0 + %d chars" % first
            plast = "1.0 + %d chars" % last
            self.stringdisplay.tag_add(tag, pfirst, plast)
            if nmatches == 0:
                self.stringdisplay.yview_pickplace(pfirst)
            groups = list(m.groups())
            groups.insert(0, m.group())
            for i in range(len(groups)):
                g = "%2d: %r" % (i, groups[i])
                self.grouplist.insert(END, g)
            nmatches = nmatches + 1
            if self.showvar.get() == "first":
                break

        if nmatches == 0:
            self.statusdisplay.config(text="(no match)",
                                      background="yellow")
        else:
            self.statusdisplay.config(text="")
|
||||
|
||||
|
||||
# Main function, run when invoked as a stand-alone Python program.
|
||||
|
||||
def main():
    # Build the Tk root window, attach the demo UI, and run the event
    # loop until the window is closed.
    root = Tk()
    demo = ReDemo(root)
    root.protocol('WM_DELETE_WINDOW', root.quit)
    root.mainloop()

if __name__ == '__main__':
    main()
|
@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Make a reST file compliant to our pre-commit hook.
|
||||
# Currently just remove trailing whitespace.
|
||||
|
||||
import sys
|
||||
|
||||
import patchcheck
|
||||
|
||||
def main(argv=sys.argv):
    # Delegate to patchcheck, dropping argv[0] (the script name).
    # NOTE(review): the default binds sys.argv at import time, which is
    # the intent here (a script entry point).  Returns None, so the
    # sys.exit() below reports success.
    patchcheck.normalize_docs_whitespace(argv[1:])

if __name__ == '__main__':
    sys.exit(main())
|
@ -0,0 +1,304 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Released to the public domain, by Tim Peters, 03 October 2000.
|
||||
|
||||
"""reindent [-d][-r][-v] [ path ... ]
|
||||
|
||||
-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
|
||||
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
|
||||
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
|
||||
-v (--verbose) Verbose. Print informative msgs; else no output.
|
||||
-h (--help) Help. Print this usage information and exit.
|
||||
|
||||
Change Python (.py) files to use 4-space indents and no hard tab characters.
|
||||
Also trim excess spaces and tabs from ends of lines, and remove empty lines
|
||||
at the end of files. Also ensure the last line ends with a newline.
|
||||
|
||||
If no paths are given on the command line, reindent operates as a filter,
|
||||
reading a single source file from standard input and writing the transformed
|
||||
source to standard output. In this case, the -d, -r and -v flags are
|
||||
ignored.
|
||||
|
||||
You can pass one or more file and/or directory paths. When a directory
|
||||
path, all .py files within the directory will be examined, and, if the -r
|
||||
option is given, likewise recursively for subdirectories.
|
||||
|
||||
If output is not to standard output, reindent overwrites files in place,
|
||||
renaming the originals with a .bak extension. If it finds nothing to
|
||||
change, the file is left alone. If reindent does change a file, the changed
|
||||
file is a fixed-point for future runs (i.e., running reindent on the
|
||||
resulting .py file won't change it again).
|
||||
|
||||
The hard part of reindenting is figuring out what to do with comment
|
||||
lines. So long as the input files get a clean bill of health from
|
||||
tabnanny.py, reindent should do a good job.
|
||||
|
||||
The backup file is a copy of the one that is being reindented. The ".bak"
|
||||
file is generated with shutil.copy(), but some corner cases regarding
|
||||
user/group and permissions could leave the backup file more readable that
|
||||
you'd prefer. You can always use the --nobackup option to prevent this.
|
||||
"""
|
||||
|
||||
__version__ = "1"
|
||||
|
||||
import tokenize
|
||||
import os, shutil
|
||||
import sys
|
||||
|
||||
# Defaults for the command-line flags; rebound by main().
verbose = 0        # -v: print informative messages
recurse = 0        # -r: descend into subdirectories
dryrun = 0         # -d: analyze only, never rewrite files
makebackup = True  # cleared by -n: keep a .bak copy of changed files
|
||||
def usage(msg=None):
    # Print an optional error message, then the module docstring
    # (the command-line help), to stderr.
    if msg is not None:
        print >> sys.stderr, msg
    print >> sys.stderr, __doc__
|
||||
def errprint(*args):
    """Write the stringified *args*, space-separated and followed by a
    newline, to stderr."""
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")
|
||||
|
||||
def main():
    """Parse command-line options into the module-level flags, then
    either filter stdin to stdout (no file args) or check() each
    argument."""
    import getopt
    global verbose, recurse, dryrun, makebackup
    try:
        opts, args = getopt.getopt(sys.argv[1:], "drnvh",
                        ["dryrun", "recurse", "nobackup", "verbose", "help"])
    except getopt.error, msg:
        usage(msg)
        return
    for o, a in opts:
        if o in ('-d', '--dryrun'):
            dryrun += 1
        elif o in ('-r', '--recurse'):
            recurse += 1
        elif o in ('-n', '--nobackup'):
            makebackup = False
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-h', '--help'):
            usage()
            return
    if not args:
        # Filter mode: stdin -> stdout; -d/-r/-v are ignored.
        r = Reindenter(sys.stdin)
        r.run()
        r.write(sys.stdout)
        return
    for arg in args:
        check(arg)
|
||||
|
||||
def check(file):
    """Reindent one file in place (recursing into directories when the
    -r flag is set).  Returns True if the file was changed, False if
    unchanged, None for directories/unreadable files."""
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print "listing directory", file
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            # Recurse into non-hidden subdirectories only with -r;
            # always process .py files.
            if ((recurse and os.path.isdir(fullname) and
                 not os.path.islink(fullname) and
                 not os.path.split(fullname)[1].startswith("."))
                or name.lower().endswith(".py")):
                check(fullname)
        return

    if verbose:
        print "checking", file, "...",
    try:
        f = open(file)
    except IOError, msg:
        errprint("%s: I/O Error: %s" % (file, str(msg)))
        return

    r = Reindenter(f)
    f.close()
    if r.run():
        if verbose:
            print "changed."
            if dryrun:
                print "But this is a dry run, so leaving it alone."
        if not dryrun:
            bak = file + ".bak"
            if makebackup:
                shutil.copyfile(file, bak)
                if verbose:
                    print "backed up", file, "to", bak
            f = open(file, "w")
            r.write(f)
            f.close()
            if verbose:
                print "wrote new", file
        return True
    else:
        if verbose:
            print "unchanged."
        return False
|
||||
|
||||
def _rstrip(line, JUNK='\n \t'):
|
||||
"""Return line stripped of trailing spaces, tabs, newlines.
|
||||
|
||||
Note that line.rstrip() instead also strips sundry control characters,
|
||||
but at least one known Emacs user expects to keep junk like that, not
|
||||
mentioning Barry by name or anything <wink>.
|
||||
"""
|
||||
|
||||
i = len(line)
|
||||
while i > 0 and line[i-1] in JUNK:
|
||||
i -= 1
|
||||
return line[:i]
|
||||
|
||||
class Reindenter:
    """Reindent the Python source read from a file object to 4-space
    indents, driven by the tokenize module; comment lines are aligned
    heuristically.  run() computes the result, write() emits it."""

    def __init__(self, f):
        self.find_stmt = 1  # next token begins a fresh stmt?
        self.level = 0      # current indent level

        # Raw file lines.
        self.raw = f.readlines()

        # File lines, rstripped & tab-expanded.  Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it's "\n".
        self.lines = [_rstrip(line).expandtabs() + "\n"
                      for line in self.raw]
        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line

        # List of (lineno, indentlevel) pairs, one for each stmt and
        # comment line.  indentlevel is -1 for comment lines, as a
        # signal that tokenize doesn't know what to do about them;
        # indeed, they're our headache!
        self.stats = []

    def run(self):
        """Tokenize the input and build self.after, the reindented
        line list.  Returns True iff the output differs from the raw
        input."""
        tokenize.tokenize(self.getline, self.tokeneater)
        # Remove trailing empty lines.
        lines = self.lines
        while lines and lines[-1] == "\n":
            lines.pop()
        # Sentinel.
        stats = self.stats
        stats.append((len(lines), 0))
        # Map count of leading spaces to # we want.
        have2want = {}
        # Program after transformation.
        after = self.after = []
        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats)-1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i+1][0]
            have = getlspace(lines[thisstmt])
            want = thislevel * 4
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line.  If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in xrange(i+1, len(stats)-1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == getlspace(lines[jline]):
                                    want = jlevel * 4
                                break
                    if want < 0:
                        # Maybe it's a hanging comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in xrange(i-1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = have + getlspace(after[jline-1]) - \
                                       getlspace(lines[jline])
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line in lines[thisstmt:nextstmt]:
                    if diff > 0:
                        if line == "\n":
                            after.append(line)
                        else:
                            after.append(" " * diff + line)
                    else:
                        remove = min(getlspace(line), -diff)
                        after.append(line[remove:])
        return self.raw != self.after

    def write(self, f):
        # Emit the transformed program (valid only after run()).
        f.writelines(self.after)

    # Line-getter for tokenize.
    def getline(self):
        if self.index >= len(self.lines):
            line = ""
        else:
            line = self.lines[self.index]
            self.index += 1
        return line

    # Line-eater for tokenize: records (lineno, indentlevel) for every
    # statement start and comment line in self.stats.
    def tokeneater(self, type, token, (sline, scol), end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):

        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1

        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1

        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1

        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((sline, -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone

        elif type == NL:
            pass

        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:   # not endmarker
                self.stats.append((sline, self.level))
||||
|
||||
# Count number of leading blanks.
def getlspace(line):
    """Return the number of leading space characters in line.

    Tabs and other whitespace are NOT counted -- only " ".
    """
    for pos, ch in enumerate(line):
        if ch != " ":
            return pos
    return len(line)
|
||||
|
||||
# Script entry point.
if __name__ == '__main__':
    main()
|
@ -0,0 +1,64 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""Reverse grep.
|
||||
|
||||
Usage: rgrep [-i] pattern file
|
||||
"""
|
||||
|
||||
import sys
|
||||
import re
|
||||
import getopt
|
||||
|
||||
def main():
    """Print the lines of a file that match a pattern, in reverse order.

    Reads the file backwards in bufsize chunks so arbitrarily large files
    can be scanned without loading them whole.  Usage errors and I/O errors
    are reported via usage(), which exits the process.
    """
    bufsize = 64*1024
    reflags = 0
    opts, args = getopt.getopt(sys.argv[1:], "i")
    for o, a in opts:
        if o == '-i':
            reflags = reflags | re.IGNORECASE
    if len(args) < 2:
        usage("not enough arguments")
    if len(args) > 2:
        usage("exactly one file argument required")
    pattern, filename = args
    try:
        prog = re.compile(pattern, reflags)
    except re.error, msg:
        usage("error in regular expression: %s" % str(msg))
    try:
        f = open(filename)
    except IOError, msg:
        usage("can't open %s: %s" % (repr(filename), str(msg)), 1)
    # Seek to the end and walk backwards one chunk at a time.
    f.seek(0, 2)
    pos = f.tell()
    leftover = None
    while pos > 0:
        size = min(pos, bufsize)
        pos = pos - size
        f.seek(pos)
        buffer = f.read(size)
        lines = buffer.split("\n")
        del buffer
        if leftover is None:
            # First (i.e. last-in-file) chunk: drop the empty fragment
            # produced by a trailing newline.
            if not lines[-1]:
                del lines[-1]
        else:
            # Re-attach the partial line carried over from the chunk after
            # this one in the file.
            lines[-1] = lines[-1] + leftover
        if pos > 0:
            # The first fragment may be an incomplete line; defer it.
            leftover = lines[0]
            del lines[0]
        else:
            leftover = None
        lines.reverse()
        for line in lines:
            if prog.search(line):
                print line
|
||||
|
||||
def usage(msg, code=2):
    """Report msg plus the module docstring on stderr and exit with code."""
    # Redirect stdout so the plain print statements below go to stderr.
    sys.stdout = sys.stderr
    print msg
    print __doc__
    sys.exit(code)

if __name__ == '__main__':
    main()
|
@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env python
|
||||
'''
|
||||
Small wsgiref based web server. Takes a path to serve from and an
|
||||
optional port number (defaults to 8000), then tries to serve files.
|
||||
Mime types are guessed from the file names, 404 errors are thrown
|
||||
if the file is not found. Used for the make serve target in Doc.
|
||||
'''
|
||||
import sys
|
||||
import os
|
||||
import mimetypes
|
||||
from wsgiref import simple_server, util
|
||||
|
||||
def app(environ, respond):
    # WSGI application: serve the file named by PATH_INFO relative to the
    # module-global `path` (set in the __main__ block below).

    fn = os.path.join(path, environ['PATH_INFO'][1:])
    # A path component without a dot is treated as a directory request.
    if '.' not in fn.split(os.path.sep)[-1]:
        fn = os.path.join(fn, 'index.html')
    type = mimetypes.guess_type(fn)[0]
    # NOTE(review): `type` is None for unknown extensions, yielding a
    # ('Content-Type', None) header; the file is opened in text mode, which
    # can corrupt binary content on Windows; and exists-then-open is racy
    # (TOCTOU).  Preserved as upstream wrote it.

    if os.path.exists(fn):
        respond('200 OK', [('Content-Type', type)])
        return util.FileWrapper(open(fn))
    else:
        respond('404 Not Found', [('Content-Type', 'text/plain')])
        return ['not found']

if __name__ == '__main__':
    # Serve sys.argv[1] on the given port (default 8000) until interrupted.
    path = sys.argv[1]
    port = int(sys.argv[2]) if len(sys.argv) > 2 else 8000
    httpd = simple_server.make_server('', port, app)
    print "Serving %s on port %s, control-C to stop" % (path, port)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print "\b\bShutting down."
|
@ -0,0 +1,20 @@
|
||||
# Installation helper: registers a selection of the Tools/scripts utilities
# (plus pygettext, tabnanny and timeit from elsewhere in the source tree)
# as installable command-line scripts.
from distutils.core import setup

if __name__ == '__main__':
    setup(
      scripts=[
        'byteyears.py',
        'checkpyc.py',
        'copytime.py',
        'crlf.py',
        'dutree.py',
        'ftpmirror.py',
        'h2py.py',
        'lfcr.py',
        '../i18n/pygettext.py',
        'logmerge.py',
        '../../Lib/tabnanny.py',
        '../../Lib/timeit.py',
        'untabify.py',
        ],
      )
|
@ -0,0 +1,30 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# suff
|
||||
#
|
||||
# show different suffixes amongst arguments
|
||||
|
||||
import sys
|
||||
|
||||
def main():
    # Group the command-line file names by suffix, then print each distinct
    # suffix (sorted) with the number of files that carry it.
    files = sys.argv[1:]
    suffixes = {}
    for filename in files:
        suff = getsuffix(filename)
        if not suffixes.has_key(suff):
            suffixes[suff] = []
        suffixes[suff].append(filename)
    keys = suffixes.keys()
    keys.sort()
    for suff in keys:
        print repr(suff), len(suffixes[suff])
|
||||
|
||||
def getsuffix(filename):
    """Return filename's suffix from the LAST '.' onward (dot included),
    or '' if there is no '.' at all.

    Note this intentionally differs from os.path.splitext: a leading dot
    counts, so '.bashrc' -> '.bashrc'.
    """
    # The original scanned every character to remember the last '.'; rfind
    # does the same search in one C-level call.
    i = filename.rfind('.')
    return filename[i:] if i >= 0 else ''

if __name__ == '__main__':
    main()
|
@ -0,0 +1,91 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""
|
||||
SVN helper script.
|
||||
|
||||
Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
|
||||
.h file in the directory tree rooted at the current directory.
|
||||
|
||||
Files with the svn:eol-style property already set (to anything) are skipped.
|
||||
|
||||
svn will itself refuse to set this property on a file that's not under SVN
|
||||
control, or that has a binary mime-type property set. This script inherits
|
||||
that behavior, and passes on whatever warning message the failing "svn
|
||||
propset" command produces.
|
||||
|
||||
In the Python project, it's safe to invoke this script from the root of
|
||||
a checkout.
|
||||
|
||||
No output is produced for files that are ignored. For a file that gets
|
||||
svn:eol-style set, output looks like:
|
||||
|
||||
property 'svn:eol-style' set on 'Lib\ctypes\__init__.py'
|
||||
|
||||
For a file not under version control:
|
||||
|
||||
svn: warning: 'patch-finalizer.txt' is not under version control
|
||||
|
||||
and for a file with a binary mime-type property:
|
||||
|
||||
svn: File 'Lib\test\test_pep263.py' has binary mime type property
|
||||
"""
|
||||
|
||||
import re
|
||||
import os
|
||||
|
||||
def propfiles(root, fn):
    """Return the candidate SVN property-file paths for fn under root.

    Returns [] when .svn/format is unreadable (i.e. root is not a working
    copy); raises ValueError for working-copy formats other than 8 or 9.
    """
    # NOTE(review): `default` is computed but never used -- leftover from an
    # earlier version, preserved as upstream wrote it.
    default = os.path.join(root, ".svn", "props", fn+".svn-work")
    try:
        format = int(open(os.path.join(root, ".svn", "format")).read().strip())
    except IOError:
        return []
    if format in (8, 9):
        # In version 8 and 9, committed props are stored in prop-base, local
        # modifications in props
        return [os.path.join(root, ".svn", "prop-base", fn+".svn-base"),
                os.path.join(root, ".svn", "props", fn+".svn-work")]
    raise ValueError, "Unknown repository format"
|
||||
|
||||
def proplist(root, fn):
    "Return a list of property names for file fn in directory root"
    result = []
    for path in propfiles(root, fn):
        try:
            f = open(path)
        except IOError:
            # no properties file: not under version control,
            # or no properties set
            continue
        while 1:
            # key-value pairs, of the form
            # K <length>
            # <keyname>NL
            # V length
            # <value>NL
            # END
            line = f.readline()
            if line.startswith("END"):
                break
            assert line.startswith("K ")
            L = int(line.split()[1])
            key = f.read(L)
            result.append(key)
            f.readline()
            line = f.readline()
            assert line.startswith("V ")
            L = int(line.split()[1])
            # The value is read only to advance past it; callers need keys.
            value = f.read(L)
            f.readline()
        f.close()
    return result
|
||||
|
||||
# Predicate: does the file name look like a text file we manage EOLs for?
possible_text_file = re.compile(r"\.([hc]|py|txt|sln|vcproj)$").search

# Walk the tree from the current directory, skipping .svn administrative
# directories, and set svn:eol-style=native on every candidate text file
# that does not already have the property.
for root, dirs, files in os.walk('.'):
    if '.svn' in dirs:
        dirs.remove('.svn')
    for fn in files:
        if possible_text_file(fn):
            if 'svn:eol-style' not in proplist(root, fn):
                path = os.path.join(root, fn)
                # NOTE(review): path is interpolated into a shell command, so
                # names containing quotes/metacharacters will break or be
                # misinterpreted; exposure is limited to the local tree.
                os.system('svn propset svn:eol-style native "%s"' % path)
|
@ -0,0 +1,233 @@
|
||||
""" TeXcheck.py -- rough syntax checking on Python style LaTeX documents.
|
||||
|
||||
Written by Raymond D. Hettinger <python at rcn.com>
|
||||
Copyright (c) 2003 Python Software Foundation. All rights reserved.
|
||||
|
||||
Designed to catch common markup errors including:
|
||||
* Unbalanced or mismatched parenthesis, brackets, and braces.
|
||||
* Unbalanced or mismatched \\begin and \\end blocks.
|
||||
* Misspelled or invalid LaTeX commands.
|
||||
* Use of forward slashes instead of backslashes for commands.
|
||||
* Table line size mismatches.
|
||||
|
||||
Sample command line usage:
|
||||
python texcheck.py -k chapterheading -m lib/librandomtex *.tex
|
||||
|
||||
Options:
|
||||
-m Munge parenthesis and brackets. [0,n) would normally mismatch.
|
||||
-k keyword: Keyword is a valid LaTeX command. Do not include the backslash.
|
||||
-d: Delimiter check only (useful for non-LaTeX files).
|
||||
-h: Help
|
||||
-s lineno: Start at lineno (useful for skipping complex sections).
|
||||
-v: Verbose. Trace the matching of //begin and //end blocks.
|
||||
"""
|
||||
|
||||
import re
|
||||
import sys
|
||||
import getopt
|
||||
from itertools import izip, count, islice
|
||||
import glob
|
||||
|
||||
cmdstr = r"""
|
||||
\section \module \declaremodule \modulesynopsis \moduleauthor
|
||||
\sectionauthor \versionadded \code \class \method \begin
|
||||
\optional \var \ref \end \subsection \lineiii \hline \label
|
||||
\indexii \textrm \ldots \keyword \stindex \index \item \note
|
||||
\withsubitem \ttindex \footnote \citetitle \samp \opindex
|
||||
\noindent \exception \strong \dfn \ctype \obindex \character
|
||||
\indexiii \function \bifuncindex \refmodule \refbimodindex
|
||||
\subsubsection \nodename \member \chapter \emph \ASCII \UNIX
|
||||
\regexp \program \production \token \productioncont \term
|
||||
\grammartoken \lineii \seemodule \file \EOF \documentclass
|
||||
\usepackage \title \input \maketitle \ifhtml \fi \url \Cpp
|
||||
\tableofcontents \kbd \programopt \envvar \refstmodindex
|
||||
\cfunction \constant \NULL \moreargs \cfuncline \cdata
|
||||
\textasciicircum \n \ABC \setindexsubitem \versionchanged
|
||||
\deprecated \seetext \newcommand \POSIX \pep \warning \rfc
|
||||
\verbatiminput \methodline \textgreater \seetitle \lineiv
|
||||
\funclineni \ulink \manpage \funcline \dataline \unspecified
|
||||
\textbackslash \mimetype \mailheader \seepep \textunderscore
|
||||
\longprogramopt \infinity \plusminus \shortversion \version
|
||||
\refmodindex \seerfc \makeindex \makemodindex \renewcommand
|
||||
\indexname \appendix \protect \indexiv \mbox \textasciitilde
|
||||
\platform \seeurl \leftmargin \labelwidth \localmoduletable
|
||||
\LaTeX \copyright \memberline \backslash \pi \centerline
|
||||
\caption \vspace \textwidth \menuselection \textless
|
||||
\makevar \csimplemacro \menuselection \bfcode \sub \release
|
||||
\email \kwindex \refexmodindex \filenq \e \menuselection
|
||||
\exindex \linev \newsgroup \verbatim \setshortversion
|
||||
\author \authoraddress \paragraph \subparagraph \cmemberline
|
||||
\textbar \C \seelink
|
||||
"""
|
||||
|
||||
def matchclose(c_lineno, c_symbol, openers, pairmap):
    "Verify that closing delimiter matches most recent opening delimiter"
    # openers is a stack of (lineno, symbol); pairmap maps a closer to the
    # string of openers it may legally match.  \end{x} closers are looked up
    # by name and fall back to requiring the identical symbol.
    try:
        o_lineno, o_symbol = openers.pop()
    except IndexError:
        print "\nDelimiter mismatch.  On line %d, encountered closing '%s' without corresponding open" % (c_lineno, c_symbol)
        return
    if o_symbol in pairmap.get(c_symbol, [c_symbol]): return
    print "\nOpener '%s' on line %d was not closed before encountering '%s' on line %d" % (o_symbol, o_lineno, c_symbol, c_lineno)
    return
|
||||
|
||||
def checkit(source, opts, morecmds=[]):
    """Check the LaTeX formatting in a sequence of lines.

    Opts is a mapping of options to option values if any:
        -m munge parenthesis and brackets
        -d delimiters only checking
        -v verbose trace of delimiter matching
        -s lineno: linenumber to start scan (default is 1).

    Morecmds is a sequence of LaTeX commands (without backslashes) that
    are to be considered valid in the scan.
    """
    # (morecmds is a mutable default but is only read, never mutated.)

    texcmd = re.compile(r'\\[A-Za-z]+')
    falsetexcmd = re.compile(r'\/([A-Za-z]+)') # Mismarked with forward slash

    validcmds = set(cmdstr.split())
    for cmd in morecmds:
        validcmds.add('\\' + cmd)

    if '-m' in opts:
        pairmap = {']':'[(', ')':'(['} # Munged openers
    else:
        pairmap = {']':'[', ')':'('} # Normal opener for a given closer
    openpunct = set('([') # Set of valid openers

    delimiters = re.compile(r'\\(begin|end){([_a-zA-Z]+)}|([()\[\]])')
    braces = re.compile(r'({)|(})')
    doubledwords = re.compile(r'(\b[A-za-z]+\b) \b\1\b')
    spacingmarkup = re.compile(r'\\(ABC|ASCII|C|Cpp|EOF|infinity|NULL|plusminus|POSIX|UNIX)\s')

    openers = [] # Stack of pending open delimiters
    bracestack = [] # Stack of pending open braces

    tablestart = re.compile(r'\\begin{(?:long)?table([iv]+)}')
    tableline = re.compile(r'\\line([iv]+){')
    tableend = re.compile(r'\\end{(?:long)?table([iv]+)}')
    tablelevel = ''
    tablestartline = 0

    startline = int(opts.get('-s', '1'))
    lineno = 0

    for lineno, line in izip(count(startline), islice(source, startline-1, None)):
        line = line.rstrip()

        # Check balancing of open/close parenthesis, brackets, and begin/end blocks
        for begend, name, punct in delimiters.findall(line):
            if '-v' in opts:
                print lineno, '|', begend, name, punct,
            if begend == 'begin' and '-d' not in opts:
                openers.append((lineno, name))
            elif punct in openpunct:
                openers.append((lineno, punct))
            elif begend == 'end' and '-d' not in opts:
                matchclose(lineno, name, openers, pairmap)
            elif punct in pairmap:
                matchclose(lineno, punct, openers, pairmap)
            if '-v' in opts:
                print ' --> ', openers

        # Balance opening and closing braces
        for open, close in braces.findall(line):
            if open == '{':
                bracestack.append(lineno)
            if close == '}':
                try:
                    bracestack.pop()
                except IndexError:
                    print r'Warning, unmatched } on line %s.' % (lineno,)

        # Optionally, skip LaTeX specific checks
        if '-d' in opts:
            continue

        # Warn whenever forward slashes encountered with a LaTeX command
        for cmd in falsetexcmd.findall(line):
            if '822' in line or '.html' in line:
                continue # Ignore false positives for urls and for /rfc822
            if '\\' + cmd in validcmds:
                print 'Warning, forward slash used on line %d with cmd: /%s' % (lineno, cmd)

        # Check for markup requiring {} for correct spacing
        for cmd in spacingmarkup.findall(line):
            print r'Warning, \%s should be written as \%s{} on line %d' % (cmd, cmd, lineno)

        # Validate commands
        nc = line.find(r'\newcommand')
        if nc != -1:
            # Newly defined commands become valid for the rest of the scan.
            start = line.find('{', nc)
            end = line.find('}', start)
            validcmds.add(line[start+1:end])
        for cmd in texcmd.findall(line):
            if cmd not in validcmds:
                print r'Warning, unknown tex cmd on line %d: \%s' % (lineno, cmd)

        # Check table levels (make sure lineii only inside tableii)
        m = tablestart.search(line)
        if m:
            tablelevel = m.group(1)
            tablestartline = lineno
        m = tableline.search(line)
        if m and m.group(1) != tablelevel:
            print r'Warning, \line%s on line %d does not match \table%s on line %d' % (m.group(1), lineno, tablelevel, tablestartline)
        if tableend.search(line):
            tablelevel = ''

        # Style guide warnings
        if 'e.g.' in line or 'i.e.' in line:
            print r'Style warning, avoid use of i.e or e.g. on line %d' % (lineno,)

        for dw in doubledwords.findall(line):
            print r'Doubled word warning.  "%s" on line %d' % (dw, lineno)

    # Anything still on the stacks was never closed.
    lastline = lineno
    for lineno, symbol in openers:
        print "Unmatched open delimiter '%s' on line %d" % (symbol, lineno)
    for lineno in bracestack:
        print "Unmatched { on line %d" % (lineno,)
    print 'Done checking %d lines.' % (lastline,)
    return 0
|
||||
|
||||
def main(args=None):
    """Drive checkit() over the files named in args (or sys.argv[1:]).

    Globs are expanded in place; returns 0/1/2 for help/no-file/open-error,
    otherwise the max status returned by checkit().
    """
    if args is None:
        args = sys.argv[1:]
    optitems, arglist = getopt.getopt(args, "k:mdhs:v")
    opts = dict(optitems)
    if '-h' in opts or args==[]:
        print __doc__
        return 0

    if len(arglist) < 1:
        print 'Please specify a file to be checked'
        return 1

    for i, filespec in enumerate(arglist):
        if '*' in filespec or '?' in filespec:
            arglist[i:i+1] = glob.glob(filespec)

    # -k may be given multiple times; collect every value.
    morecmds = [v for k,v in optitems if k=='-k']
    err = []

    for filename in arglist:
        print '=' * 30
        print "Checking", filename
        try:
            f = open(filename)
        except IOError:
            # NOTE(review): this reports arglist[0] rather than the file that
            # actually failed to open (`filename`) -- upstream bug, preserved.
            print 'Cannot open file %s.' % arglist[0]
            return 2

        try:
            err.append(checkit(f, opts, morecmds))
        finally:
            f.close()

    return max(err)

if __name__ == '__main__':
    sys.exit(main())
|
2078
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/texi2html.py
Normal file
2078
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/texi2html.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,205 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"""Script to synchronize two source trees.
|
||||
|
||||
Invoke with two arguments:
|
||||
|
||||
python treesync.py slave master
|
||||
|
||||
The assumption is that "master" contains CVS administration while
|
||||
slave doesn't. All files in the slave tree that have a CVS/Entries
|
||||
entry in the master tree are synchronized. This means:
|
||||
|
||||
If the files differ:
|
||||
if the slave file is newer:
|
||||
normalize the slave file
|
||||
if the files still differ:
|
||||
copy the slave to the master
|
||||
else (the master is newer):
|
||||
copy the master to the slave
|
||||
|
||||
normalizing the slave means replacing CRLF with LF when the master
|
||||
doesn't use CRLF
|
||||
|
||||
"""
|
||||
|
||||
import os, sys, stat, getopt
|
||||
|
||||
# Interactivity options
|
||||
default_answer = "ask"
|
||||
create_files = "yes"
|
||||
create_directories = "no"
|
||||
write_slave = "ask"
|
||||
write_master = "ask"
|
||||
|
||||
def main():
    """Parse options, then synchronize the slave tree against the master."""
    # NOTE(review): the names declared global here do not match what is
    # assigned below -- always_no/always_yes are never used, while
    # default_answer and create_files ARE assigned below but are NOT declared
    # global, so the -y, -n and -f options only rebind locals and have no
    # effect on okay()/compare().  Upstream bug, preserved as written.
    global always_no, always_yes
    global create_directories, write_master, write_slave
    opts, args = getopt.getopt(sys.argv[1:], "nym:s:d:f:a:")
    for o, a in opts:
        if o == '-y':
            default_answer = "yes"
        if o == '-n':
            default_answer = "no"
        if o == '-s':
            write_slave = a
        if o == '-m':
            write_master = a
        if o == '-d':
            create_directories = a
        if o == '-f':
            create_files = a
        if o == '-a':
            create_files = create_directories = write_slave = write_master = a
    try:
        [slave, master] = args
    except ValueError:
        print "usage: python", sys.argv[0] or "treesync.py",
        print "[-n] [-y] [-m y|n|a] [-s y|n|a] [-d y|n|a] [-f n|y|a]",
        print "slavedir masterdir"
        return
    process(slave, master)
|
||||
|
||||
def process(slave, master):
    """Recursively synchronize slave against the CVS-controlled master tree.

    Only files listed in master's CVS/Entries are compared; subdirectories
    are recursed into after the files of the current directory.
    """
    cvsdir = os.path.join(master, "CVS")
    if not os.path.isdir(cvsdir):
        print "skipping master subdirectory", master
        print "-- not under CVS"
        return
    print "-"*40
    print "slave ", slave
    print "master", master
    if not os.path.isdir(slave):
        if not okay("create slave directory %s?" % slave,
                    answer=create_directories):
            print "skipping master subdirectory", master
            print "-- no corresponding slave", slave
            return
        print "creating slave directory", slave
        try:
            os.mkdir(slave)
        except os.error, msg:
            print "can't make slave directory", slave, ":", msg
            return
        else:
            print "made slave directory", slave
    # Reset and rediscover CVS dir while scanning master's entries.
    cvsdir = None
    subdirs = []
    names = os.listdir(master)
    for name in names:
        mastername = os.path.join(master, name)
        slavename = os.path.join(slave, name)
        if name == "CVS":
            cvsdir = mastername
        else:
            if os.path.isdir(mastername) and not os.path.islink(mastername):
                subdirs.append((slavename, mastername))
    if cvsdir:
        entries = os.path.join(cvsdir, "Entries")
        # CVS/Entries lines look like "/name/rev/date/..."; a leading empty
        # field marks a file entry.
        for e in open(entries).readlines():
            words = e.split('/')
            if words[0] == '' and words[1:]:
                name = words[1]
                s = os.path.join(slave, name)
                m = os.path.join(master, name)
                compare(s, m)
    for (s, m) in subdirs:
        process(s, m)
|
||||
|
||||
def compare(slave, master):
    """Synchronize one slave/master file pair per the module's rules."""
    # NOTE(review): slave is opened in text mode but master in binary mode,
    # so on platforms where that differs the identical() comparison sees
    # normalized slave data -- presumably intentional for the CRLF handling
    # below; confirm before changing.
    try:
        sf = open(slave, 'r')
    except IOError:
        sf = None
    try:
        mf = open(master, 'rb')
    except IOError:
        mf = None
    if not sf:
        if not mf:
            print "Neither master nor slave exists", master
            return
        print "Creating missing slave", slave
        copy(master, slave, answer=create_files)
        return
    if not mf:
        print "Not updating missing master", master
        return
    if sf and mf:
        if identical(sf, mf):
            return
    sft = mtime(sf)
    mft = mtime(mf)
    if mft > sft:
        # Master is newer -- copy master to slave
        sf.close()
        mf.close()
        print "Master ", master
        print "is newer than slave", slave
        copy(master, slave, answer=write_slave)
        return
    # Slave is newer -- copy slave to master
    print "Slave is", sft-mft, "seconds newer than master"
    # But first check what to do about CRLF
    mf.seek(0)
    fun = funnychars(mf)
    mf.close()
    sf.close()
    if fun:
        print "***UPDATING MASTER (BINARY COPY)***"
        copy(slave, master, "rb", answer=write_master)
    else:
        print "***UPDATING MASTER***"
        copy(slave, master, "r", answer=write_master)
|
||||
|
||||
BUFSIZE = 16*1024

def identical(sf, mf):
    """Compare two open files chunk by chunk.

    Return 1 if their remaining contents are identical, else 0.
    """
    while True:
        chunk_a = sf.read(BUFSIZE)
        chunk_b = mf.read(BUFSIZE)
        if chunk_a != chunk_b:
            return 0
        if not chunk_a:
            # Both streams exhausted with no mismatch.
            return 1
|
||||
|
||||
def mtime(f):
    """Return the (integer) modification time of the open file object f."""
    # Index with ST_MTIME (rather than .st_mtime) to keep the integer result.
    return os.fstat(f.fileno())[stat.ST_MTIME]
|
||||
|
||||
def funnychars(f):
    """Return 1 if f's remaining data contains a CR or NUL character
    (i.e. looks like CRLF text or binary data), else 0."""
    for chunk in iter(lambda: f.read(BUFSIZE), ''):
        if '\r' in chunk or '\0' in chunk:
            return 1
    return 0
|
||||
|
||||
def copy(src, dst, rmode="rb", wmode="wb", answer='ask'):
    """Copy src to dst in BUFSIZE chunks after confirmation via okay().

    rmode/wmode select text vs. binary transfer (the CRLF normalization
    described in the module docstring).
    """
    print "copying", src
    print "  to", dst
    if not okay("okay to copy? ", answer):
        return
    f = open(src, rmode)
    g = open(dst, wmode)
    while 1:
        buf = f.read(BUFSIZE)
        if not buf: break
        g.write(buf)
    f.close()
    g.close()
|
||||
|
||||
def okay(prompt, answer='ask'):
    """Return 1 for yes, 0 for no; prompt the user unless answer decides.

    An answer starting with 'y' or 'n' is taken as-is; anything else (e.g.
    'ask') prompts interactively, falling back to the module-level
    default_answer on empty input.
    """
    answer = answer.strip().lower()
    if not answer or answer[0] not in 'ny':
        answer = raw_input(prompt)
        answer = answer.strip().lower()
        if not answer:
            answer = default_answer
    if answer[:1] == 'y':
        return 1
    if answer[:1] == 'n':
        return 0
    print "Yes or No please -- try again:"
    # Re-ask; note the recursive call drops the answer argument, so the
    # retry always prompts interactively.
    return okay(prompt)

if __name__ == '__main__':
    main()
|
@ -0,0 +1,52 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
"Replace tabs with spaces in argument files. Print names of changed files."
|
||||
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
|
||||
def main():
    """Expand tabs (default width 8, -t overrides) in each named file."""
    tabsize = 8
    try:
        opts, args = getopt.getopt(sys.argv[1:], "t:")
        if not args:
            raise getopt.error, "At least one file argument required"
    except getopt.error, msg:
        print msg
        print "usage:", sys.argv[0], "[-t tabwidth] file ..."
        return
    for optname, optvalue in opts:
        if optname == '-t':
            tabsize = int(optvalue)

    for filename in args:
        process(filename, tabsize)
|
||||
|
||||
def process(filename, tabsize, verbose=True):
    """Expand tabs in filename in place, keeping the old file as filename~.

    Prints the file name (when verbose) only if the contents changed.
    """
    try:
        f = open(filename)
        text = f.read()
        f.close()
    except IOError, msg:
        print "%r: I/O error: %s" % (filename, msg)
        return
    newtext = text.expandtabs(tabsize)
    if newtext == text:
        # Nothing to do -- leave the file (and any old backup) untouched.
        return
    backup = filename + "~"
    try:
        os.unlink(backup)
    except os.error:
        pass
    try:
        os.rename(filename, backup)
    except os.error:
        # NOTE(review): a failed rename is silently ignored, so the rewrite
        # below proceeds without a backup having been made.
        pass
    with open(filename, "w") as f:
        f.write(newtext)
    if verbose:
        print filename

if __name__ == '__main__':
    main()
|
@ -0,0 +1,60 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# Variant of "which".
|
||||
# On stderr, near and total misses are reported.
|
||||
# '-l<flags>' argument adds ls -l<flags> of each file found.
|
||||
|
||||
import sys
|
||||
if sys.path[0] in (".", ""): del sys.path[0]
|
||||
|
||||
import sys, os
|
||||
from stat import *
|
||||
|
||||
def msg(str):
    """Write str to standard error, followed by a newline."""
    sys.stderr.write("%s\n" % str)
|
||||
|
||||
def main():
    """Search $PATH for each named program; report misses on stderr.

    Exits with status 1 if any program was not found, or with the ls exit
    status when -l listing fails.
    """
    pathlist = os.environ['PATH'].split(os.pathsep)

    sts = 0
    longlist = ''

    # A leading -l<flags> argument requests an ls -l<flags> of each hit.
    if sys.argv[1:] and sys.argv[1][:2] == '-l':
        longlist = sys.argv[1]
        del sys.argv[1]

    for prog in sys.argv[1:]:
        # ident records (mode, ino, dev) of the first hit, to distinguish
        # "same as" (hard link) from "also" (distinct file) on later hits.
        ident = ()
        for dir in pathlist:
            filename = os.path.join(dir, prog)
            try:
                st = os.stat(filename)
            except os.error:
                continue
            if not S_ISREG(st[ST_MODE]):
                msg(filename + ': not a disk file')
            else:
                mode = S_IMODE(st[ST_MODE])
                if mode & 0111:
                    if not ident:
                        print filename
                        ident = st[:3]
                    else:
                        if st[:3] == ident:
                            s = 'same as: '
                        else:
                            s = 'also: '
                        msg(s + filename)
                else:
                    msg(filename + ': not executable')
            if longlist:
                sts = os.system('ls ' + longlist + ' ' + filename)
                if sts: msg('"ls -l" exit status: ' + repr(sts))
        if not ident:
            msg(prog + ': not found')
            sts = 1

    sys.exit(sts)

if __name__ == '__main__':
    main()
|
@ -0,0 +1,57 @@
|
||||
"""Add Python to the search path on Windows
|
||||
|
||||
This is a simple script to add Python to the Windows search path. It
|
||||
modifies the current user (HKCU) tree of the registry.
|
||||
|
||||
Copyright (c) 2008 by Christian Heimes <christian@cheimes.de>
|
||||
Licensed to PSF under a Contributor Agreement.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import site
|
||||
import os
|
||||
import _winreg
|
||||
|
||||
HKCU = _winreg.HKEY_CURRENT_USER
|
||||
ENV = "Environment"
|
||||
PATH = "PATH"
|
||||
DEFAULT = u"%PATH%"
|
||||
|
||||
def modify():
    """Append the interpreter, Scripts and per-user Scripts directories to
    the user's PATH value under HKCU\\Environment.

    Returns (paths, envpath) where paths is [old PATH] + the directories
    actually added and envpath is the new PATH string written back.
    """
    pythonpath = os.path.dirname(os.path.normpath(sys.executable))
    scripts = os.path.join(pythonpath, "Scripts")
    appdata = os.environ["APPDATA"]
    if hasattr(site, "USER_SITE"):
        # Store the user directory in %APPDATA%-relative form so the value
        # stays valid if the profile moves.
        userpath = site.USER_SITE.replace(appdata, "%APPDATA%")
        userscripts = os.path.join(userpath, "Scripts")
    else:
        userscripts = None

    with _winreg.CreateKey(HKCU, ENV) as key:
        try:
            envpath = _winreg.QueryValueEx(key, PATH)[0]
        except WindowsError:
            envpath = DEFAULT

        paths = [envpath]
        for path in (pythonpath, scripts, userscripts):
            # NOTE(review): `path not in envpath` is a substring test, so a
            # directory that happens to be a substring of an existing PATH
            # entry is skipped -- preserved as upstream wrote it.
            if path and path not in envpath and os.path.isdir(path):
                paths.append(path)

        envpath = os.pathsep.join(paths)
        _winreg.SetValueEx(key, PATH, 0, _winreg.REG_EXPAND_SZ, envpath)
        return paths, envpath
|
||||
|
||||
def main():
    """Run modify() and report what (if anything) was added to PATH."""
    paths, envpath = modify()
    if len(paths) > 1:
        print "Path(s) added:"
        print '\n'.join(paths[1:])
    else:
        print "No path was added"
    print "\nPATH is now:\n%s\n" % envpath
    print "Expanded:"
    print _winreg.ExpandEnvironmentStrings(envpath)

if __name__ == '__main__':
    main()
|
116
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/xxci.py
Normal file
116
AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/xxci.py
Normal file
@ -0,0 +1,116 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
# xxci
|
||||
#
|
||||
# check in files for which rcsdiff returns nonzero exit status
|
||||
|
||||
import sys
|
||||
import os
|
||||
from stat import *
|
||||
import fnmatch
|
||||
|
||||
EXECMAGIC = '\001\140\000\010'
|
||||
|
||||
MAXSIZE = 200*1024 # Files this big must be binaries and are skipped.
|
||||
|
||||
def getargs():
    """Return file names from the command line; if none were given, return
    every candidate file in the current directory, newest first."""
    args = sys.argv[1:]
    if args:
        return args
    print 'No arguments, checking almost *, in "ls -t" order'
    list = []
    for file in os.listdir(os.curdir):
        if not skipfile(file):
            list.append((getmtime(file), file))
    list.sort()
    if not list:
        print 'Nothing to do -- exit 1'
        sys.exit(1)
    # NOTE(review): this second sort is redundant -- the list was already
    # sorted just above.  Preserved as upstream wrote it.
    list.sort()
    list.reverse()
    for mtime, file in list: args.append(file)
    return args
|
||||
|
||||
def getmtime(file):
    """Return file's modification time, or -1 if it cannot be stat'ed."""
    try:
        return os.stat(file)[ST_MTIME]
    except os.error:
        return -1
|
||||
|
||||
# File name patterns that should never be considered for check-in.
badnames = ['tags', 'TAGS', 'xyzzy', 'nohup.out', 'core']
badprefixes = ['.', ',', '@', '#', 'o.']
badsuffixes = \
        ['~', '.a', '.o', '.old', '.bak', '.orig', '.new', '.prev', '.not', \
         '.pyc', '.fdc', '.rgb', '.elc', ',v']
# Populated by setup(); fnmatch patterns consulted by skipfile().
ignore = []

def setup():
    """Build the global ignore list from the patterns above plus any extra
    patterns listed in a .xxcign file in the current directory."""
    ignore[:] = badnames
    for p in badprefixes:
        ignore.append(p + '*')
    for p in badsuffixes:
        ignore.append('*' + p)
    try:
        f = open('.xxcign', 'r')
    except IOError:
        # No .xxcign file -- the built-in patterns are enough.
        return
    ignore[:] = ignore + f.read().split()
|
||||
|
||||
def skipfile(file):
    """Return 1 if file should not be considered for check-in, else 0."""
    for p in ignore:
        if fnmatch.fnmatch(file, p): return 1
    try:
        st = os.lstat(file)
    except os.error:
        return 1 # Doesn't exist -- skip it
    # Skip non-plain files.
    if not S_ISREG(st[ST_MODE]): return 1
    # Skip huge files -- probably binaries.
    if st[ST_SIZE] >= MAXSIZE: return 1
    # Skip executables
    try:
        data = open(file, 'r').read(len(EXECMAGIC))
        if data == EXECMAGIC: return 1
    except:
        # NOTE(review): bare except silently swallows ALL errors here
        # (including KeyboardInterrupt); the file is treated as checkable
        # in that case.  Preserved as upstream wrote it.
        pass
    return 0
|
||||
|
||||
def badprefix(file):
    """Return 1 if file begins with any of the global badprefixes, else 0."""
    for prefix in badprefixes:
        if file.startswith(prefix):
            return 1
    return 0
|
||||
|
||||
def badsuffix(file):
    """Return 1 if file ends with any of the global badsuffixes, else 0."""
    for suffix in badsuffixes:
        if file.endswith(suffix):
            return 1
    return 0
|
||||
|
||||
def go(args):
    """For each file: show its RCS diffs and, on confirmation, lock and
    check it in (keeping the lock with ci -l)."""
    for file in args:
        print file + ':'
        if differing(file):
            showdiffs(file)
            if askyesno('Check in ' + file + ' ? '):
                sts = os.system('rcs -l ' + file) # ignored
                sts = os.system('ci -l ' + file)
|
||||
def differing(file):
    """Return true if the working file differs from its checked-in RCS head.

    Compares 'co -p' output against the working file with cmp -s.
    """
    # NOTE(review): file is interpolated unquoted into a shell command; names
    # containing spaces or metacharacters will misbehave.
    cmd = 'co -p ' + file + ' 2>/dev/null | cmp -s - ' + file
    sts = os.system(cmd)
    return sts != 0
|
||||
|
||||
def showdiffs(file):
    """Page the rcsdiff output for file through $PAGER (default: more)."""
    cmd = 'rcsdiff ' + file + ' 2>&1 | ${PAGER-more}'
    sts = os.system(cmd)
|
||||
|
||||
def askyesno(prompt):
    """Prompt the user; return true only for an explicit 'y' or 'yes'."""
    s = raw_input(prompt)
    return s in ['y', 'yes']

if __name__ == '__main__':
    # Build the ignore list, then process the selected files; a Ctrl-C
    # anywhere aborts cleanly with a marker instead of a traceback.
    try:
        setup()
        go(getargs())
    except KeyboardInterrupt:
        print '[Intr]'
|
Reference in New Issue
Block a user