newconfig is no more.

Signed-off-by: Patrick Georgi <patrick.georgi@coresystems.de>
Acked-by: Ronald G. Minnich <rminnich@gmail.com>


git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5089 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Author: Patrick Georgi
Date:   2010-02-07 21:43:48 +00:00
parent 389240f288
commit abf2ad716d
653 changed files with 15 additions and 64257 deletions


@@ -34,8 +34,6 @@ cpus=1
# Configure-only mode
configureonly=0
# use old config method "newconfig"
oldconfig=0
# One might want to adjust these in case of cross compiling
for i in make gmake gnumake nonexistant_make; do
@@ -122,117 +120,9 @@ function architecture
{
VENDOR=$1
MAINBOARD=$2
if [ $oldconfig -eq 1 ]; then
ARCH=`cat $ROOT/src/mainboard/$VENDOR/$MAINBOARD/Config.lb | \
grep ^arch | cut -f 2 -d\ `
echo $ARCH | sed s/ppc/powerpc/
else
ARCH=`cat $ROOT/src/mainboard/$VENDOR/$MAINBOARD/Kconfig | \
grep "select ARCH_"|cut -f2- -d_`
echo $ARCH | sed s/X86/i386/
fi
}
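For illustration: with newconfig gone, abuild derives a board's architecture from its Kconfig alone. Assuming a hypothetical board whose Kconfig contains "select ARCH_X86", the surviving pipeline reduces to:

    $ grep "select ARCH_" src/mainboard/VENDOR/BOARD/Kconfig | cut -f2- -d_
    X86
    $ echo X86 | sed s/X86/i386/
    i386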
function create_config_old
{
VENDOR=$1
MAINBOARD=$2
CONFIG=$3
TARCH=$( architecture $VENDOR $MAINBOARD )
TARGCONFIG=$ROOT/targets/$VENDOR/$MAINBOARD/Config-abuild.lb
if [ "$CONFIG" != "" ]; then
TARGCONFIG=$ROOT/targets/$VENDOR/$MAINBOARD/$CONFIG
fi
# get a working payload for the board if we have one.
# the --payload option expects a directory containing
# a shell script payload.sh
# Usage: payload.sh [VENDOR] [DEVICE]
# the script returns an absolute path to the payload binary.
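# (a minimal example payload.sh is sketched right after this function)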
if [ -f $payloads/payload.sh ]; then
PAYLOAD=`sh $payloads/payload.sh $VENDOR $MAINBOARD`
printf "Using payload $PAYLOAD\n"
fi
mkdir -p $TARGET
if [ -f $TARGCONFIG ]; then
cp $TARGCONFIG $TARGET/Config-${VENDOR}_${MAINBOARD}.lb
printf "Using existing test target $TARGCONFIG"
xml " <config>$TARGCONFIG</config>"
else
printf " Creating config file..."
xml " <config>autogenerated</config>"
( cat << EOF
# This will make a target directory of ./VENDOR_MAINBOARD
target VENDOR_MAINBOARD
mainboard VENDOR/MAINBOARD
option CC="CROSSCC"
option CONFIG_CROSS_COMPILE="CROSS_PREFIX"
option HOSTCC="CROSS_HOSTCC"
__COMPRESSION__
__LOGLEVEL__
EOF
if [ "$TARCH" == i386 ] ; then
cat <<EOF
romimage "normal"
option CONFIG_USE_FALLBACK_IMAGE=0
option COREBOOT_EXTRA_VERSION=".0-normal"
payload __PAYLOAD__
end
romimage "fallback"
option CONFIG_USE_FALLBACK_IMAGE=1
option COREBOOT_EXTRA_VERSION=".0-fallback"
payload __PAYLOAD__
end
buildrom ./coreboot.rom CONFIG_ROM_SIZE "normal" "fallback"
EOF
else
cat <<EOF
romimage "only"
option COREBOOT_EXTRA_VERSION=".0"
payload __PAYLOAD__
end
buildrom ./coreboot.rom CONFIG_ROM_SIZE "only"
EOF
fi
) > $TARGET/Config-${VENDOR}_${MAINBOARD}.lb
fi
if [ "$loglevel" != "default" ]; then
LOGLEVEL1="option CONFIG_MAXIMUM_CONSOLE_LOGLEVEL=$loglevel"
LOGLEVEL2="option CONFIG_DEFAULT_CONSOLE_LOGLEVEL=$loglevel"
else
LOGLEVEL1="# no loglevel override"
LOGLEVEL2=""
fi
COMPRESSION="# no compression"
if which lzma >/dev/null 2>/dev/null; then
if [ "$PAYLOAD" != /dev/null ]; then
COMPRESSION="option CONFIG_COMPRESSED_PAYLOAD_LZMA=1"
fi
fi
cp $TARGET/Config-${VENDOR}_${MAINBOARD}.lb $TARGET/Config-${VENDOR}_${MAINBOARD}.lb.pre
sed -e s:VENDOR:$VENDOR:g \
-e s:MAINBOARD:$MAINBOARD:g \
-e s:payload\ __PAYLOAD__:payload\ $PAYLOAD:g \
-e s:CROSSCC:"$CC":g \
-e s:CROSS_PREFIX:"$CROSS_COMPILE":g \
-e s:CROSS_HOSTCC:"$HOSTCC":g \
-e s:__COMPRESSION__:"$COMPRESSION":g \
-e s:__LOGLEVEL__:"$LOGLEVEL1"\
"$LOGLEVEL2":g \
$TARGET/Config-${VENDOR}_${MAINBOARD}.lb.pre > $TARGET/Config-${VENDOR}_${MAINBOARD}.lb
printf " ok\n"
ARCH=`cat $ROOT/src/mainboard/$VENDOR/$MAINBOARD/Kconfig | \
grep "select ARCH_"|cut -f2- -d_`
echo $ARCH | sed s/X86/i386/
}
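A minimal payload.sh honoring the contract documented in create_config_old above might look like the following sketch. The /opt/payloads layout is an assumption, not anything shipped with coreboot; /dev/null is abuild's "no payload" convention, as seen in the compression check above.

    #!/bin/sh
    # payload.sh VENDOR DEVICE -- print an absolute path to a payload binary.
    # Sketch only: the /opt/payloads directory layout is hypothetical.
    VENDOR="$1"
    DEVICE="$2"
    PAYLOAD="/opt/payloads/$VENDOR/$DEVICE/payload.elf"
    if [ -f "$PAYLOAD" ]; then
        echo "$PAYLOAD"
    else
        echo /dev/null
    fi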
function create_config
@@ -308,66 +198,12 @@ function create_config
fi
}
function create_builddir
{
VENDOR=$1
MAINBOARD=$2
printf " Creating builddir..."
target_dir=$TARGET
config_dir=$ROOT/util/newconfig
yapps2_py=$config_dir/yapps2.py
config_g=$config_dir/config.g
config_lb=Config-${VENDOR}_${MAINBOARD}.lb
cd $target_dir
build_dir=${VENDOR}_${MAINBOARD}
config_py=$build_dir/config.py
if [ ! -d $build_dir ] ; then
mkdir -p $build_dir
fi
if [ ! -f $config_py ]; then
$PYTHON $yapps2_py $config_g $config_py &> $build_dir/py.log
fi
# make sure config.py is up-to-date
export PYTHONPATH=$config_dir
$PYTHON $config_py $config_lb $ROOT &> $build_dir/config.log
if [ $? -eq 0 ]; then
printf "ok\n"
xml " <builddir>ok</builddir>"
xml " <log>"
xmlfile $build_dir/config.log
xml " </log>"
xml ""
return 0
else
printf "FAILED! Log excerpt:\n"
xml " <builddir>failed</builddir>"
xml " <log>"
xmlfile $build_dir/config.log
xml " </log>"
xml ""
tail -n $CONTEXT $build_dir/config.log 2> /dev/null || tail -$CONTEXT $build_dir/config.log
return 1
fi
}
function create_buildenv
{
VENDOR=$1
MAINBOARD=$2
CONFIG=$3
if [ $oldconfig -eq 1 ]; then
create_config_old $VENDOR $MAINBOARD $CONFIG
create_builddir $VENDOR $MAINBOARD
else
create_config $VENDOR $MAINBOARD $CONFIG
fi
create_config $VENDOR $MAINBOARD $CONFIG
}
function compile_target
@@ -382,20 +218,14 @@ function compile_target
CURR=$( pwd )
stime=`perl -e 'print time();'`
if [ $oldconfig -eq 1 ]; then
cd $TARGET/${VENDOR}_${MAINBOARD}
eval $MAKE $silent -j $cpus &> make.log
ret=$?
else
build_dir=$TARGET/${VENDOR}_${MAINBOARD}
eval $MAKE $silent -j $cpus obj=${build_dir} \
&> ${build_dir}/make.log
ret=$?
mv .config ${build_dir}/config.build
mv .xcompile ${build_dir}/xcompile.build
mv ..config.tmp ${build_dir}/config.deps
cd $TARGET/${VENDOR}_${MAINBOARD}
fi
build_dir=$TARGET/${VENDOR}_${MAINBOARD}
eval $MAKE $silent -j $cpus obj=${build_dir} \
&> ${build_dir}/make.log
ret=$?
mv .config ${build_dir}/config.build
mv .xcompile ${build_dir}/xcompile.build
mv ..config.tmp ${build_dir}/config.deps
cd $TARGET/${VENDOR}_${MAINBOARD}
etime=`perl -e 'print time();'`
duration=$(( $etime - $stime ))
if [ $ret -eq 0 ]; then
@@ -692,15 +522,15 @@ target=""
buildall=false
verbose=false
test -f util/newconfig/config.g && ROOT=$( pwd )
test -f ../util/newconfig/config.g && ROOT=$( cd ..; pwd )
test -f util/sconfig/config.g && ROOT=$( pwd )
test -f ../util/sconfig/config.g && ROOT=$( cd ..; pwd )
test "$ROOT" = "" && ROOT=$( cd ../..; pwd )
# parse parameters.. try to find out whether we're running GNU getopt
getoptbrand="`getopt -V`"
if [ "${getoptbrand:0:6}" == "getopt" ]; then
# Detected GNU getopt that supports long options.
args=`getopt -l version,verbose,help,all,target:,broken,payloads:,test,cpus:,silent,xml,config,loglevel:,oldconfig Vvhat:bp:Tc:sxCl:o -- "$@"`
args=`getopt -l version,verbose,help,all,target:,broken,payloads:,test,cpus:,silent,xml,config,loglevel: Vvhat:bp:Tc:sxCl: -- "$@"`
eval set "$args"
else
# Detected non-GNU getopt
@@ -731,7 +561,6 @@ while true ; do
-sb|--scan-build) shift; scanbuild=true;;
-C|--config) shift; configureonly=1;;
-l|--loglevel) shift; loglevel="$1"; shift;;
-o|--oldconfig) shift; oldconfig=1;;
--) shift; break;;
-*) printf "Invalid option\n\n"; myhelp; exit 1;;
*) break;;
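With -o/--oldconfig removed, every abuild run goes through Kconfig. A typical configure-only invocation (board name hypothetical) is simply:

    $ util/abuild/abuild -C -t VENDOR/BOARD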


@@ -1,135 +0,0 @@
# Coreboot codebase analysis tool
#
# This makefile collects source usage information for all working targets.
#
# Written 7/2006 by Josiah England <josiah@lanl.gov>
#
# This file is subject to the terms and conditions of the GNU General
# Public License. See the file COPYING in the main directory of this
# archive for more details.
TOP := $(shell cd ../.. && pwd)
BUILD_BASE := $(TOP)/targets
IGNORE_ERRORS := 2>/dev/null # Comment out this line for some ugly verbosity
IGNORE_VENDORS := emulation momentum embeddedplanet motorola totalimpact
IGNORE_C := static.c# romcc.c
quote = "#"
VENDORS := $(shell ls -l $(TOP)/targets | grep ^d | grep -Eo [[:alnum:]_-]+$$$(foreach ignored, $(IGNORE_VENDORS), | grep -v $(ignored)))
#<VENDOR>_BOARDS assignments
$(foreach VENDOR, $(VENDORS), $(eval $(VENDOR)_BOARDS := $(shell ls $(TOP)/targets/$(VENDOR))))
TARGETS := $(foreach VENDOR, $(VENDORS), $(addprefix $(VENDOR)/, $($(VENDOR)_BOARDS)))
# The following delayed-evaluate variables are only to be used in rule commands.
CONFIG_MAINBOARD = $(TOP)/src/mainboard/$(shell grep ^mainboard $(dir $*)/Config.lb|grep -Eo [-[:alnum:]_/]+[[:space:]]?$$)
IMAGE_DIR = $(firstword $(shell grep -Eo ^romimage[[:space:]]+\"[[:alnum:]_-/]+ $(dir $*)/Config.lb|sed -r s/romimage[[:space:]]+\"//))
# Evaluate one assignment to variable "$1" from file "$2"
load_var = $(eval $(shell grep -E ^[[:space:]]*$1[[:space:]]*:*= $2 $(IGNORE_ERRORS)))
.PHONY: clean analysis
analysis: analysis.dat
gnuplot -persist '$<'
# Generate gnuplot data file
analysis.dat: analysis.txt
@ echo Writing gnuplot data file \($@\).
@ echo -e > $@ "# gnuplot dataset auto-generated $(shell date)" \
"\nset title \"Coreboot Codebase Analysis\"" \
"\nset style data boxes" \
"\nset style fill solid .5" \
$(foreach target, $(TARGETS), "\n"set label \"$(target)\" at $(words $(labels))$(eval labels += $(target)),-145 rotate front) \
"\nplot [-.5:] '-' t 'Source:' , '-' t 'Nested C:' , '-' t 'Headers:' , '-' t 'romcc Sources:' , '-' t 'romcc Headers:'"
@ grep -F "C files" $< | grep -Eo [[:digit:]]+ >> $@
@ echo e >> $@
@ grep -F "Nested C" $< | grep -Eo [[:digit:]]+ >> $@
@ echo e >> $@
@ grep -F "Headers" $< | grep -Eo [[:digit:]]+ | sed -r s/\([[:digit:]]+\)/'-'\\1/>> $@
@ echo e >> $@
@ grep -F "romcc C" $< | grep -Eo [[:digit:]]+ >> $@
@ echo e >> $@
@ grep -F "romcc H" $< | grep -Eo [[:digit:]]+ | sed -r s/\([[:digit:]]+\)/'-'\\1/>> $@
@ echo e >> $@
analysis.txt: $(foreach target, $(TARGETS), $(BUILD_BASE)/$(target)/$(shell grep ^target $(BUILD_BASE)/$(target)/Config.lb | grep -Eo [[:alnum:]_-]+[[:space:]]?$$)/analysis/info)
@ echo -e "\n\n"Compiling individual target analysis info into $@.
cat $? | tee -a $@
# Prevent automatic deletion of intermediate files
.SECONDARY: $(prepend $(foreach target, $(TARGETS), $(BUILD_BASE)/$(target)/$(shell grep ^target $(BUILD_BASE)/$(target)/Config.lb | grep -Eo [[:alnum:]_-]+$$)), /analysis, /analysis/c_files, /analysis/h_files, /analysis/info, /Makefile)
# FIXME: This rule is necessary even if the Makefile already exists.
%/Makefile:
@ echo \*\*\* Building target: $(notdir $*) \*\*\*
-@ cd $(TOP)/targets && ./buildtarget $(dir $*) 1>>build.log 2>>builderrors.log
@ echo -e >> $*/$(IMAGE_DIR)/Makefile "depend:\n\t"'@ makedepend -v -f- -- $$(CPPFLAGS) -- $$(SOURCES)'
%/analysis/c_files: %/Makefile
@ echo Analysis directory is $*/analysis
-@ mkdir $*/analysis $(IGNORE_ERRORS)
@ echo -n Finding C source files...
@ grep -Eo \\$$+[\(][A-Z_]+[\)][/-_[:alnum:]]+'\.c\>' $*/$(IMAGE_DIR)/Makefile | grep -v $(IGNORE_C) | sort -u > $@
@ echo " "Done.
# Grep for .c files #included within others (only one level deep).
# sed commands provide full pathname for included .c files, assuming two things:
# 1. If include statement has no directory component, the file is in same dir.
# 2. If included file has a directory component, it's base is from $(TOP)/src/.
%/analysis/nested_c_files: %/analysis/c_files
@ echo -n Finding nested .c includes...
$(eval c_files := $(shell cat $<))
@ grep -Eo '\#'include[[:space:]\"]+[/-_[:alnum:]]+'\.c' $(c_files) | sed s/\#include[[:space:]]// > $@.tmp
@ sed -r s/\([/-_[:alnum:]]+\\/\)\([-_[:alnum:]]+'.c'\):\"\([-_[:alnum:]]+'.c'$$\)/\\1\\2:' '\\1\\3/ $@.tmp | \
sed -r s/\([/-_[:alnum:]]+\\/\)\([-_[:alnum:]]+'.c'\):\"\([/-_[:alnum:]]+'.c'$$\)/\\1\\2:' '\$$\(TOP\)\\/src\\/\\3/ > $@
@ rm $@.tmp
@ echo " "Done.
%/analysis/h_files: %/analysis/c_files %/analysis/nested_c_files
@ echo -n Finding all included headers...
$(call load_var,TARGET_DIR, $*/Makefile.settings)
@ $(MAKE) -C $(TARGET_DIR)/$(IMAGE_DIR) depend $(IGNORE_ERRORS) | grep -v makedepend | grep -Eo [/-_[:alnum:]]+'\.h' | sort -u > $@ && \
$(MAKE) -C $(TARGET_DIR)/$(IMAGE_DIR) "SOURCES := $(shell grep [/-_[:alnum:]]+'.c' $(word 2, $?))" depend $(IGNORE_ERRORS) | grep -v makedepend | grep -Eo [/-_[:alnum:]]+'\.h' | sort -u >> $@
@ echo " "Done.
#%/auto.inc:
# Determine which sources use romcc by their inclusion in auto.inc #FIXME better
%/analysis/romcc_files: %/analysis/c_files %/analysis/nested_c_files
$(call load_var,TARGET_DIR, $*/Makefile.settings)
@ $(if $(findstring cache_as_ram, $(shell cat $<)), \
echo none, \
echo -n \* Uses romcc - making auto.inc... && \
$(MAKE) -iC $(TARGET_DIR)/$(IMAGE_DIR) auto.inc $(IGNORE_ERRORS) 1>/dev/null && \
echo " "to find sources that use romcc. && \
grep -Eo [/-_[:alnum:]]+'\.c' $(TARGET_DIR)/$(IMAGE_DIR)/auto.inc | sort -u) \
> $@
# Full pathnames of found files are gathered from nested_c_files and c_files.
%/analysis/romcc_sources: %/analysis/c_files %/analysis/nested_c_files %/analysis/romcc_files
@ echo -e $(foreach file, $(shell cat $(dir $@)/romcc_files), "\n"'$(firstword $(shell grep -Eho [/-_\$$\(\)[:alnum:]]+/'$(file)' $(dir $@)/nested_c_files $(dir $@)/c_files))') >$@
%/analysis/romcc_headers: %/analysis/romcc_sources
@ echo -n Finding headers used by any romcc source...
$(eval romcc_sources = $(shell cat $(dir $@)/romcc_sources))
$(call load_var,TARGET_DIR, $*/Makefile.settings)
@ $(MAKE) -C $(TARGET_DIR)/$(IMAGE_DIR) "SOURCES := $(romcc_sources)" depend $(IGNORE_ERRORS) | grep -v makedepend | grep -Eo [/-_[:alnum:]]+'\.'h | sort -u > $@
@ echo " "Done.
$(BUILD_BASE)/%/analysis/info: $(BUILD_BASE)/%/analysis/h_files $(BUILD_BASE)/%/analysis/romcc_headers
@ echo -e Target: $(subst /, , $(dir $*)) \
"\n"Uses $(if $(findstring cache_as_ram, $(shell grep -F '.c' $(dir $@)/c_files)),CAR,romcc) \
"\n"C files: $(shell grep -Eo [-_[:alnum:]]+'\.c' $(dir $@)/c_files $(dir $@)/nested_c_files $(dir $@)/romcc_sources | sort -u | grep -Fc '.c') \
"\n"Nested C: $(shell grep -Eo [-_[:alnum:]]+'\.c' $(dir $@)/nested_c_files | sort -u | grep -Fc '.c') \
"\n"Headers: $(shell grep -Eo [-_[:alnum:]]+'\.h' $(dir $@)/h_files $(dir $@)/romcc_headers | sort -u | grep -Fc '.h') \
"\n"romcc C: $(shell grep -Ec [-_[:alnum:]]+'\.c' $(dir $@)/romcc_sources) \
"\n"romcc H: $(shell grep -Ec [-_[:alnum:]]+'\.h' $(dir $@)/romcc_headers) \
"\n">> $@
clean-builds:
rm -rf $(foreach target, $(TARGETS), $(BUILD_BASE)/$(target)/$(shell grep ^target $(BUILD_BASE)/$(target)/Config.lb | grep -Eo [[:alnum:]_-]+[[:space:]]?$$))
clean:
rm -rf $(foreach target, $(TARGETS), $(BUILD_BASE)/$(target)/$(shell grep ^target $(BUILD_BASE)/$(target)/Config.lb | grep -Eo [[:alnum:]_-]+[[:space:]]?$$)/analysis) analysis.txt analysis.dat
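For reference, the intended use of this (now removed) Makefile, run from its own directory, was:

    $ make analysis   # build each target, gather per-target counts into
                      # analysis.txt, convert to the gnuplot script
                      # analysis.dat, and plot it
    $ make clean      # remove the per-target analysis dirs and data files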


@@ -1,137 +0,0 @@
#!/bin/bash
# $1 board name
normalize() {
# $1 filename
cat $1 | while read line; do
if echo $line | grep '= 0x' > /dev/null; then
first=`echo $line | cut -d= -f1`
last=`echo $line |cut -d= -f2 |cut -d\; -f1`
echo $first = $(($last + 0))\;
else
echo $line
fi
done
}
BOARDPATH=`echo $1 | sed s,/,_,g`
A=`mktemp tmp.XXXXXXXXXX`
rm -rf $A
mkdir -p $A
sort coreboot-builds/$BOARDPATH/fallback/ldoptions > $A/old
sort build/ldoptions > $A/new
if [ `grep -c "^CONFIG_NORTHBRIDGE_AMD_AMDFAM10" $A/new` -eq 0 ]; then
sed \
-e "/^CONFIG_AMDMCT / d" \
-e "/^CONFIG_C[BD]B / d" \
-e "/^CONFIG_EXT_CONF_SUPPORT / d" \
-e "/^CONFIG_EXT_RT_TBL_SUPPORT / d" \
-e "/^CONFIG_HT3_SUPPORT / d" \
$A/old > $A/old.tmp && mv $A/old.tmp $A/old
fi
if [ `grep -c "^CONFIG_NORTHBRIDGE_AMD_AMDK8" $A/new` -eq 0 ]; then
sed \
-e "/^CONFIG_K8_HT_FREQ_1G_SUPPORT / d" \
$A/old > $A/old.tmp && mv $A/old.tmp $A/old
fi
if [ `grep -c "^CONFIG_NORTHBRIDGE_AMD_AMDFAM10" $A/new` -eq 0 -a `grep -c "^CONFIG_NORTHBRIDGE_AMD_AMDK8" $A/new` -eq 0 ]; then
sed \
-e "/^CONFIG_APIC_ID_OFFSET / d" \
-e "/^CONFIG_CPU_SOCKET_TYPE / d" \
-e "/^CONFIG_DIMM_SUPPORT / d" \
-e "/^CONFIG_HT_CHAIN_UNITID_BASE / d" \
-e "/^CONFIG_HT_CHAIN_END_UNITID_BASE / d" \
-e "/^CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC / d" \
-e "/^CONFIG_HW_MEM_HOLE_SIZEK / d" \
-e "/^CONFIG_MEM_TRAIN_SEQ / d" \
-e "/^CONFIG_SB_HT_CHAIN_ON_BUS0 / d" \
-e "/^CONFIG_SB_HT_CHAIN_UNITID_OFFSET_ONLY / d" \
$A/old > $A/old.tmp && mv $A/old.tmp $A/old
fi
sed \
-e "/^CONFIG_CONSOLE_/ d" \
-e "/^CONFIG_MAXIMUM_CONSOLE_LOGLEVEL/ d" \
-e "/^CONFIG_DEFAULT_CONSOLE_LOGLEVEL/ d" \
-e "/^CONFIG_RESET_/ d" \
-e "/^CONFIG_XIP_ROM_/ d" \
-e "/^CONFIG_PRECOMPRESSED_PAYLOAD / d" \
-e "/^CONFIG_K8_MEM_BANK_B_ONLY / d" \
-e "/^CONFIG_MULTIBOOT / d" \
-e "/^CONFIG_ARCH_POWERPC / d" \
-e "/^CONFIG_RESET / d" \
-e "/^CONFIG_ROM_PAYLOAD / d" \
-e "/^CONFIG_ROM_SECTION_/ d" \
-e "/^CONFIG_UNCOMPRESSED / d" \
-e "/^CONFIG_COMPRESS / d" \
-e "/^CONFIG_COMPRESSED_PAYLOAD_LZMA / d" \
-e "/^CONFIG_ASSEMBLER_DEBUG / d" \
-e "/^CONFIG_HAVE_FAILOVER_BOOT / d" \
-e "/^CONFIG_FAILOVER_SIZE / d" \
-e "/^CONFIG_FALLBACK_SIZE / d" \
-e "/^CONFIG_ROMBASE / d" \
-e "/^CONFIG_ROM_IMAGE_SIZE / d" \
-e "/^CONFIG_STACK_SIZE / d" \
-e "/^CONFIG_GDB_STUB / d" \
-e "/^CONFIG_VIDEO_MB / d" \
-e "/^CONFIG_HAVE_MOVNTI / d" \
-e "/^CONFIG_PCIE_CONFIGSPACE_HOLE / d" \
$A/old > $A/old.filtered
sed \
-e "/^CONFIG_VENDOR_/ d" \
-e "/^CONFIG_ARCH_POWERPC / d" \
-e "/^CONFIG_MAXIMUM_CONSOLE_LOGLEVEL/ d" \
-e "/^CONFIG_DEFAULT_CONSOLE_LOGLEVEL/ d" \
-e "/^CONFIG_COREBOOT_ROMSIZE_/ d" \
-e "/^CONFIG_BOARD_/ d" \
-e "/^CONFIG_HAVE_MOVNTI / d" \
-e "/^CONFIG_[NORTHSOUTH]*BRIDGE_/ d" \
-e "/^CONFIG_SUPERIO_/ d" \
-e "/^CONFIG_GX1_VIDEOMODE_/ d" \
-e "/^CONFIG_CONSOLE_/ d" \
-e "/^CONFIG_PAYLOAD_/ d" \
-e "/^CONFIG_XIP_ROM_/ d" \
-e "/^CONFIG_MULTIBOOT/ d" \
-e "/^CONFIG_HAVE_FAILOVER_BOOT / d" \
-e "/^CONFIG_COMPRESSED_PAYLOAD_LZMA / d" \
-e "/^CONFIG_CPU_[A-Z]*_MODEL_/ d" \
-e "/^CONFIG_CPU_[A-Z]*_SOCKET_/ d" \
-e "/^CONFIG_CPU_AMD_/ d" \
-e "/^CONFIG_CPU_INTEL_/ d" \
-e "/^CONFIG_CPU_VIA_/ d" \
-e "/^CONFIG_ROMBASE / d" \
-e "/^CONFIG_ROM_IMAGE_SIZE / d" \
-e "/^CONFIG_STACK_SIZE / d" \
-e "/^CONFIG_GDB_STUB / d" \
-e "/^CONFIG_VIDEO_MB / d" \
-e "/^CONFIG_EXPERT / d" \
-e "/^CONFIG_SSE / d" \
-e "/^CONFIG_MMX / d" \
-e "/^CONFIG_VGA_BIOS / d" \
-e "/^CONFIG_WARNINGS_ARE_ERRORS / d" \
-e "/^CONFIG_TINY_BOOTBLOCK / d" \
-e "/^CONFIG_BIG_BOOTBLOCK / d" \
-e "/^CONFIG_BOOTBLOCK_NORTHBRIDGE_INIT / d" \
-e "/^CONFIG_BOOTBLOCK_SOUTHBRIDGE_INIT / d" \
$A/new > $A/new.filtered
normalize $A/old.filtered > $A/old.normalized
normalize $A/new.filtered > $A/new.normalized
diff -u $A/old.normalized $A/new.normalized | \
grep ^[+-][^+-] | \
sed -e "s,^+,p ," -e "s,^-,m ," | \
sort -k2,2 -k1,1 | \
sed -e "s,^p ,+," -e "s,^m ,-," | \
while read line; do
key=`echo $line|cut -f1 -d\=`
value=`echo $line|cut -f2 -d\= | tr -d \\;`
printf "%s = 0x%x\n" "$key" $value
done
rm -rf $A
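The script is invoked as "sh util/compareboard/compareboard VENDOR/BOARD" (as kbuildall does below), diffing the filtered newconfig ldoptions against the Kconfig build's. The normalize helper makes the two sides comparable by forcing hex values to decimal; for example (hypothetical value):

    $ echo 'CONFIG_STACK_SIZE = 0x2000;' > sample
    $ normalize sample
    CONFIG_STACK_SIZE = 8192;

The final loop prints each remaining difference back in hex, so mismatched options line up as +/- pairs sorted by option name.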


@@ -1,68 +0,0 @@
#!/bin/sh
#
# coreboot autobuilder for kconfig
#
# This script builds coreboot images for all available targets.
#
# (C) 2009 coresystems GmbH
# written by Patrick Georgi <patrick.georgi@coresystems.de>
#
# This file is subject to the terms and conditions of the GNU General
# Public License, version 2. See the file COPYING in the main directory
# of this archive for more details.
TARGETDIR=kbuildall.results
BOARD=$1
if [ ! -f util/kbuildall/kbuildall ]; then
echo "This application must be run from the"
echo "toplevel directory of a coreboot checkout."
exit 1
fi
for make in make gmake gnumake; do
if [ "`$make --version 2>/dev/null | grep -c GNU`" -gt 0 ]; then
MAKE=$make
break
fi
done
builddefconfig() {
# $1: mainboarddir
$MAKE distclean
grep "depends[\t ]on[\t ]*VENDOR" src/mainboard/$1/../Kconfig | sed "s,^.*\(VENDOR_.*\)[^A-Z0-9_]*,CONFIG_\1=y," > .config
grep "config[\t ]*BOARD" src/mainboard/$1/Kconfig | sed "s,^.*\(BOARD_.*\)[^A-Z0-9_]*,CONFIG_\1=y," >> .config
grep "select[\t ]*ARCH" src/mainboard/$1/Kconfig | sed "s,^.*\(ARCH_.*\)[^A-Z0-9_]*,CONFIG_\1=y," >> .config
echo "CONFIG_MAINBOARD_DIR=$1" >> .config
yes "" | $MAKE oldconfig
}
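builddefconfig seeds .config with the vendor, board, and architecture selects scraped out of the Kconfig files, then lets "yes '' | make oldconfig" fill in every remaining default. For a hypothetical board the seed file would look like:

    CONFIG_VENDOR_AMD=y
    CONFIG_BOARD_AMD_SOLO=y
    CONFIG_ARCH_X86=y
    CONFIG_MAINBOARD_DIR=amd/solo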
ALLTARGETS=`(cd src/mainboard; ls */*/Config.lb | sed s,/Config.lb,,)`
TARGETCOUNT=`echo $ALLTARGETS | wc -w`
if [ -n "$BOARD" ]; then
TARGETCOUNT=1
ALLTARGETS=$BOARD
else
rm -rf $TARGETDIR
fi
mkdir -p $TARGETDIR
i=0
for dir in $ALLTARGETS; do
i=`expr $i + 1`
if [ ! -f src/mainboard/$dir/Kconfig ]; then
echo "[$i/$TARGETCOUNT] ($dir) no Kconfig"
echo "$dir nokconfig" >> $TARGETDIR/_overview.txt
continue
fi
name=`echo $dir | sed s,/,_,g`
printf "[$i/$TARGETCOUNT] $dir "
builddefconfig $dir > $TARGETDIR/$name.buildconfig.log 2>&1
result=`$MAKE > $TARGETDIR/$name.buildcoreboot.log 2>&1 && echo ok || echo fail`
echo "$result."
if [ "$result" = "ok" ]; then
util/abuild/abuild -o -C -t $dir
sh util/compareboard/compareboard $dir | tee $TARGETDIR/$name.variables.txt
fi
echo "$dir $result" >> $TARGETDIR/_overview.txt
done


@@ -1,18 +0,0 @@
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,31 +0,0 @@
ALL: $(shell echo *.g | sed s/\\.g/\\.py/g )
%.py: %.g yapps2.py yappsrt.py Makefile
python yapps2.py $<
DOC: yapps2.ps yapps2.pdf manual/index.html
yapps2.ps: yapps2.dvi
dvips -q yapps2.dvi -o yapps2.ps
yapps2.pdf: yapps2.ps
ps2pdf yapps2.ps
yapps2.dvi: yapps2.tex
latex yapps2.tex
manual/index.html: yapps2.aux yapps2.tex
rm manual/yapps2.css
latex2html -dir 'manual' -mkdir -lcase_tags -font_size 12pt -split 4 -toc_depth 4 -html_version 4.0,unicode,table -t 'Yapps 2.0 Manual' -address 'Amit J Patel, amitp@cs.stanford.edu' -info 0 -show_section_numbers -up_title 'Yapps Page' -up_url 'http://theory.stanford.edu/~amitp/yapps/' -strict -image_type png yapps2.tex
echo '@import url("http://www-cs-students.stanford.edu/~amitp/amitp.css");' > manual/yapps2-new.css
echo 'hr { display:none; }' >> manual/yapps2-new.css
echo 'h1 br, h2 br { display:none; }' >>manual/yapps2-new.css
cat manual/yapps2.css >> manual/yapps2-new.css
rm manual/yapps2.css
mv manual/yapps2-new.css manual/yapps2.css
DISTRIB:
cd ..; zip -u yapps2.zip yapps2/{LICENSE,yapps2.py,yappsrt.py,parsedesc.g,examples/*.g,NOTES,yapps2.tex,Makefile,manual/*.html,manual/*.css,manual/*.png}
clean:
rm -f config.py yappsrt.pyc parsedesc.py
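The generator bootstraps itself: each X.g grammar becomes X.py, with the output name defaulting to the input name when only one argument is given. The second form below is the same call abuild's create_builddir used to regenerate newconfig's config.py:

    $ python yapps2.py parsedesc.g         # writes parsedesc.py
    $ python yapps2.py config.g config.py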


@@ -1,46 +0,0 @@
April 14, 2002:
I haven't worked on Yapps for a while, mainly because I spent all my energy
on trying to graduate. Now that I've finished school, I have several projects
I want to start working on again, including Yapps.
Notes for myself:
Add a debugging mode that helps you understand how the grammar
is constructed and how things are being parsed
Look into an English output mode that would use natural language
to describe a grammar
Optimize unused variables
Add a convenience to automatically gather up the values returned
from subpatterns, put them into a list, and return them
Improve the documentation
Write some larger examples
Get rid of old-style regex support
Use SRE's lex support to speed up lexing (this may be hard given that
yapps allows for context-sensitive lexers)
Look over Dan Connolly's experience with Yapps (bugs, frustrations, etc.)
and see what improvements could be made
Add something to pretty-print the grammar (without the actions)
Maybe conditionals? Follow this rule only if <condition> holds.
But this would be useful mainly when multiple rules match, and we
want the first matching rule. The conditional would mean we skip to
the next rule. Maybe this is part of the attribute grammar system,
where rule X<0> can be specified separately from X<N>.
Convenience functions that could build return values for all rules
without specifying the code for each rule individually
Patterns (abstractions over rules) -- for example, comma separated values
have a certain rule pattern that gets replicated all over the place
"Gather" mode that simply outputs the return values for certain nodes.
For example, if you just want all expressions, you could ask yapps
to gather the results of the 'expr' rule into a list. This would
ignore all the higher level structure.
Look at everyone's Yapps grammars, and come up with larger examples
http://www.w3.org/2000/10/swap/SemEnglish.g
http://www.w3.org/2000/10/swap/kifExpr.g
http://www.w3.org/2000/10/swap/rdfn3.g
It would be nice if you could feed text into Yapps (push model) instead
of Yapps reading text out of a string (pull model). However, I think
that would make the resulting parser code mostly unreadable
(like yacc, etc.). Coroutines/stacklesspython may be the answer.

File diff suppressed because it is too large.


@@ -1,196 +0,0 @@
######################################################################
# The remainder of this file is from parsedesc.{g,py}
def append(lst, x):
"Imperative append"
lst.append(x)
return lst
def add_inline_token(tokens, str):
tokens.insert( 0, (str, eval(str, {}, {})) )
return Terminal(str)
def cleanup_choice(lst):
if len(lst) == 0: return Sequence([])
if len(lst) == 1: return lst[0]
return apply(Choice, tuple(lst))
def cleanup_sequence(lst):
if len(lst) == 1: return lst[0]
return apply(Sequence, tuple(lst))
def cleanup_rep(node, rep):
if rep == 'star': return Star(node)
elif rep == 'plus': return Plus(node)
else: return node
def resolve_name(tokens, id, args):
if id in map(lambda x: x[0], tokens):
# It's a token
if args:
print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
return Terminal(id)
else:
# It's a name, so assume it's a nonterminal
return NonTerminal(id, args)
%%
parser ParserDescription:
option: "context-insensitive-scanner"
ignore: "[ \t\r\n]+"
ignore: "#.*?\r?\n"
token END: "$"
token ATTR: "<<.+?>>"
token STMT: "{{.+?}}"
token ID: '[a-zA-Z_][a-zA-Z_0-9]*'
token STR: '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'
token LP: '\\('
token RP: '\\)'
token LB: '\\['
token RB: '\\]'
token OR: '[|]'
token STAR: '[*]'
token PLUS: '[+]'
token QUEST: '[?]'
token COLON: ':'
rule Parser: "parser" ID ":"
Options
Tokens
Rules<<Tokens>>
END
{{ return Generator(ID,Options,Tokens,Rules) }}
rule Options: {{ opt = {} }}
( "option" ":" Str {{ opt[Str] = 1 }} )*
{{ return opt }}
rule Tokens: {{ tok = [] }}
(
"token" ID ":" Str {{ tok.append( (ID,Str) ) }}
| "ignore" ":" Str {{ tok.append( ('#ignore',Str) ) }}
)*
{{ return tok }}
rule Rules<<tokens>>:
{{ rul = [] }}
(
"rule" ID OptParam ":" ClauseA<<tokens>>
{{ rul.append( (ID,OptParam,ClauseA) ) }}
)*
{{ return rul }}
rule ClauseA<<tokens>>:
ClauseB<<tokens>>
{{ v = [ClauseB] }}
( OR ClauseB<<tokens>> {{ v.append(ClauseB) }} )*
{{ return cleanup_choice(v) }}
rule ClauseB<<tokens>>:
{{ v = [] }}
( ClauseC<<tokens>> {{ v.append(ClauseC) }} )*
{{ return cleanup_sequence(v) }}
rule ClauseC<<tokens>>:
ClauseD<<tokens>>
( PLUS {{ return Plus(ClauseD) }}
| STAR {{ return Star(ClauseD) }}
| {{ return ClauseD }} )
rule ClauseD<<tokens>>:
STR {{ t = (STR, eval(STR,{},{})) }}
{{ if t not in tokens: tokens.insert( 0, t ) }}
{{ return Terminal(STR) }}
| ID OptParam {{ return resolve_name(tokens, ID, OptParam) }}
| LP ClauseA<<tokens>> RP {{ return ClauseA }}
| LB ClauseA<<tokens>> RB {{ return Option(ClauseA) }}
| STMT {{ return Eval(STMT[2:-2]) }}
rule OptParam: [ ATTR {{ return ATTR[2:-2] }} ] {{ return '' }}
rule Str: STR {{ return eval(STR,{},{}) }}
%%
# This replaces the default main routine
yapps_options = [
('context-insensitive-scanner', 'context-insensitive-scanner',
'Scan all tokens (see docs)')
]
def generate(inputfilename, outputfilename='', dump=0, **flags):
"""Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py)."""
if not outputfilename:
if inputfilename[-2:]=='.g': outputfilename = inputfilename[:-2]+'.py'
else: raise "Invalid Filename", outputfilename
print 'Input Grammar:', inputfilename
print 'Output File:', outputfilename
DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
preparser, postparser = None, None # Code before and after the parser desc
# Read the entire file
s = open(inputfilename,'r').read()
# See if there's a separation between the pre-parser and parser
f = find(s, DIVIDER)
if f >= 0: preparser, s = s[:f]+'\n\n', s[f+len(DIVIDER):]
# See if there's a separation between the parser and post-parser
f = find(s, DIVIDER)
if f >= 0: s, postparser = s[:f], '\n\n'+s[f+len(DIVIDER):]
# Create the parser and scanner
p = ParserDescription(ParserDescriptionScanner(s))
if not p: return
# Now parse the file
t = wrap_error_reporter(p, 'Parser')
if not t: return # Error
if preparser is not None: t.preparser = preparser
if postparser is not None: t.postparser = postparser
# Check the options
for f in t.options.keys():
for opt,_,_ in yapps_options:
if f == opt: break
else:
print 'Warning: unrecognized option', f
# Add command line options to the set
for f in flags.keys(): t.options[f] = flags[f]
# Generate the output
if dump:
t.dump_information()
else:
t.output = open(outputfilename, 'w')
t.generate_output()
if __name__=='__main__':
import sys, getopt
optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['dump'])
if not args or len(args) > 2:
print 'Usage:'
print ' python', sys.argv[0], '[flags] input.g [output.py]'
print 'Flags:'
print (' --dump' + ' '*40)[:35] + 'Dump out grammar information'
for flag, _, doc in yapps_options:
print (' -f' + flag + ' '*40)[:35] + doc
else:
# Read in the options and create a list of flags
flags = {}
for opt in optlist:
for flag, name, _ in yapps_options:
if opt == ('-f', flag):
flags[name] = 1
break
else:
if opt == ('--dump', ''):
flags['dump'] = 1
else:
print 'Warning - unrecognized option: ', opt[0], opt[1]
apply(generate, tuple(args), flags)
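For a sense of the input language this grammar describes, a minimal self-contained yapps grammar (illustrative only, not a file from the tree) could read:

    parser Calc:
        ignore: "[ \t\n]+"
        token END: "$"
        token NUM: "[0-9]+"
        token PLUS: "[+]"

        rule goal: expr END       {{ return expr }}
        rule expr: NUM            {{ v = int(NUM) }}
                   ( PLUS NUM     {{ v = v + int(NUM) }} )*
                   {{ return v }}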


@@ -1,6 +0,0 @@
target x
mainboard amd/solo
# option X=1
# makerule x y "z"
payload /dev/null
end


@@ -1,779 +0,0 @@
# Yapps 2.0 - yet another python parser system
# Amit J Patel, January 1999
# See http://theory.stanford.edu/~amitp/Yapps/ for documentation and updates
# v2.0.1 changes (October 2001):
# * The exceptions inherit the standard Exception class (thanks Rich Salz)
# * The scanner can use either a different set of regular expressions
# per instance, or allows the subclass to define class fields with
# the patterns. This improves performance when many Scanner objects
# are being created, because the regular expressions don't have to
# be recompiled each time. (thanks Amaury Forgeot d'Arc)
# v2.0.2 changes (April 2002)
# * Fixed a bug in generating the 'else' clause when the comment was too
# long. v2.0.1 was missing a newline. (thanks Steven Engelhardt)
# v2.0.3 changes (August 2002)
# * Fixed a bug with inline tokens using the r"" syntax.
from string import *
from yappsrt import *
import re
INDENT = " "*4
class Generator:
def __init__(self, name, options, tokens, rules):
self.change_count = 0
self.name = name
self.options = options
self.preparser = ''
self.postparser = None
self.tokens = {} # Map from tokens to regexps
self.ignore = [] # List of token names to ignore in parsing
self.terminals = [] # List of token names (to maintain ordering)
for n,t in tokens:
if n == '#ignore':
n = t
self.ignore.append(n)
if n in self.tokens.keys() and self.tokens[n] != t:
print 'Warning: token', n, 'multiply defined.'
self.tokens[n] = t
self.terminals.append(n)
self.rules = {} # Map from rule names to parser nodes
self.params = {} # Map from rule names to parameters
self.goals = [] # List of rule names (to maintain ordering)
for n,p,r in rules:
self.params[n] = p
self.rules[n] = r
self.goals.append(n)
import sys
self.output = sys.stdout
def __getitem__(self, name):
# Get options
return self.options.get(name, 0)
def non_ignored_tokens(self):
return filter(lambda x, i=self.ignore: x not in i, self.terminals)
def changed(self):
self.change_count = 1+self.change_count
def subset(self, a, b):
"See if all elements of a are inside b"
for x in a:
if x not in b: return 0
return 1
def equal_set(self, a, b):
"See if a and b have the same elements"
if len(a) != len(b): return 0
if a == b: return 1
return self.subset(a, b) and self.subset(b, a)
def add_to(self, parent, additions):
"Modify parent to include all elements in additions"
for x in additions:
if x not in parent:
parent.append(x)
self.changed()
def equate(self, a, b):
self.add_to(a, b)
self.add_to(b, a)
def write(self, *args):
for a in args:
self.output.write(a)
def in_test(self, x, full, b):
if not b: return '0'
if len(b)==1: return '%s == %s' % (x, `b[0]`)
if full and len(b) > len(full)/2:
# Reverse the sense of the test.
not_b = filter(lambda x, b=b: x not in b, full)
return self.not_in_test(x, full, not_b)
return '%s in %s' % (x, `b`)
def not_in_test(self, x, full, b):
if not b: return '1'
if len(b)==1: return '%s != %s' % (x, `b[0]`)
return '%s not in %s' % (x, `b`)
def peek_call(self, a):
a_set = (`a`[1:-1])
if self.equal_set(a, self.non_ignored_tokens()): a_set = ''
if self['context-insensitive-scanner']: a_set = ''
return 'self._peek(%s)' % a_set
def peek_test(self, a, b):
if self.subset(a, b): return '1'
if self['context-insensitive-scanner']: a = self.non_ignored_tokens()
return self.in_test(self.peek_call(a), a, b)
def not_peek_test(self, a, b):
if self.subset(a, b): return '0'
return self.not_in_test(self.peek_call(a), a, b)
def calculate(self):
while 1:
for r in self.goals:
self.rules[r].setup(self, r)
if self.change_count == 0: break
self.change_count = 0
while 1:
for r in self.goals:
self.rules[r].update(self)
if self.change_count == 0: break
self.change_count = 0
def dump_information(self):
self.calculate()
for r in self.goals:
print ' _____' + '_'*len(r)
print ('___/Rule '+r+'\\' + '_'*80)[:79]
queue = [self.rules[r]]
while queue:
top = queue[0]
del queue[0]
print `top`
top.first.sort()
top.follow.sort()
eps = []
if top.accepts_epsilon: eps = ['(null)']
print ' FIRST:', join(top.first+eps, ', ')
print ' FOLLOW:', join(top.follow, ', ')
for x in top.get_children(): queue.append(x)
def generate_output(self):
self.calculate()
self.write(self.preparser)
self.write("from string import *\n")
self.write("import re\n")
self.write("from yappsrt import *\n")
self.write("\n")
self.write("class ", self.name, "Scanner(Scanner):\n")
self.write(" patterns = [\n")
for p in self.terminals:
self.write(" (%s, re.compile(%s)),\n" % (
`p`, `self.tokens[p]`))
self.write(" ]\n")
self.write(" def __init__(self, str):\n")
self.write(" Scanner.__init__(self,None,%s,str)\n" %
`self.ignore`)
self.write("\n")
self.write("class ", self.name, "(Parser):\n")
for r in self.goals:
self.write(INDENT, "def ", r, "(self")
if self.params[r]: self.write(", ", self.params[r])
self.write("):\n")
self.rules[r].output(self, INDENT+INDENT)
self.write("\n")
self.write("\n")
self.write("def parse(rule, text):\n")
self.write(" P = ", self.name, "(", self.name, "Scanner(text))\n")
self.write(" return wrap_error_reporter(P, rule)\n")
self.write("\n")
if self.postparser is not None:
self.write(self.postparser)
else:
self.write("if __name__=='__main__':\n")
self.write(INDENT, "from sys import argv, stdin\n")
self.write(INDENT, "if len(argv) >= 2:\n")
self.write(INDENT*2, "if len(argv) >= 3:\n")
self.write(INDENT*3, "f = open(argv[2],'r')\n")
self.write(INDENT*2, "else:\n")
self.write(INDENT*3, "f = stdin\n")
self.write(INDENT*2, "print parse(argv[1], f.read())\n")
self.write(INDENT, "else: print 'Args: <rule> [<filename>]'\n")
######################################################################
class Node:
def __init__(self):
self.first = []
self.follow = []
self.accepts_epsilon = 0
self.rule = '?'
def setup(self, gen, rule):
# Setup will change accepts_epsilon,
# sometimes from 0 to 1 but never 1 to 0.
# It will take a finite number of steps to set things up
self.rule = rule
def used(self, vars):
"Return two lists: one of vars used, and the other of vars assigned"
return vars, []
def get_children(self):
"Return a list of sub-nodes"
return []
def __repr__(self):
return str(self)
def update(self, gen):
if self.accepts_epsilon:
gen.add_to(self.first, self.follow)
def output(self, gen, indent):
"Write out code to _gen_ with _indent_:string indentation"
gen.write(indent, "assert 0 # Invalid parser node\n")
class Terminal(Node):
def __init__(self, token):
Node.__init__(self)
self.token = token
self.accepts_epsilon = 0
def __str__(self):
return self.token
def update(self, gen):
Node.update(self, gen)
if self.first != [self.token]:
self.first = [self.token]
gen.changed()
def output(self, gen, indent):
gen.write(indent)
if re.match('[a-zA-Z_]+$', self.token):
gen.write(self.token, " = ")
gen.write("self._scan(%s)\n" % `self.token`)
class Eval(Node):
def __init__(self, expr):
Node.__init__(self)
self.expr = expr
def setup(self, gen, rule):
Node.setup(self, gen, rule)
if not self.accepts_epsilon:
self.accepts_epsilon = 1
gen.changed()
def __str__(self):
return '{{ %s }}' % strip(self.expr)
def output(self, gen, indent):
gen.write(indent, strip(self.expr), '\n')
class NonTerminal(Node):
def __init__(self, name, args):
Node.__init__(self)
self.name = name
self.args = args
def setup(self, gen, rule):
Node.setup(self, gen, rule)
try:
self.target = gen.rules[self.name]
if self.accepts_epsilon != self.target.accepts_epsilon:
self.accepts_epsilon = self.target.accepts_epsilon
gen.changed()
except KeyError: # Oops, it's nonexistent
print 'Error: no rule <%s>' % self.name
self.target = self
def __str__(self):
return '<%s>' % self.name
def update(self, gen):
Node.update(self, gen)
gen.equate(self.first, self.target.first)
gen.equate(self.follow, self.target.follow)
def output(self, gen, indent):
gen.write(indent)
gen.write(self.name, " = ")
gen.write("self.", self.name, "(", self.args, ")\n")
class Sequence(Node):
def __init__(self, *children):
Node.__init__(self)
self.children = children
def setup(self, gen, rule):
Node.setup(self, gen, rule)
for c in self.children: c.setup(gen, rule)
if not self.accepts_epsilon:
# If it's not already accepting epsilon, it might now do so.
for c in self.children:
# any non-epsilon means all is non-epsilon
if not c.accepts_epsilon: break
else:
self.accepts_epsilon = 1
gen.changed()
def get_children(self):
return self.children
def __str__(self):
return '( %s )' % join(map(lambda x: str(x), self.children))
def update(self, gen):
Node.update(self, gen)
for g in self.children:
g.update(gen)
empty = 1
for g_i in range(len(self.children)):
g = self.children[g_i]
if empty: gen.add_to(self.first, g.first)
if not g.accepts_epsilon: empty = 0
if g_i == len(self.children)-1:
next = self.follow
else:
next = self.children[1+g_i].first
gen.add_to(g.follow, next)
if self.children:
gen.add_to(self.follow, self.children[-1].follow)
def output(self, gen, indent):
if self.children:
for c in self.children:
c.output(gen, indent)
else:
# Placeholder for empty sequences, just in case
gen.write(indent, 'pass\n')
class Choice(Node):
def __init__(self, *children):
Node.__init__(self)
self.children = children
def setup(self, gen, rule):
Node.setup(self, gen, rule)
for c in self.children: c.setup(gen, rule)
if not self.accepts_epsilon:
for c in self.children:
if c.accepts_epsilon:
self.accepts_epsilon = 1
gen.changed()
def get_children(self):
return self.children
def __str__(self):
return '( %s )' % join(map(lambda x: str(x), self.children), ' | ')
def update(self, gen):
Node.update(self, gen)
for g in self.children:
g.update(gen)
for g in self.children:
gen.add_to(self.first, g.first)
gen.add_to(self.follow, g.follow)
for g in self.children:
gen.add_to(g.follow, self.follow)
if self.accepts_epsilon:
gen.add_to(self.first, self.follow)
def output(self, gen, indent):
test = "if"
gen.write(indent, "_token_ = ", gen.peek_call(self.first), "\n")
tokens_seen = []
tokens_unseen = self.first[:]
if gen['context-insensitive-scanner']:
# Context insensitive scanners can return ANY token,
# not only the ones in first.
tokens_unseen = gen.non_ignored_tokens()
for c in self.children:
testset = c.first[:]
removed = []
for x in testset:
if x in tokens_seen:
testset.remove(x)
removed.append(x)
if x in tokens_unseen: tokens_unseen.remove(x)
tokens_seen = tokens_seen + testset
if removed:
if not testset:
print 'Error in rule', self.rule+':', c, 'never matches.'
else:
print 'Warning:', self
print ' * These tokens are being ignored:', join(removed, ', ')
print ' due to previous choices using them.'
if testset:
if not tokens_unseen: # context sensitive scanners only!
if test=='if':
# if it's the first AND last test, then
# we can simply put the code without an if/else
c.output(gen, indent)
else:
gen.write(indent, "else: ")
t = gen.in_test('', [], testset)
if len(t) < 70-len(indent):
gen.write("#", t)
gen.write("\n")
c.output(gen, indent+INDENT)
else:
gen.write(indent, test, " ",
gen.in_test('_token_', tokens_unseen, testset),
":\n")
c.output(gen, indent+INDENT)
test = "elif"
if gen['context-insensitive-scanner'] and tokens_unseen:
gen.write(indent, "else:\n")
gen.write(indent, INDENT, "raise SyntaxError(self._pos, ")
gen.write("'Could not match ", self.rule, "')\n")
class Wrapper(Node):
def __init__(self, child):
Node.__init__(self)
self.child = child
def setup(self, gen, rule):
Node.setup(self, gen, rule)
self.child.setup(gen, rule)
def get_children(self):
return [self.child]
def update(self, gen):
Node.update(self, gen)
self.child.update(gen)
gen.add_to(self.first, self.child.first)
gen.equate(self.follow, self.child.follow)
class Option(Wrapper):
def setup(self, gen, rule):
Wrapper.setup(self, gen, rule)
if not self.accepts_epsilon:
self.accepts_epsilon = 1
gen.changed()
def __str__(self):
return '[ %s ]' % str(self.child)
def output(self, gen, indent):
if self.child.accepts_epsilon:
print 'Warning in rule', self.rule+': contents may be empty.'
gen.write(indent, "if %s:\n" %
gen.peek_test(self.first, self.child.first))
self.child.output(gen, indent+INDENT)
class Plus(Wrapper):
def setup(self, gen, rule):
Wrapper.setup(self, gen, rule)
if self.accepts_epsilon != self.child.accepts_epsilon:
self.accepts_epsilon = self.child.accepts_epsilon
gen.changed()
def __str__(self):
return '%s+' % str(self.child)
def update(self, gen):
Wrapper.update(self, gen)
gen.add_to(self.follow, self.first)
def output(self, gen, indent):
if self.child.accepts_epsilon:
print 'Warning in rule', self.rule+':'
print ' * The repeated pattern could be empty. The resulting'
print ' parser may not work properly.'
gen.write(indent, "while 1:\n")
self.child.output(gen, indent+INDENT)
union = self.first[:]
gen.add_to(union, self.follow)
gen.write(indent+INDENT, "if %s: break\n" %
gen.not_peek_test(union, self.child.first))
class Star(Plus):
def setup(self, gen, rule):
Wrapper.setup(self, gen, rule)
if not self.accepts_epsilon:
self.accepts_epsilon = 1
gen.changed()
def __str__(self):
return '%s*' % str(self.child)
def output(self, gen, indent):
if self.child.accepts_epsilon:
print 'Warning in rule', self.rule+':'
print ' * The repeated pattern could be empty. The resulting'
print ' parser probably will not work properly.'
gen.write(indent, "while %s:\n" %
gen.peek_test(self.follow, self.child.first))
self.child.output(gen, indent+INDENT)
######################################################################
# The remainder of this file is from parsedesc.{g,py}
def append(lst, x):
"Imperative append"
lst.append(x)
return lst
def add_inline_token(tokens, str):
tokens.insert( 0, (str, eval(str, {}, {})) )
return Terminal(str)
def cleanup_choice(lst):
if len(lst) == 0: return Sequence([])
if len(lst) == 1: return lst[0]
return apply(Choice, tuple(lst))
def cleanup_sequence(lst):
if len(lst) == 1: return lst[0]
return apply(Sequence, tuple(lst))
def cleanup_rep(node, rep):
if rep == 'star': return Star(node)
elif rep == 'plus': return Plus(node)
else: return node
def resolve_name(tokens, id, args):
if id in map(lambda x: x[0], tokens):
# It's a token
if args:
print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
return Terminal(id)
else:
# It's a name, so assume it's a nonterminal
return NonTerminal(id, args)
from string import *
import re
from yappsrt import *
class ParserDescriptionScanner(Scanner):
def __init__(self, str):
Scanner.__init__(self,[
('"rule"', 'rule'),
('"ignore"', 'ignore'),
('"token"', 'token'),
('"option"', 'option'),
('":"', ':'),
('"parser"', 'parser'),
('[ \011\015\012]+', '[ \011\015\012]+'),
('#.*?\015?\012', '#.*?\015?\012'),
('END', '$'),
('ATTR', '<<.+?>>'),
('STMT', '{{.+?}}'),
('ID', '[a-zA-Z_][a-zA-Z_0-9]*'),
('STR', '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'),
('LP', '\\('),
('RP', '\\)'),
('LB', '\\['),
('RB', '\\]'),
('OR', '[|]'),
('STAR', '[*]'),
('PLUS', '[+]'),
], ['[ \011\015\012]+', '#.*?\015?\012'], str)
class ParserDescription(Parser):
def Parser(self):
self._scan('"parser"')
ID = self._scan('ID')
self._scan('":"')
Options = self.Options()
Tokens = self.Tokens()
Rules = self.Rules(Tokens)
END = self._scan('END')
return Generator(ID,Options,Tokens,Rules)
def Options(self):
opt = {}
while self._peek('"option"', '"token"', '"ignore"', 'END', '"rule"') == '"option"':
self._scan('"option"')
self._scan('":"')
Str = self.Str()
opt[Str] = 1
return opt
def Tokens(self):
tok = []
while self._peek('"token"', '"ignore"', 'END', '"rule"') in ['"token"', '"ignore"']:
_token_ = self._peek('"token"', '"ignore"')
if _token_ == '"token"':
self._scan('"token"')
ID = self._scan('ID')
self._scan('":"')
Str = self.Str()
tok.append( (ID,Str) )
else: # == '"ignore"'
self._scan('"ignore"')
self._scan('":"')
Str = self.Str()
tok.append( ('#ignore',Str) )
return tok
def Rules(self, tokens):
rul = []
while self._peek('"rule"', 'END') == '"rule"':
self._scan('"rule"')
ID = self._scan('ID')
OptParam = self.OptParam()
self._scan('":"')
ClauseA = self.ClauseA(tokens)
rul.append( (ID,OptParam,ClauseA) )
return rul
def ClauseA(self, tokens):
ClauseB = self.ClauseB(tokens)
v = [ClauseB]
while self._peek('OR', 'RP', 'RB', '"rule"', 'END') == 'OR':
OR = self._scan('OR')
ClauseB = self.ClauseB(tokens)
v.append(ClauseB)
return cleanup_choice(v)
def ClauseB(self, tokens):
v = []
while self._peek('STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END') in ['STR', 'ID', 'LP', 'LB', 'STMT']:
ClauseC = self.ClauseC(tokens)
v.append(ClauseC)
return cleanup_sequence(v)
def ClauseC(self, tokens):
ClauseD = self.ClauseD(tokens)
_token_ = self._peek('PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END')
if _token_ == 'PLUS':
PLUS = self._scan('PLUS')
return Plus(ClauseD)
elif _token_ == 'STAR':
STAR = self._scan('STAR')
return Star(ClauseD)
else:
return ClauseD
def ClauseD(self, tokens):
_token_ = self._peek('STR', 'ID', 'LP', 'LB', 'STMT')
if _token_ == 'STR':
STR = self._scan('STR')
t = (STR, eval(STR,{},{}))
if t not in tokens: tokens.insert( 0, t )
return Terminal(STR)
elif _token_ == 'ID':
ID = self._scan('ID')
OptParam = self.OptParam()
return resolve_name(tokens, ID, OptParam)
elif _token_ == 'LP':
LP = self._scan('LP')
ClauseA = self.ClauseA(tokens)
RP = self._scan('RP')
return ClauseA
elif _token_ == 'LB':
LB = self._scan('LB')
ClauseA = self.ClauseA(tokens)
RB = self._scan('RB')
return Option(ClauseA)
else: # == 'STMT'
STMT = self._scan('STMT')
return Eval(STMT[2:-2])
def OptParam(self):
if self._peek('ATTR', '":"', 'PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END') == 'ATTR':
ATTR = self._scan('ATTR')
return ATTR[2:-2]
return ''
def Str(self):
STR = self._scan('STR')
return eval(STR,{},{})
# This replaces the default main routine
yapps_options = [
('context-insensitive-scanner', 'context-insensitive-scanner',
'Scan all tokens (see docs)')
]
def generate(inputfilename, outputfilename='', dump=0, **flags):
"""Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py)."""
if not outputfilename:
if inputfilename[-2:]=='.g': outputfilename = inputfilename[:-2]+'.py'
else: raise "Invalid Filename", outputfilename
print 'Input Grammar:', inputfilename
print 'Output File:', outputfilename
DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
preparser, postparser = None, None # Code before and after the parser desc
# Read the entire file
s = open(inputfilename,'r').read()
# See if there's a separation between the pre-parser and parser
f = find(s, DIVIDER)
if f >= 0: preparser, s = s[:f]+'\n\n', s[f+len(DIVIDER):]
# See if there's a separation between the parser and post-parser
f = find(s, DIVIDER)
if f >= 0: s, postparser = s[:f], '\n\n'+s[f+len(DIVIDER):]
# Create the parser and scanner
p = ParserDescription(ParserDescriptionScanner(s))
if not p: return
# Now parse the file
t = wrap_error_reporter(p, 'Parser')
if not t: return # Error
if preparser is not None: t.preparser = preparser
if postparser is not None: t.postparser = postparser
# Check the options
for f in t.options.keys():
for opt,_,_ in yapps_options:
if f == opt: break
else:
print 'Warning: unrecognized option', f
# Add command line options to the set
for f in flags.keys(): t.options[f] = flags[f]
# Generate the output
if dump:
t.dump_information()
else:
t.output = open(outputfilename, 'w')
t.generate_output()
if __name__=='__main__':
import sys, getopt
optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['dump'])
if not args or len(args) > 2:
print 'Usage:'
print ' python', sys.argv[0], '[flags] input.g [output.py]'
print 'Flags:'
print (' --dump' + ' '*40)[:35] + 'Dump out grammar information'
for flag, _, doc in yapps_options:
print (' -f' + flag + ' '*40)[:35] + doc
else:
# Read in the options and create a list of flags
flags = {}
for opt in optlist:
for flag, name, _ in yapps_options:
if opt == ('-f', flag):
flags[name] = 1
break
else:
if opt == ('--dump', ''):
flags['dump'] = 1
else:
print 'Warning - unrecognized option: ', opt[0], opt[1]
apply(generate, tuple(args), flags)
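Every generated module ends with the parse(rule, text) wrapper emitted by generate_output above, so driving a generated parser is one call (module name hypothetical; Python 2, as elsewhere in yapps):

    import calc                          # generated from calc.g (hypothetical)
    print calc.parse('goal', '1 + 2')    # prints 3 for the sketch grammar above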

File diff suppressed because it is too large.


@@ -1,172 +0,0 @@
# Yapps 2.0 Runtime
#
# This module is needed to run generated parsers.
from string import *
import exceptions
import re
class SyntaxError(Exception):
"""When we run into an unexpected token, this is the exception to use"""
def __init__(self, pos=-1, msg="Bad Token"):
self.pos = pos
self.msg = msg
def __repr__(self):
if self.pos < 0: return "#<syntax-error>"
else: return "SyntaxError[@ char " + `self.pos` + ": " + self.msg + "]"
class NoMoreTokens(Exception):
"""Another exception object, for when we run out of tokens"""
pass
class Scanner:
def __init__(self, patterns, ignore, input):
"""Patterns is [(terminal,regex)...]
Ignore is [terminal,...];
Input is a string"""
self.tokens = []
self.restrictions = []
self.input = input
self.pos = 0
self.ignore = ignore
# The stored patterns are a pair (compiled regex,source
# regex). If the patterns variable passed in to the
# constructor is None, we assume that the class already has a
# proper .patterns list constructed
if patterns is not None:
self.patterns = []
for k,r in patterns:
self.patterns.append( (k, re.compile(r)) )
def token(self, i, restrict=0):
"""Get the i'th token, and if i is one past the end, then scan
for another token; restrict is a list of tokens that
are allowed, or 0 for any token."""
if i == len(self.tokens): self.scan(restrict)
if i < len(self.tokens):
# Make sure the restriction is more restricted
if restrict and self.restrictions[i]:
for r in restrict:
if r not in self.restrictions[i]:
raise "Unimplemented: restriction set changed"
return self.tokens[i]
raise NoMoreTokens()
def __repr__(self):
"""Print the last 10 tokens that have been scanned in"""
output = ''
for t in self.tokens[-10:]:
output = '%s\n (@%s) %s = %s' % (output,t[0],t[2],`t[3]`)
return output
def scan(self, restrict):
"""Should scan another token and add it to the list, self.tokens,
and add the restriction to self.restrictions"""
# Keep looking for a token, ignoring any in self.ignore
while 1:
# Search the patterns for the longest match, with earlier
# tokens in the list having preference
best_match = -1
best_pat = '(error)'
for p, regexp in self.patterns:
# First check to see if we're ignoring this token
if restrict and p not in restrict and p not in self.ignore:
continue
m = regexp.match(self.input, self.pos)
if m and len(m.group(0)) > best_match:
# We got a match that's better than the previous one
best_pat = p
best_match = len(m.group(0))
# If we didn't find anything, raise an error
if best_pat == '(error)' and best_match < 0:
msg = "Bad Token"
if restrict:
msg = "Trying to find one of "+join(restrict,", ")
raise SyntaxError(self.pos, msg)
# If we found something that isn't to be ignored, return it
if best_pat not in self.ignore:
# Create a token with this data
token = (self.pos, self.pos+best_match, best_pat,
self.input[self.pos:self.pos+best_match])
self.pos = self.pos + best_match
# Only add this token if it's not in the list
# (to prevent looping)
if not self.tokens or token != self.tokens[-1]:
self.tokens.append(token)
self.restrictions.append(restrict)
return
else:
# This token should be ignored ..
self.pos = self.pos + best_match
class Parser:
def __init__(self, scanner):
self._scanner = scanner
self._pos = 0
def _peek(self, *types):
"""Returns the token type for lookahead; if there are any args
then the list of args is the set of token types to allow"""
tok = self._scanner.token(self._pos, types)
return tok[2]
def _scan(self, type):
"""Returns the matched text, and moves to the next token"""
tok = self._scanner.token(self._pos, [type])
if tok[2] != type:
raise SyntaxError(tok[0], 'Trying to find '+type)
self._pos = 1+self._pos
return tok[3]
def print_error(input, err, scanner):
"""This is a really dumb long function to print error messages nicely."""
p = err.pos
# Figure out the line number
line = count(input[:p], '\n')
print err.msg+" on line "+`line+1`+":"
# Now try printing part of the line
text = input[max(p-80,0):p+80]
p = p - max(p-80,0)
# Strip to the left
i = rfind(text[:p],'\n')
j = rfind(text[:p],'\r')
if i < 0 or (j < i and j >= 0): i = j
if i >= 0 and i < p:
p = p - i - 1
text = text[i+1:]
# Strip to the right
i = find(text,'\n',p)
j = find(text,'\r',p)
if i < 0 or (j < i and j >= 0): i = j
if i >= 0:
text = text[:i]
# Now shorten the text
while len(text) > 70 and p > 60:
# Cut off 10 chars
text = "..." + text[10:]
p = p - 7
# Now print the string, along with an indicator
print '> ',text
print '> ',' '*p + '^'
print 'List of nearby tokens:', scanner
def wrap_error_reporter(parser, rule):
try: return getattr(parser, rule)()
except SyntaxError, s:
input = parser._scanner.input
try:
print_error(input, s, parser._scanner)
except ImportError:
print 'Syntax Error',s.msg,'on line',1+count(input[:s.pos], '\n')
except NoMoreTokens:
print 'Could not complete parsing; stopped around here:'
print parser._scanner
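A minimal direct use of the runtime (a sketch; the token names are illustrative): the Scanner does longest-match tokenization, skipping any pattern named in its ignore list, and token() memoizes results for the Parser's one-token lookahead:

    from yappsrt import *
    s = Scanner([('NUM', '[0-9]+'), ('PLUS', '[+]'), ('WS', '[ ]+')],
                ['WS'], '1 + 2')
    print s.token(0)    # (0, 1, 'NUM', '1')
    print s.token(1)    # (2, 3, 'PLUS', '+')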


@@ -1,17 +0,0 @@
uses CONFIG_PCI_OPTION_ROM_RUN_YABEL
uses CONFIG_PCI_OPTION_ROM_RUN_REALMODE
if CONFIG_PCI_OPTION_ROM_RUN_YABEL
dir yabel
dir x86emu
else
if CONFIG_PCI_OPTION_ROM_RUN_REALMODE
object x86.o
object x86_interrupts.o
object x86_asm.S
else
object biosemu.o
dir x86emu
end
end


@@ -1,9 +0,0 @@
makedefine CPPFLAGS += -I$(TOP)/util/x86emu/include
object debug.o
object decode.o
object fpu.o
object ops.o
object ops2.o
object prim_ops.o
object sys.o


@@ -1,9 +0,0 @@
object biosemu.o
object debug.o
object device.o
object interrupt.o
object io.o
object mem.o
object pmm.o
#object vbe.o
dir compat


@@ -1 +0,0 @@
object functions.o