gcc/maintainer-scripts/update_web_docs

#!/bin/sh -x
# Run this from /tmp.
CVSROOT=/cvs/gcc
export CVSROOT
PATH=/usr/local/bin:$PATH
WWWBASE=/www/gcc/htdocs
# Process options -rrelease and -ddirectory
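# Example invocations (the tag and subdirectory names below are hypothetical):
#   update_web_docs                              # regenerate docs from the mainline (HEAD)
#   update_web_docs -r gcc-some_release -d 3.0   # docs for a release tag into onlinedocs/3.0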
RELEASE=""
SUBDIR=""
while [ $# -gt 0 ]; do
  case $1 in
    -r*)
      if [ -n "$RELEASE" ]; then
        echo "Multiple releases specified" >&2
        exit 1
      fi
      RELEASE="${1#-r}"
      if [ -z "$RELEASE" ]; then
        shift
        RELEASE="$1"
        if [ -z "$RELEASE" ]; then
          echo "No release specified with -r" >&2
          exit 1
        fi
      fi
      ;;
    -d*)
      if [ -n "$SUBDIR" ]; then
        echo "Multiple subdirectories specified" >&2
        exit 1
      fi
      SUBDIR="${1#-d}"
      if [ -z "$SUBDIR" ]; then
        shift
        SUBDIR="$1"
        if [ -z "$SUBDIR" ]; then
          echo "No subdirectory specified with -d" >&2
          exit 1
        fi
      fi
      ;;
    *)
      echo "Unknown argument \"$1\"" >&2
      exit 1
      ;;
  esac
  shift
done
if [ -n "$RELEASE" ] && [ -z "$SUBDIR" ]; then
  echo "Release specified without subdirectory" >&2
  exit 1
fi
if [ -z "$SUBDIR" ]; then
  DOCSDIR=$WWWBASE/onlinedocs
else
  DOCSDIR=$WWWBASE/onlinedocs/$SUBDIR
fi
if [ ! -d $DOCSDIR ]; then
  mkdir $DOCSDIR
fi
if [ -z "$RELEASE" ]; then
  RELEASE=HEAD
  DO_THANKS_HTML=y
else
  DO_THANKS_HTML=n
fi
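# Do all the work in a scratch directory under /tmp; it is removed again at the end.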
WORKDIR=/tmp/gcc-doc-update.$$
/bin/rm -rf $WORKDIR
/bin/mkdir $WORKDIR
cd $WORKDIR
# Find all the texi files in the repository, except those in directories
# we do not care about (Attic, texinfo, etc).
find $CVSROOT/gcc -name \*.texi,v -print | fgrep -v -f/home/gccadmin/scripts/doc_exclude | sed -e s#$CVSROOT/##g -e s#,v##g > FILES
# Checkout all the texi files and get them into a single directory.
# If we ever have texi files with the same name we'll have to do this
# differently.
cvs -Q co -r$RELEASE `cat FILES`
mv `find . -name \*.texi -print` .
# Now convert the relevant files from texi to html
for file in c-tree cpp chill cppinternals gcc gcj gxxint g77 objc-features porting; do
  /usr/local/bin/texi2html -glossary -menu -split_chapter ${file}.texi
done
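# With -split_chapter, texi2html writes a table-of-contents file (e.g. gcc_toc.html)
# plus per-chapter HTML files; the _toc.html files are scraped further below.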
# Then build a gzipped copy of each of the resulting .html files
for file in *.html; do
  cat $file | gzip --best > $file.gz
done
# On the 15th of the month, wipe all the old files from the
# web server.
today=`date +%d`
if test $today = 15; then
  find $DOCSDIR -type f -maxdepth 1 -print | grep -v index.html | xargs rm
fi
# And copy the resulting html files to the web server
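# Compare each file against the copy already on the server, ignoring the
# texi2html timestamp lines, and only install files whose content changed.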
for file in *.html; do
  cat $DOCSDIR/$file |
    sed -e '/^<!-- Created on/d' \
        -e '/^by <I>GCC Administrator<\/I> on/d' > file1
  cat $file |
    sed -e '/^<!-- Created on/d' \
        -e '/^by <I>GCC Administrator<\/I> on/d' > file2
  if cmp -s file1 file2; then
    :
  else
    cp $file ${file}.gz $DOCSDIR
  fi
done
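# Work out which generated HTML files hold the g77 news, the g77 bugs list and
# the GCC contributors list by scraping the HREFs out of the tables of contents.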
news_file=`grep "News About GNU Fortran" $DOCSDIR/g77_toc.html | sed -e '/HREF=.*[^.]/ s#^.*HREF="##g' | sed -e 's%#SEC.*%%g'`
bugs_file=`grep "Known Causes of Trouble with GNU Fortran" $DOCSDIR/g77_toc.html | sed -e '/HREF=.*[^.]/ s#^.*HREF="##g' | sed -e 's%#SEC.*%%g'`
contrib_file=`grep "Contributors to GCC" $DOCSDIR/gcc_toc.html | sed -e '/HREF=.*[^.]/ s#^.*HREF="##g' | sed -e 's%#SEC.*%%g'`
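# Give those pages stable names by hard-linking them (and their gzipped copies).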
cd $DOCSDIR
rm -f g77_news.html
rm -f g77_bugs.html
rm -f g77_news.html.gz
rm -f g77_bugs.html.gz
ln $news_file g77_news.html
ln $bugs_file g77_bugs.html
ln ${news_file}.gz g77_news.html.gz
ln ${bugs_file}.gz g77_bugs.html.gz
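# For mainline (HEAD) runs, also expose the contributors list as thanks.html
# at the top level of the web site.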
if [ "$DO_THANKS_HTML" = y ]; then
cd $WWWBASE
rm -f thanks.html
rm -f thanks.html.gz
ln onlinedocs/$contrib_file thanks.html
ln onlinedocs/${contrib_file}.gz thanks.html.gz
fi
rm -rf $WORKDIR