#!/bin/sh
# syncweb -- why pull data at build time when storage is cheap
#
# pulls all the contents from the alexkarle.com repo into the
# appropriate directories and updates the index.gph files.
#
# usage:
#
#   $ ./bin/syncweb.sh ../alexkarle.com
#   $ git commit -am "syncweb"
set -e

# die MESSAGE... -- print MESSAGE to stderr and exit non-zero.
# printf instead of echo: echo mangles arguments that start with '-n'
# or contain backslashes on some /bin/sh implementations.
die() {
	printf '%s\n' "$*" 1>&2
	exit 1
}

# Require exactly one argument: the path to the website checkout.
[ -z "$1" ] && die "usage: syncweb /path/to/alexkarle.com-repo"

# Cheap sanity check that the path is actually a git checkout.
[ ! -d "$1/.git" ] && die "$1 is not a repo"

web=$1
# Resolve this (gopher) repo's root no matter where the script is run
# from.  Quote the dirname substitution so a script path containing
# spaces doesn't word-split (SC2086); under set -e a failing
# rev-parse aborts the script via the assignment's exit status.
burrow=$(cd "$(dirname "$0")" && git rev-parse --show-toplevel)

# Mirror the gopher-relevant content out of the website checkout.
# Setlists are one file per date, so the 20* glob picks them all up.
cp -a "$web"/www/jam-tuesday/20* "$burrow/jam-tuesday"

# Blog posts and the atom feed both land in blog/; cp accepts
# multiple sources when the destination is a directory.
cp -a "$web"/www/blog/*.txt "$web"/www/atom.xml "$burrow/blog"

cd "$burrow"

# Rebuild each gopher index: a static template header followed by a
# generated listing.  blog/ has a dedicated generator; the other
# directories use the generic dirlist helper.  $d is quoted (SC2086)
# even though the list items are literals, to stay lint-clean.
(cat blog/index.tmpl; ./bin/blogidx.sh) > blog/index.gph
for d in jam-tuesday bin; do
	(cat "$d"/index.tmpl; ./bin/dirlist "$d") > "$d"/index.gph
done