#!/bin/bash
## GophHub - Gopher proxy for GitHub API
VERSION=1.2
## Designed for Gophernicus CGI environment
##
## URL parameters:
##
# repo=[ GitHub repo "owner/name" ]
# L If missing, will extract repo "owner/name" and directory/file path from GitHub URL in $SEARCHREQUEST
#
# path=[ full directory/file path within repo ]
#
# file=1
# L Retrieve file instead of directory listing
# file=readme
# L Retrieve HTML-formatted default README file for the repo
#
# raw=1
# L Retrieve raw files
#
# html=1
# L Retrieve HTML-formatted Markdown/ASCIIDoc files, or attempt syntax highlighting with Enscript
#
# num=1
# L Add line numbers to source code files
#
# settings=1
# L Display settings menu
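#
# Example request (hypothetical host; CGI path taken from the configuration below):
# gopher://example.org/1/~freet/cgi-bin/gophhub.sh?repo=owner/name&path=src&html=1&num=1
# L Lists the "src" directory of repo "owner/name", with HTML file links and line numbering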
##
## Configuration Options:
##
# Path to store links to programs allowed to run in Bash restricted mode
EXECPATH=/home/freet/public_gopher/gophhub/bin
# Gophermap to display as "homepage", when no repo is specified
MENUMAP=/home/freet/public_gopher/gophhub/menu.gophermap
# CGI script location
CGIPATH='/~freet/cgi-bin/gophhub.sh'
# Gophermap location
GOPHERMAPPATH='/~freet/gophhub/'
# API result cache location (directory will be created if it doesn't exist)
CACHEDIR=/tmp/gophhub
# API result cache time (minutes)
CACHETIME=15
# Maximum file size (in bytes) for offering file downloads via the API (larger files link to raw.githubusercontent.com)
MAXSIZE=512000
# Maximum number of lines from the README text to be shown in repo root directory gophermap
READMEMAX=25
# Maximum width of wrapped text in repo root gophermap ($COLUMNS value is set by Gophernicus)
WRAP=$COLUMNS
# GitHub API token (optional)
# https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token
APITOKEN=
#
## BEGIN SCRIPT
##
[ $WRAP -gt 2 ] && WRAP=$[ $WRAP - 1 ] || WRAP=66
# Set up GitHub API request headers
VHEADER='X-GitHub-Api-Version: 2022-11-28'
[ "$APITOKEN" ] && AUTHHEADER="Authorization: Bearer $APITOKEN" || AUTHHEADER=
# Display HTML-rendered text content
htmlview ()
{
fullpath="$repo/$path"
if [ "$1" == "readme.htm" -o "$1" == "mdfile.htm" ]
then
echo -e '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n'\
'<html>\n<head>\n'\
" <title>GophHub - $repo README</title>\n"\
' <meta name="viewport" content="width=device-width">\n'\
'</head>\n<body>\n'
if [ "$1" == "readme.htm" ]
then
echo "<h1><a href=\"gopher://$SERVER_NAME/1$GOPHERMAPPATH?${carry#&}\">GophHub</a> - <a href=\"gopher://$SERVER_NAME/1$GOPHERMAPPATH?repo=${repo}${carry}\">$repo</a> README</h1><hr>"
else
# Remove file name from path
[ "${fullpath%/*}" == "$repo" ] && urlpath= || urlpath="${path%/*}"
echo -e "<h1><a href=\"gopher://$SERVER_NAME/1$GOPHERMAPPATH?${carry#&}\">GophHub</a> - <a href=\"gopher://$SERVER_NAME/1$GOPHERMAPPATH?repo=${repo}${carry}&path=${urlpath}\">${fullpath%/*}</a>/${path##*/}</h1><hr>\n"\
"<p align=right><a href=\"gopher://$SERVER_NAME/0$CGIPATH?repo=${repo}&file=1&path=${path}\">Raw File</a></p>"
fi
# HTML-viewable file format extensions regexp
# Markdown & ASCIIDoc extensions + source code formats supported by Enscript (in enscript-1.6.6/states/hl/enscript.st)
viewable='\(md\)\?\(markdown\?\)\?\(adoc\)\?\(asciidoc\)\?\(txt\)\?\(c\)\?\(h\)\?\(c++\)\?\(cpp\)\?\(cc\)\?\(cxx\)\?\(m\)\?\(mpl\)\?\(mp\)\?\(maple\)\?\(scm\)\?\(scheme\)\?\(ads\)\?\(adb\)\?\(ada\)\?\(s\)\?\(st\)\?\(MOD\)\?\(DEF\)\?\(mi\)\?\(tcl\)\?\(v\)\?\(vh\)\?\(html\)\?\(vhd\)\?\(vhdl\)\?\(scr\)\?\(syn\)\?\(synth\)\?\(idl\)\?\(hs\)\?\(lhs\)\?\(gs\)\?\(lgs\)\?\(pm\)\?\(pl\)\?\(eps\)\?\(ps\)\?\(py\)\?\(pyx\)\?\(js\)\?\(java\)\?\(pas\)\?\(pp\)\?\(p\)\?\(f\)\?\(f90\)\?\(awk\)\?\(sh\)\?\(vba\)\?\(csh\)\?\(m4\)\?\(il\)\?\(wrl\)\?\(inf\)\?\(tex\)\?\(wmlscript\)\?\(wmls\)\?'
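# Illustrative matches (assumption, based on the extension list above): "README.md",
# "main.c" and "install.sh" count as viewable, while e.g. "logo.png" does not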
# Remove file name from path (adds leading slash for ascending from first sub-directory in step 5 below)
[ "${fullpath%/*}" == "$repo" ] && urlpath= || urlpath=/"${path%/*}"
# Convert GitHub links in HTML to GophHub/raw-download links (needs GNU Sed extensions):
# 1) Replace absolute links to root directories of GitHub repos with GophHub links
# 2) Replace absolute links to sub-directories of GitHub repos with GophHub links
# 3) Replace absolute links to MarkDown or ASCIIdoc files with HTML-rendered GophHub links
# 4) Replace absolute links to files at GitHub with raw.githubusercontent.com URLs
# 5) Replace relative links to files with absolute raw.githubusercontent.com URLs ("../" to parent)
# 6) Replace relative links to files with absolute raw.githubusercontent.com URLs (lower directories only)
# 7) Convert relative directory links from 5 and 6 to GophHub links
# 8) Convert relative MarkDown or ASCIIdoc file links to HTML-rendered GophHub links
# 9) Fix "id" elements by removing "user-content-", which is prepended by GitHub's HTML renderer
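# Illustrative result of step 1 (hypothetical host and repo, with num=0):
#   href="https://github.com/owner/name"
# becomes
#   href="gopher://example.org/1/~freet/gophhub/?repo=owner/name&html=1&num=0"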
sed -e 's%href="https*://w*\.*github\.com/\([^/"]*/[^/#"]*\)/\?[^/"]*"%href="'"gopher://$SERVER_NAME/1$GOPHERMAPPATH?repo=\1\&html=1\&num=$num"'"%ig' \
-e 's%href="https*://w*\.*github\.com/\([^/"]*/[^/"]*\)/tree/[^/"]*/\([^#"]*\)"%href="'"gopher://$SERVER_NAME/1$GOPHERMAPPATH?repo=\1\&path=\2\&html=1\&num=$num"'"%ig' \
-e 's%href="https*://w*\.*github\.com/\([^/"]*/[^/"]*\)/blob/[^/"]*/\([^"]*\.'"$viewable"'\)\(#[^"]*\)*"%href="'"gopher://$SERVER_NAME/h$CGIPATH?repo=\1\&path=\2\&html=1\&num=$num\&file=1"'"%ig' \
-e 's%\(\(src=\)\|\(href=\)\)"https*://w*\.*github\.com/\([^/"]*/[^/"]*\)/\(raw\)\?\(blob\)\?/-\?\(master\)\?/\([^#"]*\)\(#[^"]*\)*"%\1"https://raw.githubusercontent.com/\4/master/\8"%ig' \
-e 's%\(\(src=\)\|\(href=\)\)"\.\.\/\?\([^#":]*\)\(#[^"]*\)*"%\1"https://raw.githubusercontent.com/'"${repo}/master${urlpath%/*}/"'\4\"%ig' \
-e 's%\(\(src=\)\|\(href=\)\)"\.\?/\?\([^#":]\+\)\(#[^"]*\)*"%\1"https://raw.githubusercontent.com/'"${repo}/master$urlpath/"'\4\"%ig' \
-e 's%href="https://raw.githubusercontent.com/\([^/"]*/[^/"]*\)/master/\([^".]*\)"%href="'"gopher://$SERVER_NAME/1$GOPHERMAPPATH?repo=\1\&path=\2\&html=1\&num=$num"'"%ig' \
-e 's%href="https://raw.githubusercontent.com/\([^/"]*/[^/"]*\)/master/\([^"]*\.'"$viewable"'\)"%href="'"gopher://$SERVER_NAME/h$CGIPATH?repo=\1\&path=\2\&html=1\&num=$num\&file=1"'"%ig' \
-e 's%id="user-content-%id="%g' "$1"
echo -e '<HR>\n<ADDRESS>Generated by <A HREF="gopher://aussies.space/1/~freet/scripts">GophHub</A> '"$VERSION.</ADDRESS>\n</BODY>\n</HTML>"
else
# Add GophHub heading to GNU Enscript output
# Remove file name from path
[ "${fullpath%/*}" == "$repo" ] && urlpath= || urlpath="${path%/*}"
echo -e '<!DOCTYPE html PUBLIC "-//IETF//DTD HTML 2.0//EN">\n'\
'<HTML>\n<HEAD>\n'\
" <TITLE>GophHub - $repo/$path</TITLE>\n"\
'</HEAD>\n<BODY BGCOLOR="#FFFFFF" TEXT="#000000" LINK="#1F00FF" ALINK="#FF0000" VLINK="#9900DD">\n'\
"<H1><a href=\"gopher://$SERVER_NAME/1$GOPHERMAPPATH?${carry#&}\">GophHub</a> - <A HREF=\"gopher://$SERVER_NAME/1$GOPHERMAPPATH?repo=${repo}${carry}&path=${urlpath}\">${fullpath%/*}</A>/${path##*/}</H1><hr>\n"\
"<p align=right><a href=\"gopher://$SERVER_NAME/0$CGIPATH?repo=${repo}&file=1&path=${path}\">Raw File</a></p>"
if [ $num -gt 0 ]
then
# Add line numbers with pr, negatively offset by the number of HTML lines preceding the preformatted text
pr -Tn -N -10 "$1" | sed -n -e 's/[[:blank:]]*0[[:blank:]]*<PRE>/<PRE>/' -e '/<PRE>/,/<\/PRE>/p'
else
# Display file contents without line numbering
sed -n '/<PRE>/,/<\/PRE>/p' "$1"
fi
echo -e "<HR>\n<ADDRESS>Generated by <A HREF="
http://www.iki.fi/~mtr/genscript/">GNU Enscript 1.6.6</A>, and"\
'<A HREF="
gopher://aussies.space/1/~freet/scripts">GophHub</A> '"$VERSION.</ADDRESS>\n</BODY>\n</HTML>"
fi
}
## Main
repo=
path=
carry=
commits=
raw=0
html=0
num=0
file=0
settings=0
# Turn query string into positional parameters, as if they were arguments
oldIFS="$IFS"
IFS='&'
set -- $QUERY_STRING
IFS="$oldIFS"
# Process query string
while [ "$1" ]
do
case "$1" in
repo=*) repo="${1#*=}" ;;
path=*) path="${1#*=}" ;;
file=*) file="${1#*=}" ;;
raw=*) raw="${1#*=}"; carry="${carry}&raw=$raw" ;;
html=*) html="${1#*=}"; carry="${carry}&html=$html" ;;
num=*) num="${1#*=}"; carry="${carry}&num=$num" ;;
settings=1) settings=1 ;;
esac
shift
done
# If no "repo" specified, display home menu gophermap
if [ -z "$repo" ]
then
# Decode percent-encoded characters (e.g. from Web proxies)
SEARCHREQUEST="$(printf '%b' "${SEARCHREQUEST//%/\\x}")"
# Extract repo "owner/name" from URL in search request
if repo="`expr \"$SEARCHREQUEST\" : '.*github\.[[:alpha:]]*/\([^/]*/[^/#]*\)'`"
then
# Extract directory/file path from URL in search request
if ! path="`expr \"$SEARCHREQUEST\" : '.*github\.[[:alpha:]]*/[^/]*/[^/]*/tree/[^/]*/\(.*\)'`"
then
if path="`expr \"$SEARCHREQUEST\" : '.*github\.[[:alpha:]]*/[^/]*/[^/]*/blob/[^/]*/\([^#]*\)'`"
then
file=1
fi
fi
else
homepage="`cat \"$MENUMAP\"`"
fi
fi
# Show settings gophermap
if [ $settings -gt 0 -o -z "$repo" ]
then
# Restricted mode (only using built-ins)
PATH=
set -r
# Display GophHub home menu gophermap ($MENUMAP) with variable values filled in
if [ -z "$repo" ]
then
# For compatibility, only add ? on the end of the search URL if an option is selected
[ "$carry" ] && search_carry="?${carry#&}" || search_carry=
eval "echo \"$homepage\""
fi
# Display settings options with [*] toggle boxes
echo -e "!GophHub Settings\nSelect menu item to toggle...\n"
if [ "$html" != "1" ]; then html=1;mark=" ";else html=0;mark="*"; fi
echo -e "1[$mark]View files in HTML via Gopher\t${REQUEST}?repo=${repo}${carry//&html=?/}&html=$html&path=${path}"
if [ "$raw" != "1" ]; then raw=1;mark=" ";else raw=0;mark="*"; fi
echo -e "1[$mark]Download raw files via Gopher rather than HTTPS\t${REQUEST}?repo=${repo}${carry//&raw=?/}&raw=$raw&path=${path}"
if [ "$num" != "1" ]; then num=1;mark=" ";else num=0;mark="*"; fi
echo -e "1[$mark]Number lines in source files (Gopher downloads only)\t${REQUEST}?repo=${repo}${carry//&num=?/}&num=$num&path=${path}"
echo -e "\nNote: These settings are only applied to files under ${MAXSIZE}B"
[ "$repo" ] && echo -e "\n1Return to directory list\t${REQUEST}?repo=${repo}${carry}&path=${path}"
exit
fi
cachecount=0
# Cache downloaded files in "$CACHEDIR"
if [ -d "$CACHEDIR" ]
then
# Clean up old downloads
find "$CACHEDIR" -maxdepth 1 -mindepth 1 -type d -mmin +$CACHETIME -exec rm -rf '{}' +
cachecount=`ls "$CACHEDIR" | wc -l`
else
mkdir "$CACHEDIR" || exit 1
fi
# Checksum of repo and path string is used as ID for downloaded/cached files
cachesum="`echo \"$repo/$path\" | md5sum`"
cachesum=${cachesum% -}
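# Illustrative (hypothetical values): repo="owner/name" with path="src" is hashed to a
# 32-character hex string, so its downloads live under "$CACHEDIR/<32 hex digits>"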
# Check for cached files
if [ ! -d "$CACHEDIR"/$cachesum ]
then
if ! mkdir "$CACHEDIR"/$cachesum
then
echo -e "ERROR:\nCouldn't create temporary directory"
exit 1
fi
fi
cd "$CACHEDIR"/$cachesum
[ -L git_dir.json ] || ln -s index.html git_dir.json
# Link to all the programs that are used after this
for prog in cat sed wget fold pr grep enscript numfmt
do
[ -f "$EXECPATH"/$prog ] || ln -sf "`which $prog`" "$EXECPATH"/$prog
done
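# Illustrative result (actual targets depend on the host system):
#   $EXECPATH/sed -> /usr/bin/sed, $EXECPATH/wget -> /usr/bin/wget, etc.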
# Set path to only execute intended programs
PATH="$EXECPATH"
# Disable error output
exec 2>/dev/null
# Switch to Bash restricted mode
set -r
# Download data from GitHub API and apply syntax highlighting to files if required
if [ -z "$path" ]
then
if [ "$file" == "readme" ]
then
# View README in HTML
wget -q -nc -O readme.htm --header="$VHEADER" --header="$AUTHHEADER" --header="Accept: application/vnd.github.html" \
"https://api.github.com/repos/$repo/readme"
if [ -s readme.htm ]
then
htmlview readme.htm
else
echo "<html><head><title>GophHub - README not found</title></head><body><h1 align=center>README empty or not found</h1></body></html>"
fi
exit
fi
# if this is the root directory, grab extra info
wget -q -nc --header="$VHEADER" --header="$AUTHHEADER" --header="Accept: application/vnd.github.raw" "https://api.github.com/repos/$repo" \
"https://api.github.com/repos/$repo/contents/" "https://api.github.com/repos/$repo/readme" "https://api.github.com/repos/$repo/releases/latest" \
"https://api.github.com/repos/$repo/commits/master?per_page=1"
if [ ! -s "${repo##*/}" ]
then
echo "Failed to retrieve data for repo: $repo"
exit
fi
[ -s "master?per_page=1" ] && commits="master?per_page=1"
else
if [ $file -gt 0 ]
then
# Find filename extension
fileext="${path##*.}"
# If no filename extension, assume plain text
[ "$fileext" == "$path" ] && fileext=txt
if [ $html -gt 0 ]
then
if [ "$fileext" == "md" -o "$fileext" == "markdown" -o "$fileext" == "adoc" -o "$fileext" == "asciidoc" -o "$fileext" == "txt" ]
then
# Download HTML-rendered Markdown or AsciiDoc
wget -q -nc -O mdfile.htm --header="$VHEADER" --header="$AUTHHEADER" --header="Accept: application/vnd.github.html" \
"https://api.github.com/repos/$repo/contents/$path"
if [ -s mdfile.htm ]
then
htmlview mdfile.htm
else
echo "<html><head><title>GophHub - File not found</title></head><body><h1 align=center>File empty or not found</h1></body></html>"
fi
exit
else
# Download raw file via API
wget -q -nc -O "file.$fileext" --header="$VHEADER" --header="$AUTHHEADER" --header="Accept: application/vnd.github.raw" \
"
https://api.github.com/repos/$repo/contents/$path"
if [ -s "file.$fileext" ]
then
# Add syntax highlighting in HTML
[ -f file.htm ] || enscript -q -E -w html --color -o file.htm "file.$fileext"
htmlview file.htm
else
echo "<html><head><title>GophHub - File not found</title></head><body><h1 align=center>File empty or not found</h1></body></html>"
fi
exit
fi
else
# Download raw file via API
wget -q -nc -O "file.$fileext" --header="$VHEADER" --header="$AUTHHEADER" --header="Accept: application/vnd.github.raw" \
"
https://api.github.com/repos/$repo/contents/$path"
if [ -s "file.$fileext" ]
then
if [ $num -gt 0 ]
then
# Display file contents with line numbering
pr -Tn "file.$fileext"
else
# Display file contents
cat "file.$fileext"
fi
else
echo "File empty or not found"
fi
exit
fi
else
# Download directory data
wget -q -nc --header="$VHEADER" --header="$AUTHHEADER" --header="Accept: application/vnd.github+json" \
"https://api.github.com/repos/$repo/contents/${path%/}/"
fi
fi
# Below here is only for displaying directory listings. File retrieval exits above.
# Display repo info
if [ -z "$path" ]
then
oldIFS="$IFS"
IFS=\" # remove quotes from data using IFS
# Read in lines of JSON data with useful fields and parse them by splitting at the '"' character
# It's slightly faster to filter the input using Grep
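# Illustrative split (hypothetical JSON line): with IFS='"', the line
#   "description": "Gopher proxy for GitHub",
# reads as FIELD='description', sep=': ', VALUE='Gopher proxy for GitHub', comma=','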
grep '"description":'$'\n''"homepage":'$'\n''"size":'$'\n''"language":'$'\n''"spdx_id":'$'\n''"date":'$'\n''"message":' "${repo#*/}" $commits \
| while read -r start FIELD sep VALUE comma || {
# This part runs after the end of the output from Grep ('false' below then ends the loop)
echo "!GophHub - ${repo}:"
echo "$DESCVAL" | fold -s -w $WRAP | pr -o 1 -T
[ "$HOMEVAL" ] && echo -e "h Project's Website\tURL:$HOMEVAL"
echo -e "h Download Repo ZIP Archive\tURL:
https://api.github.com/repos/$repo/zipball"
echo -e "h GitHub URL:
https://github.com/$repo.git\tURL:
https://github.com/$repo.git\n"\
"Repo Size: $SIZEVAL\n"\
"Programming Language: $LANGVAL\n"\
"License: $LICENSEVAL"
[ "$DATEVAL" ] && echo -e " Latest Commit Date: $DATEVAL\n"\
"Latest Commit Message:\n$MESSAGEVAL"
false
}
do
case "$FIELD" in
description) DESCVAL="$VALUE" ;;
homepage) HOMEVAL="$VALUE" ;;
size) sep="${sep#: *}"; SIZEVAL="`numfmt --to=iec --from=iec \"${sep%,}\"K`" ;;
language) LANGVAL="$VALUE" ;;
spdx_id) LICENSEVAL="$VALUE" ;;
date) DATEVAL="$VALUE" ;;
message) MESSAGEVAL="`echo -e \"$VALUE\" | fold -s -w $WRAP | pr -o 1 -T `" ;;
esac
done
IFS="$oldIFS" # Take '"' off IFS
echo "______________________________________________________________________"
else
fullpath="$repo/$path" # Remove deepest directory from path
[ "${fullpath%/*}" == "$repo" ] && urlpath= || urlpath="${path%/*}"
echo -e "!GophHub - $repo/$path\n______________________________________________________________________"
echo -e "1..\t${REQUEST}?repo=${repo}${carry}&path=$urlpath"
fi
# List directory contents
# Values are read into arrays before the directory list is output
oldIFS="$IFS"
IFS=\" # remove quotes from data using IFS
dirno=0
# Read in lines of JSON data with useful fields and parse them by splitting at the '"' character.
# Data for each directory entry is read into an array. The array index increments whenever a field that is already set appears again (i.e. at the start of the next entry).
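# Illustrative output (hypothetical entry): a directory entry named "src" is printed as
#   1src<TAB>${REQUEST}?repo=owner/name&path=src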
grep '"type":'$'\n''"name":'$'\n''"size":'$'\n''"path":'$'\n''"download_url":' git_dir.json \
| while read -r start FIELD sep VALUE comma || {
# This part runs after the end of the output from Grep
while [ $dirno -ge 0 ]
do
case "${TYPEVAL[$dirno]}" in
file) if [ $html -gt 0 -a ${SIZEVAL[$dirno]} -le $MAXSIZE ]
then
# API HTML file download links
echo -e "h${NAMEVAL[$dirno]}\t$CGIPATH?repo=${repo}${carry}&file=1&path=${PATHVAL[$dirno]}"
elif [ $raw -gt 0 -a ${SIZEVAL[$dirno]} -le $MAXSIZE ]
then
# API raw file download links
echo -e "0${NAMEVAL[$dirno]}\t$CGIPATH?repo=${repo}${carry}&file=1&path=${PATHVAL[$dirno]}"
else
# Default to HTTPS file download links to raw.githubusercontent.com
echo -e "h${NAMEVAL[$dirno]}\tURL:${URLVAL[$dirno]}"
fi
;;
# Directory links
dir) echo -e "1${NAMEVAL[$dirno]}\t${REQUEST}?repo=${repo}${carry}&path=${PATHVAL[$dirno]}" ;;
esac
dirno=$[ $dirno - 1 ]
done
false
}
do
case "$FIELD" in
type) [ "${TYPEVAL[$dirno]}" ] && dirno=$[ $dirno + 1 ]; TYPEVAL[$dirno]="$VALUE" ;;
name) [ "${NAMEVAL[$dirno]}" ] && dirno=$[ $dirno + 1 ]; NAMEVAL[$dirno]="$VALUE" ;;
size) [ "${SIZEVAL[$dirno]}" ] && dirno=$[ $dirno + 1 ]; sep="${sep#: *}"; SIZEVAL[$dirno]="${sep%,}" ;;
path) [ "${PATHVAL[$dirno]}" ] && dirno=$[ $dirno + 1 ]; PATHVAL[$dirno]="$VALUE" ;;
download_url) [ "${URLVAL[$dirno]}" ] && dirno=$[ $dirno + 1 ]; URLVAL[$dirno]="$VALUE" ;;
esac
done
IFS="$oldIFS" # Take '"' off IFS
# Display README, latest release info, and latest release download links
if [ -z "$path" ]
then
echo '______________________________________________________________________'
# Display default readme for the repo, with text wrapped and indented one space from the left.
# Outputs one 'page' of $READMEMAX lines.
[ $READMEMAX -gt 0 ] && fold -w $WRAP -s readme | pr -o 1 -T -l $READMEMAX +1:1
echo '______________________________________________________________________'
echo -e "hFull README in HTML\t$CGIPATH?repo=${repo}${carry}&file=readme"
# Show latest release, if there is one
if [ -f latest ]
then
echo '______________________________________________________________________'
IFS=\" # remove quotes from data using IFS
dirno=0
# Read in lines of JSON data with useful fields and parse them by splitting at the '"' character
# Size and browser_download_url values for each release file are held in an array.
# Other fields should only appear once.
grep '"browser_download_url":'$'\n''"size":'$'\n''"tag_name":'$'\n''"published_at":'$'\n''"tarball_url":'$'\n''"zipball_url":' latest \
| while read -r start FIELD sep VALUE comma || {
# This part runs after the end of the output from Grep
echo -e " Latest Release: $TAGVAL\n Date: $DATEVAL\n Release Files:"
while [ $dirno -gt -1 ]
do
[ "$URLVAL" ] && echo -e "h${URLVAL[$dirno]##*/} (${SIZEVAL[$dirno]})\tURL:${URLVAL[$dirno]}"
dirno=$[ $dirno - 1 ]
done
echo -e "hSource code TAR.GZ\tURL:$TARVAL\nhSource code ZIP\tURL:$ZIPVAL"
false
}
do
case "$FIELD" in
tag_name) [ -z "$TAGVAL" ] && TAGVAL="$VALUE" ;;
published_at) [ -z "$DATEVAL" ] && DATEVAL="$VALUE" ;;
size) [ "${SIZEVAL[$dirno]}" ] && dirno=$[ $dirno + 1 ]; sep="${sep#: *}"; SIZEVAL[$dirno]="`numfmt --to=iec \"${sep%,}\"`" ;;
browser_download_url) [ "${URLVAL[$dirno]}" ] && dirno=$[ $dirno + 1 ]; URLVAL[$dirno]="$VALUE" ;;
tarball_url) [ -z "$TARVAL" ] && TARVAL="$VALUE" ;;
zipball_url) [ -z "$ZIPVAL" ] && ZIPVAL="$VALUE" ;;
esac
done
IFS="$oldIFS" # Take '"' off IFS
fi
echo '______________________________________________________________________'
else
echo -e '______________________________________________________________________\n'\
"1Repo Home\t${REQUEST}?repo=${repo}${carry}"
fi
echo -e "1Settings\t${REQUEST}?repo=${repo}${carry}&settings=1&path=${path}\n"\
"1GophHub Home\t$REQUEST?${carry#&}\n\n"\
"New files/directories accessed in the last $CACHETIME minutes:\n"\
" $cachecount"