#!/bin/sh -e
# converts html to markdown
# uses an available program to fetch URL and tidy to normalize it first

REQUIRED=tidy
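
# Typical invocations (assuming the wrapper is installed as 'html2markdown'):
#   html2markdown page.html                          # convert a local file
#   html2markdown http://example.org/                # fetch, then convert
#   html2markdown -g 'curl -s' http://example.org/   # use a specific grabber
#   html2markdown -e ISO-8859-1 page.html            # skip encoding detection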

### common.sh

grab_url_with () {
    url="${1:?internal error: grab_url_with: url required}"

    shift
    cmdline="$@"

    prog=
    prog_opts=
    if [ -n "$cmdline" ]; then
	eval "set -- $cmdline"
	prog=$1
	shift
	prog_opts="$@"
    fi

    if [ -z "$prog" ]; then
	# Locate a sensible web grabber (note the order).
	for p in wget lynx w3m curl links w3c; do
		if pathfind $p; then
		    prog=$p
		    break
		fi
	done

	[ -n "$prog" ] || {
            errn "$THIS:  Couldn't find a program to fetch the file from URL "
	    err "(e.g. wget, w3m, lynx, w3c, or curl)."
	    return 1
	}
    else
	pathfind "$prog" || {
	    err "$THIS:  No such web grabber '$prog' found; aborting."
	    return 1
	}
    fi

    # Set up the proper base options for known grabbers.
    base_opts=
    case "$prog" in
    wget)  base_opts="-O-" ;;
    lynx)  base_opts="-source" ;;
    w3m)   base_opts="-dump_source" ;;
    curl)  base_opts="" ;;
    links) base_opts="-source" ;;
    w3c)   base_opts="-n -get" ;;
    *)     err "$THIS:  unhandled web grabber '$prog'; hope it succeeds." ;;
    esac

    err "$THIS: invoking '$prog $base_opts $prog_opts $url'..."
    eval "set -- $base_opts $prog_opts"
    $prog "$@" "$url"
}
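
# For example, with no grabber given and wget first on PATH,
# `grab_url_with http://example.org/` runs `wget -O- http://example.org/`,
# while `grab_url_with http://example.org/ "curl -s"` runs
# `curl -s http://example.org/` (curl already writes to stdout by default).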

encoding=
grabber=
nograb=
while getopts e:g:nh opt; do
    case $opt in
    e) encoding="$OPTARG" ;;
    g) grabber="$OPTARG" ;;
    n) nograb=1 ;;
    h|?)
        usage "[-e encoding] [-g grabber_command] [-n] [-h] [input_file|url]"
        exit 2 ;;
    esac
done

shift $(($OPTIND - 1))

### postopts.sh

### singlearg.sh

inurl=
if [ -n "$1" ] && ! [ -f "$1" ]; then
    if [ -n "$nograb" ]; then
        err "'$1' not found; refusing to treat the input as a URL."
        exit 1
    fi
    # Treat the given argument as a URL.
    inurl="$1"
fi
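
# E.g. `html2markdown ./page.html` reads the local file directly, whereas
# `html2markdown http://example.org/page.html` (no such local file) is
# fetched below; with -n the URL interpretation is refused instead.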

if [ -n "$inurl" ]; then
    err "Attempting to fetch file from '$inurl'..."

    ### tempdir.sh

    grabber_out=$THIS_TEMPDIR/grabber.out
    grabber_log=$THIS_TEMPDIR/grabber.log
    if ! grab_url_with "$inurl" "$grabber" 1>"$grabber_out" \
                                           2>"$grabber_log"; then
        errn "grab_url_with failed"
        if [ -f "$grabber_log" ]; then
            err " with the following error log."
            err
            cat >&2 "$grabber_log"
        else
            err .
        fi
        exit 1
    fi

    set -- "$grabber_out"
fi

if [ -z "$encoding" ] && [ "x$@" != "x" ]; then
    # Try to determine character encoding unless not specified
    # and input is STDIN.
    encoding=$(
        head "$@" |
        LC_ALL=C tr 'A-Z' 'a-z' |
        sed -ne '/<meta .*content-type.*charset=/ {
            s/.*charset=["'\'']*\([-a-zA-Z0-9]*\).*["'\'']*/\1/p
        }'
    )
fi
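
# For instance, an input containing
#   <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
# yields encoding=iso-8859-1 (the input is lowercased before matching).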

if [ -n "$encoding" ] && [ -n "$HAVE_ICONV" ]; then
    alias to_utf8='iconv -f "$encoding" -t utf-8'
elif [ -n "$inurl" ]; then # assume web pages are UTF-8
    alias to_utf8='cat'
fi # else just use local encoding
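
# Normalize the input to UTF-8, clean it up with tidy, convert it to
# markdown with pandoc, and map the result back to the local encoding
# (from_utf8, like the fallback to_utf8, is presumably defined by the
# common.sh scaffolding included above).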

to_utf8 "$@" | tidy -utf8 2>/dev/null |
runpandoc -r html -w markdown -s | from_utf8