Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
#!/bin/bash
# Extract a deduplicated "<type> <name>" list of symbols from
# valadoc-generated HTML files.
#
# Usage: ./script /directory/with/valadoc-generated-html/**
#   Several directories at once are supported; add ** after the names so
#   the shell lists files recursively (needs `shopt -s globstar` in bash).
#
# Output (stdout): one "type name" pair per line; virtual methods and
# deprecated symbols are excluded, duplicates removed (first one wins).

if [ "$#" -eq 0 ]; then
  # Original printed usage but kept running; report on stderr and bail out.
  echo "Usage: $0 "'/directory/with/valadoc-generated-html/**
Processing several dirs at once is supported.
Make sure you add ** after the names to make the shell list files recursively.' >&2
  exit 1
fi

( # start a subshell so we can grab all the output at once and pipe it
  ( # and another one, for deduplication this time
    for file in "$@"; do
      # Only process HTML files. A glob match replaces the buggy
      # `basename | grep -q ".html"`, whose unescaped, unanchored dot
      # also matched names like "xhtml" or "foohtmlbar".
      if [[ "$file" == *.html ]]; then
        # Keep everything from the site_content div onward, join the file
        # to a single line, re-split on </li>, then keep list items that
        # are neither deprecated nor empty brief-description stubs.
        sed -n -e '/div class="site_content"/,$p' "$file" \
          | tr --delete '\n' \
          | sed 's|</li>|&\n|g' \
          | grep 'li class=' \
          | grep -v 'class="deprecated"' \
          | sed --regexp-extended 's|<div class="leaf_brief_description">[[:space:]]*</div>||' \
          | grep -v 'brief_description'
      fi
    done
  ) | grep "href" | while IFS= read -r line; do
    # IFS= and -r preserve leading whitespace and literal backslashes.
    line="${line##*<li}"                                      # drop everything before the last <li
    type="$(printf '%s' "$line" | cut -d \" -f 2)"            # first quoted attr = CSS class = symbol type
    name="$(printf '%s' "${line#*href=}" | cut -d \" -f 2)"   # quoted href target
    printf '%s %s\n' "$type" "${name%.htm*}"                  # strip the .htm/.html extension
  done
  # Beware: Dark awk magic ahead!  '!x[$0]++' keeps the first occurrence
  # of each line without sorting.
  # Source: http://www.unixcl.com/2008/03/remove-duplicates-without-sorting-file.html
) | grep -v 'virtual_method' | awk '!x[$0]++' -
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement