Refactor the long `plumb $(...)'. - plumb - Open certain URL patterns with an ad-hoc opener (plumber)
 (HTM) hg clone https://bitbucket.org/iamleot/plumb
       ---
 (DIR) changeset 85894d036f2e0d5a90ca4005dd7273e16920b632
 (DIR) parent 5153ee30a77d923948ce04f1f3c418771ad1aba6
 (HTM) Author: Leonardo Taccari <iamleot@gmail.com>
       Date:   Mon, 26 Mar 2018 10:59:57 +0200
       
       Refactor the long `plumb $(...)'.
       
       Separate the logic into single filter functions: one_word_per_line(), urlize() and
       extract_urls(). Adjust `plumb $(...)' to use them accordingly.
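
       As a rough, illustrative sketch (not part of the commit): the three filters
       are meant to compose into the same pipeline the old inline `plumb $(...)'
       built. Given the definitions in the diff below, and assuming the awk match
       strips delimiters as its comments describe, a run could look like this
       (input and output are made-up examples):

           $ printf 'see CVE-2018-1234 and <https://example.org/>\n' |
           >     one_word_per_line | urlize | extract_urls
           cve://CVE-2018-1234
           https://example.org/

       In dplumb itself the same pipeline feeds ${PLUMB_DMENU}, and plumb is then
       invoked on the selected entry.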
       
       Diffstat:
        dplumb |  59 +++++++++++++++++++++++++++++++++++++++++++----------------
        1 files changed, 43 insertions(+), 16 deletions(-)
       ---
       diff -r 5153ee30a77d -r 85894d036f2e dplumb
       --- a/dplumb    Mon Mar 26 10:03:43 2018 +0200
       +++ b/dplumb    Mon Mar 26 10:59:57 2018 +0200
       @@ -32,22 +32,35 @@
        
        
        #
       -# dplumb, dmenu interface for plumb
       +# Put a single word on each line.
       +#
       +one_word_per_line()
       +{
       +
       +       tr -s '[:space:]' '\n'
       +}
       +
       +#
       +# Convert known patterns (CVE, doi, arXiv) to URLs with user-defined schemes.
        #
       -#  - Put a word per line
       -#  - URL-ize patterns
       -#  - Extract URLs inside possible delimeters
       -#    (<...>, (...), [...], "...", '...')
       -#  - Pipe all URLs to ${PLUMB_DMENU}
       -#  - Invoke plumb against the selected entry
       +urlize()
       +{
       +
       +       sed \
       +           -E -e 's;^(CVE-[0-9]+-[0-9]+).*$;cve://\1;g' \
       +           -E -e 's;^doi:(//)?;doi://;g' \
       +           -E -e 's;^ar[Xx]iv:(//)?;arxiv://;g'
       +}
       +
        #
       -plumb "$(
       -tr -s '[:space:]' '\n' |
       -sed \
       -    -E -e 's;^(CVE-[0-9]+-[0-9]+).*$;cve://\1;g' \
       -    -E -e 's;^doi:(//)?;doi://;g' \
       -    -E -e 's;^ar[Xx]iv:(//)?;arxiv://;g' |
       -awk \
       +# Extract all URLs, ignoring possible delimiters.
       +#
       +# Currently recognized delimiters are: <...>, (...), [...], "...", '...'.
       +#
       +extract_urls()
       +{
       +
       +       awk \
        '
        /:\/\// {
               # Extract URLs inside possible delimiters
       @@ -61,7 +74,21 @@
                       print substr($0, RSTART, RLENGTH)
               }
        }
       -' |
       -${PLUMB_DMENU} )"
       +'
       +
       +}
       +
       +
       +#
       +# dplumb, dmenu interface for plumb
       +#
       +# Process stdin, extract URLs, pass them to ${PLUMB_DMENU} and open the
       +# selected one with plumb.
       +#
       +plumb "$(
       +    one_word_per_line |
       +    urlize |
       +    extract_urls |
       +    ${PLUMB_DMENU} )"
        
        exit 0
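
       As a final, assumed example (not part of the commit): feeding a few bare
       identifiers through the new urlize() filter should rewrite them to the
       user-defined schemes that the plumber rules are expected to handle; the
       identifiers below are arbitrary:

           $ printf '%s\n' CVE-2017-5754 doi:10.1000/182 arXiv:1802.01396 | urlize
           cve://CVE-2017-5754
           doi://10.1000/182
           arxiv://1802.01396

       The resulting cve://, doi:// and arxiv:// pseudo-URLs only open something if
       the plumb configuration maps those schemes to an opener.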