summaryrefslogtreecommitdiff
path: root/doc/utils
diff options
context:
space:
mode:
Diffstat (limited to 'doc/utils')
-rw-r--r--doc/utils/cleanhtml.sed1
-rwxr-xr-xdoc/utils/cleanhtml.sh12
-rw-r--r--doc/utils/contents.awk109
-rw-r--r--doc/utils/four2perm.120
-rw-r--r--doc/utils/four2perm.c140
-rw-r--r--doc/utils/html2four.126
-rw-r--r--doc/utils/html2four.c298
-rw-r--r--doc/utils/html2txt.sed86
-rw-r--r--doc/utils/killtoodeepcontents.pl59
-rwxr-xr-xdoc/utils/man2html.script48
-rw-r--r--doc/utils/man_xref.c125
-rwxr-xr-xdoc/utils/mkhtmlman44
-rw-r--r--doc/utils/perm1.awk1
-rw-r--r--doc/utils/perm2.awk46
-rw-r--r--doc/utils/rfc_pg.c76
-rw-r--r--doc/utils/xref.sed56
16 files changed, 1147 insertions, 0 deletions
diff --git a/doc/utils/cleanhtml.sed b/doc/utils/cleanhtml.sed
new file mode 100644
index 000000000..59d3866b8
--- /dev/null
+++ b/doc/utils/cleanhtml.sed
@@ -0,0 +1 @@
+/<STYLE>/,/<\/STYLE>/d
diff --git a/doc/utils/cleanhtml.sh b/doc/utils/cleanhtml.sh
new file mode 100755
index 000000000..a3ea2afac
--- /dev/null
+++ b/doc/utils/cleanhtml.sh
@@ -0,0 +1,12 @@
+# script to clean up HTML files
+# removes formatting added by htmldoc
+#
+# first argument is sedscript to use
+f=$1
+shift
+# remaining args are files to process
+for i
+do
+	sed -f "$f" "$i" > "$i.tmp"
+	mv "$i.tmp" "$i"
+done
diff --git a/doc/utils/contents.awk b/doc/utils/contents.awk
new file mode 100644
index 000000000..5cc07f246
--- /dev/null
+++ b/doc/utils/contents.awk
@@ -0,0 +1,109 @@
+# table-of-contents extractor
+# Copyright (C) 1999 Sandy Harris.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# RCSID $Id: contents.awk,v 1.1 2004/03/15 20:35:24 as Exp $
+BEGIN {
+ # initialise indent counter
+ indent = 0
+ # define variables for section breaks
+ b0 = "==================================================="
+ b1 = "---------------------------------------------------"
+ b2 = "\t------------------------------------------"
+ # TURN OFF HTML formatting
+ print "<html>"
+ print "<body>"
+ print "<pre>"
+ # print a header
+ blurb()
+ print "Section headings printed, indentation shows structure"
+}
+# start of new file
+FNR == 1 {
+ print b0
+ print "HTML file: " "<a href=\"" FILENAME "\">" FILENAME "</a>"
+ print b1
+}
+# print header lines
+# actual printing is done by tagged() function
+# which adds tag if last line was <a name=...>
+$0 ~/<h1>/ {
+ text = $0
+ tabs = ""
+ gsub(/.*<h1>/, "", text)
+ gsub(/<\/h1>/, "", text)
+ tagged( text )
+}
+$0 ~/<h2>/ {
+ text = $0
+ tabs = "\t"
+ gsub(/.*<h2>/, "", text)
+ gsub(/<\/h2>/, "", text)
+ tagged(text)
+}
+$0 ~/<h3>/ {
+ text = $0
+ tabs = "\t\t"
+ gsub(/.*<h3>/, "", text)
+ gsub(/<\/h3>/, "", text)
+ tagged(text)
+}
+$0 ~/<h4>/ {
+ text = $0
+ tabs = "\t\t\t"
+ gsub(/.*<h4>/, "", text)
+ gsub(/<\/h4>/, "", text)
+ tagged( text )
+}
+# if current line is not header
+# and we have stored tag from <a name=..> line
+# make link to that tag
+$0 !~ /<h[1-4]/ {
+ if( length(name) )
+ print "[ <a href=\"" FILENAME "#" name "\">" name "</a>" " ]"
+ name = ""
+}
+# for <a name=whatever> lines
+# save name in a variable
+# not printed until we see next line
+$0 ~ /<a name=.*>/ {
+ name = $0
+ # strip anything before or after name tag
+ gsub(/.*<a name=/, "", name)
+ gsub(/>.*/, "", name)
+ # strip quotes off name
+ gsub(/^"/, "", name)
+ gsub(/"$/, "", name)
+}
+END {
+ print b0
+ blurb()
+ print "Docs & script by Sandy Harris"
+ print "</pre>"
+ print "</body>"
+ print "</html>"
+}
+
+function tagged(text) { # print header with tag if available
+ if( length(name) ) # > 0 if previous line was a name
+ print tabs "<a href=\"" FILENAME "#" name "\">" text "</a>"
+ else
+ print tabs text
+ name = ""
+}
+
+function blurb() {
+ print "Linux FreeSWAN HTML documents"
+ print "Automatically generated Table of Contents"
+ print "Bug reports to the mailing list: linux-ipsec@clinet.fi"
+ print "<p>"
+}
diff --git a/doc/utils/four2perm.1 b/doc/utils/four2perm.1
new file mode 100644
index 000000000..1e5263b5b
--- /dev/null
+++ b/doc/utils/four2perm.1
@@ -0,0 +1,20 @@
+.TH FOUR2PERM 1 "August 1999"
+.\" RCSID $Id: four2perm.1,v 1.1 2004/03/15 20:35:24 as Exp $
+.SH NAME
+four2perm - generate permuted index from four-field lines
+.SH SYNOPSIS
+.B four2perm
+.SH DESCRIPTION
+.I four2perm
+expects input lines with four tab-separated fields, such as that
+created from HTML files by html2four(1). Given that, it does most
+of the work of generating a permuted index, gets things close
+enough that a simple pipeline through sort(1) and awk(1) can
+finish the job.
+.SH SEE ALSO
+.hy 0
+html2four(1)
+.SH HISTORY
+Written for the Linux FreeS/WAN project
+<http://www.xs4all.nl/~freeswan/>
+by Sandy Harris.
diff --git a/doc/utils/four2perm.c b/doc/utils/four2perm.c
new file mode 100644
index 000000000..5b575c1b5
--- /dev/null
+++ b/doc/utils/four2perm.c
@@ -0,0 +1,140 @@
+#include <ctype.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#define MAX_LINE 512
+
+void die( char * ) ;
+
+char buffer[MAX_LINE+1] ;
+char *prog_name ;
+
+void die( char *message )
+{
+ fflush(stdout) ;
+ fprintf(stderr, "%s: %s\n", prog_name, message) ;
+ exit(1) ;
+}
+
+int main(int argc, char* argv[])
+{
+ int errors ;
+ prog_name = *argv ;
+ if( argc != 1 )
+ die("pure filter, takes no arguments") ;
+ errors = 0 ;
+ while( fgets(buffer, MAX_LINE, stdin))
+ errors += do_line(buffer) ;
+ exit(errors ? 1 : 0 ) ;
+}
+
+int do_line(char *data)
+{
+ char *p, *q, *r, *end, *before, *after ;
+ // expecting two tab-separated fields
+ // point r to 2nd, null terminate 1st
+ for( r = data ; *r && *r != '\t' ; r++ )
+ ;
+ if( *r != '\t' )
+ return(1) ;
+ end = r++ ;
+ *end = '\0' ;
+ for( q = r ; *q ; q++ )
+ if( *q == '\n' )
+ *q = '\0' ;
+ if( !strlen(r) )
+ return(1) ;
+ // within 1st, parse as space-separated
+ // p will point to current word, q past its end
+ // before & after point to rest of text
+ // spaces converted to nulls & back as req'd
+ before = "" ;
+ for( p = data ; p < end ; p = q + 1 ) {
+ if( p > data ) {
+ before = data ;
+ p[-1] = '\0' ;
+ }
+ // find end of word
+ for( q = p ; *q && *q != ' ' ; q++ )
+ ;
+ if( q == end )
+ after = "" ;
+ else if( q < end ) {
+ after = q + 1 ;
+ *q = '\0' ;
+ }
+ else assert(0) ;
+ print_line(before, p, after, r) ;
+ if( q < end )
+ *q = ' ' ;
+ if( p > data )
+ p[-1] = ' ' ;
+ }
+ return(0) ;
+}
+
+// print formatted line for permuted index
+// two tab-separated fields
+// 1st is sort key
+// 2nd is printable line
+// pipe it through something like
+// sort -F | awk -F '\t' '{print $2}'
+// to get final output
+
+int print_line( char *before, char *word, char *after, char *tag)
+{
+	int x, y, z ;	/* lengths of the three text pieces */
+/*
+	printf("%s\t%s\t%s\t%s\n", before, word, after, tag) ;
+*/
+	if( list_word(word) )
+		return 0 ;	/* noise word, not indexed */
+	x = strlen(before) ;
+	y = strlen(word) ;
+	z = strlen(after) ;
+	// put in sortable field
+	// strip out with awk after sorting
+	printf("%s %s\t", word, after) ;
+	// shorten before string to fit field
+	for( ; x > 30 ; x-- )
+		before++ ;
+	printf("%30s", before) ;
+	// print keyword, html tagged
+	printf(" %s%s</a> ", tag, word) ;
+	// padding, outside tag
+	for( ; y < 18 ; y++ )
+		putchar(' ') ;
+	if( z )
+		printf("%s", after) ;
+	printf("\n") ;  return 0 ;
+}
+
+// avoid indexing on common English words
+
+char *list[] = {
+ "the", "of", "a", "an", "to", "and", "or", "if", "for", "at",
+ "am", "is", "are", "was", "were", "have", "has", "had", "be", "been",
+ "on", "some", "with", "any", "into", "as", "by", "in", "out",
+ "that", "then", "this", "that", "than", "these", "those",
+ "he", "his", "him", "she", "her", "hers", "it", "its",
+ "&", "", "+", "-", "=", "--", "<", ">", "<=", ">=",
+ "!", "?", "#", "$", "%", "/", "\\", "\"", "\'",
+ NULL
+ } ;
+// interrogative words like "how" and "where" deliberately left out of
+// above list because users might want to search for "how to..." etc.
+
+// return 1 if word in list, else 0
+// case-insensitive comparison
+
+int list_word( char *p )	/* strcasecmp() is POSIX, from <strings.h> */
+{
+	char **z ;
+	for( z = list ; *z != NULL ; z++ )
+		if( ! strcasecmp( p, *z ) )
+			return 1 ;
+	return 0 ;
+}
+
diff --git a/doc/utils/html2four.1 b/doc/utils/html2four.1
new file mode 100644
index 000000000..456ac5e98
--- /dev/null
+++ b/doc/utils/html2four.1
@@ -0,0 +1,26 @@
+.TH HTML2FOUR 1 "August 1999"
+.\" RCSID $Id: html2four.1,v 1.1 2004/03/15 20:35:24 as Exp $
+.SH NAME
+html2four - extract headers from HTML files into four-field lines
+.SH SYNOPSIS
+.B html2four
+[-digit] file*
+command [ argument ...]
+.SH DESCRIPTION
+.I html2four
+extracts information from HTML files and writes it out with four
+tab-separated fields: filename, last label (<a name=> tag) seen,
+header tag type (H[0-9]), and header text. This is an intermediate
+format convenient for generating a permuted index with four2perm(1)
+or a table of contents with a simple awkscript.
+
+The only option is a digit to limit the header levels extracted.
+For example, with -3 only h1, h2, h3 tags are taken. By default,
+it takes h[0-9], though HTML only defines levels 1 to 6.
+.SH SEE ALSO
+.hy 0
+four2perm(1)
+.SH HISTORY
+Written for the Linux FreeS/WAN project
+<http://www.xs4all.nl/~freeswan/>
+by Sandy Harris.
diff --git a/doc/utils/html2four.c b/doc/utils/html2four.c
new file mode 100644
index 000000000..fc1100d01
--- /dev/null
+++ b/doc/utils/html2four.c
@@ -0,0 +1,298 @@
+/*
+ extract headers from HTML files
+ in format suitable for turning into permuted index
+*/
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/*
+ maximum sizes for input line and for name in <a> tag
+*/
+#define MAX_LINE 512
+#define MAX_NAME 64
+
+/*
+ functions
+ all return 0 for OK, 1 for errors
+*/
+int do_file( char *, FILE * ) ;
+int parse_line( char * ) ;
+int print_line( char *, char *) ;
+int print_header_problem( char * ) ;
+int sanity() ;
+
+void die( char * ) ;
+
+char *prog_name ;
+int max_level ;
+char *current_file ;
+
+int main(int argc, char* argv[])
+{
+	char *p ;
+	int temp, done, status ;
+	FILE *fp ;
+
+	prog_name = *argv ;
+	argc--,argv++ ;
+
+	max_level = 9 ;
+	if(argc && *argv ) {
+		p = *argv ;
+		if( p[0] == '-' ) {
+			if( isdigit(p[1]) && p[2] == '\0' ) {
+				max_level = p[1] - '0' ;	/* was p[1] - 0, which kept the ASCII code, not the digit value */
+				argc-- ;
+				argv++ ;
+			}
+			else	die("unknown option") ;
+	}	}
+
+	status = done = 0 ;
+	if( argc == 0) {
+		if( (status = do_file("STDIN", stdin)) == 0 )
+			done++ ;
+	}
+	else	{
+/*
+	printf("ARGC = %d\n", argc ) ;
+*/
+		while( argc-- )	{
+			p = *argv++ ;
+/*
+	printf("ARGV P %s %s\n", *argv, p) ;
+*/
+			if( p == NULL )	{
+				fprintf(stderr, "%s: null filename pointer\n", prog_name) ;
+				status++ ;
+			}
+			else if( (fp = fopen(p,"r")) == NULL ) {
+				fprintf(stderr, "%s: cannot open file %s\n", prog_name, p) ;
+				status++ ;
+			}
+			else	{
+				if( (temp = do_file(p, fp)) != 0 )
+					status++ ;
+				done++ ;
+				fclose(fp) ;
+			}
+			fflush(stderr) ;
+			fflush(stdout) ;
+		}
+	}
+/*
+	printf("%s: %d files processed, %d with errors\n", prog_name, done, status) ;
+*/
+	return( status ? 1 : 0 ) ;
+}
+
+void die( char *message )
+{
+ fflush(stdout) ;
+ fprintf(stderr, "%s: %s\n", prog_name, message) ;
+ exit(1) ;
+}
+
+int header_flags[10] ;
+int in_header ;
+
+char buffer[MAX_LINE+1] ;
+char label[MAX_NAME+1] ;
+
+int do_file( char *file, FILE *fp )
+{
+ int i, status, x, y ;
+ char *base, *p ;
+
+ status = 0 ;
+ in_header = 0 ;
+ label[0] = '\0' ;
+ for( i = 0 ; i < 10 ; i++ )
+ header_flags[i] = 0 ;
+ current_file = file ;
+
+ while( base = fgets(buffer, MAX_LINE, fp) ) {
+ // count < and > characters in line
+ for( x = y = 0, p = base ; *p ; p++ )
+ switch( *p ) {
+ case '<':
+ x++ ;
+ break ;
+ case '>':
+ y++ ;
+ break ;
+ default:
+ break ;
+ }
+ // skip line if no < or >
+ if( x == 0 && y == 0 )
+ continue ;
+ // report error for unequal count
+ else if( x != y ) {
+ if( strncmp( base, "<!--", 4) && strncmp(base, "-->", 3) ) {
+ fflush(stdout) ;
+ fprintf(stderr, "%s in file %s: unequal < > counts %d %d\n",
+ prog_name, file, x, y ) ;
+ fprintf(stderr, "%s: %s\n", prog_name, base) ;
+ fflush(stderr) ;
+ status = 1 ;
+ }
+ continue ;
+ }
+ // parse lines containing tags
+ else
+ if( parse_line(base) )
+ status = 1 ;
+ // check that header labelling is sane
+ for( i = x = y = 0 ; i < 10 ; i++ ) {
+ // count non-zero entries
+ if( x = header_flags[i] )
+ y++ ;
+ // should be in 0 or 1 headers at a time
+ if( x > 1 || x < 0 )
+ status = 1 ;
+ }
+ if( y > 1 )
+ status = 1 ;
+ }
+ return status ;
+}
+
+int parse_line( char *data )
+{
+	char *p, *q, *end ;
+	int x ;
+
+	// set end pointer
+	for( end = data ; *end ; end++ )
+		;
+	// trim off trailing returns or newlines
+	for( p = end - 1, q = end ; q > data ; p--,q-- ) {
+		switch( *p ) {
+		case '\012':
+		case '\015':
+			*p = '\0' ;
+			continue ;
+		default:
+			break ;	// out of switch()
+		}
+		break ;	// out of for()
+	}
+	end = q ;
+	p = data ;
+	while( p < end ) {
+		// find tag delimiters
+		if( *p == '<') {
+			for( q = p + 1 ; *q ; q++ )
+				if( *q == '<' || *q == '>' )
+					break ;
+			// if we find another '<'
+			// restart tag search from it
+			if( *q == '<' ) {
+				p = q ;
+				continue ;
+			}
+			// "<>" is not interesting
+			if( q == p + 1 ) {
+				fflush(stdout) ;
+				fprintf(stderr, "%s: null tag\n", prog_name) ;
+				fprintf(stderr, "%s: line %s\n", prog_name, data) ;	/* %s was missing: data arg was silently dropped */
+				fflush(stderr) ;
+				p = q + 1 ;
+				continue ;
+			}
+			// ignore delimiters once found
+			*q = '\0' ;
+			p++ ;
+			// p points to tag contents, null terminated
+			switch( *p ) {
+			// save contents of <a name= > tags
+			case 'a' :
+			case 'A' :
+				if( p[1] == ' ' &&
+				    (p[2] == 'n' || p[2] == 'N') &&
+				    (p[3] == 'a' || p[3] == 'A') &&
+				    (p[4] == 'm' || p[4] == 'M') &&
+				    (p[5] == 'e' || p[5] == 'E') &&
+				    p[6] == '=' )
+					strncpy(label, p + 7, MAX_NAME) ;
+				break ;
+			case 'b' :
+			case 'B' :
+				if( in_header && strlen(p) == 2 &&
+				    (p[1] == 'r' || p[1] == 'R') )
+					putchar(' ') ;
+				break ;
+			// header tags
+			case 'h' :
+			case 'H' :
+				if( strlen(p) == 2 && isdigit(p[1]) ) {
+					if( in_header )
+						fprintf(stderr, "%s: bad header nesting in %s\n",
+							prog_name, current_file) ;
+					x = p[1] - '0' ;
+					in_header = 1 ;
+					header_flags[x]++ ;
+					printf("%s\t%s\tH%d\t", current_file, label, x) ;
+				}
+				break ;
+			// only care about end-of-header
+			case '/':
+				p++ ;
+				switch( *p ) {
+				case 'h' :
+				case 'H' :
+					if( strlen(p) == 2 && isdigit(p[1]) ) {
+						if( ! in_header )
+							fprintf(stderr, "%s: bad header nesting in %s\n",
+								prog_name, current_file) ;
+						x = p[1] - '0' ;
+						in_header = 0 ;
+						header_flags[x]-- ;
+						printf("\n") ;
+					}
+					break ;
+				}
+				break ;
+			// uninteresting tag, look for next
+			default :
+				break ;
+			}
+			// tag done, point p beyond it
+			p = q + 1 ;
+		}
+		else if( in_header ) {
+			if( isprint(*p) && *p != '\n' )
+				putchar(*p) ;
+			else
+				putchar(' ');
+			p++ ;
+		}
+		else
+			p++ ;
+	}
+	return(0) ;
+}
+
+int print_line( char *tag, char *text)
+{
+	printf("%s\t%s\t%s\t%s\n", current_file, label, tag, text) ;	/* four tab-separated fields: file, label, tag, text */
+	return 0 ;
+}
+
+int print_header_problem( char *file )
+{
+ int i ;
+ fflush(stdout) ;
+ fprintf(stderr, "%s: HEADER TAG PROBLEM in file %s\n", prog_name, file) ;
+ fprintf(stderr, "%s: counts", prog_name) ;
+ for ( i = 0 ; i < 10 ; i++ )
+ fprintf(stderr, "\t%d", i) ;
+ fprintf(stderr,"\n") ;
+ fflush(stderr) ;
+ return(0) ;
+}
+
diff --git a/doc/utils/html2txt.sed b/doc/utils/html2txt.sed
new file mode 100644
index 000000000..fc4940991
--- /dev/null
+++ b/doc/utils/html2txt.sed
@@ -0,0 +1,86 @@
+# skip over header material
+# Copyright (C) 1999 Sandy Harris.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# RCSID $Id: html2txt.sed,v 1.1 2004/03/15 20:35:24 as Exp $
+/<head>/,/<\/head>/d
+/<HEAD>/,/<\/HEAD>/d
+/^<body>$/d
+s/<body>//
+# eliminate possible DOS crud
+s/\015//
+#get rid of HTML comments
+s/<!--.*-->//
+/<!--/,/-->/d
+# citations & emphasis -> visible
+s/<cite>/"/g
+s/<\/cite>/"/g
+s/<em>/*/g
+s/<\/em>/*/g
+s/<strong>/!->/g
+s/<\/strong>/<-!/g
+s/<b>//g
+s/<\/b>//g
+s/<blockquote>/Quote -->/
+s/<\/blockquote>/<-- End Quote/
+# mark headers
+s/<h1>/Header 1: /
+s/<h2>/Header 2: /
+s/<h3>/Header 3: /
+s/<h4>/Header 4: /
+s/<h5>/Header 5: /
+s/<h6>/Header 6: /
+# remove some cruft
+s/<\/h[1-6]>//
+/^<a name=[a-zA-Z0-9\.]*>$/d
+s/<a name=[a-zA-Z0-9\.]*>//
+# definition lists
+s/<dl>//
+s/<\/dl>//
+s/^<dt>$/-----------------------------------------/
+s/^<dt>/-----------------------------------------\
+/
+s/<dd>/\
+/
+# other types of lists
+s/<li>//
+s/<ol>//
+s/<ul>//
+s/<\/ol>//
+s/<\/ul>//
+# tables
+s/<table>//
+s/<\/table>//
+s/<tr>//
+s/<td>/ /g
+# line break and paragraph markers
+# different subst depending where they are in line
+s/^<br>//
+s/<br>$//
+s/<br>/\
+/
+s/^<p>$//
+s/<p>$/\
+/
+s/^<p>/\
+/
+s/<p>/\
+\
+/
+s/<\/p>//
+# remove more cruft
+s/<pre>//
+s/<\/pre>//
+s/<\/body>//
+s/<\/html>//
+s/<\/BODY>//
+s/<\/HTML>//
diff --git a/doc/utils/killtoodeepcontents.pl b/doc/utils/killtoodeepcontents.pl
new file mode 100644
index 000000000..a6fe551d6
--- /dev/null
+++ b/doc/utils/killtoodeepcontents.pl
@@ -0,0 +1,59 @@
+#!/usr/bin/perl
+
+$toc=0;
+$memo=0;
+
+while(<>) {
+ if(0 && /^Status of this Memo/) {
+ $memo=1;
+ print;
+ next;
+ }
+
+ if(/^Table of Contents/) {
+ print ".bp\n";
+ $toc=1;
+ print;
+ next;
+ }
+
+ if(!$toc && !$memo) {
+ print;
+ next;
+ }
+
+ if($toc) {
+ if(/^[0-9]*\.[0-9]*\.[0-9]* / ||
+# /^[0-9]*\.[0-9]* / ||
+ /^[0-9]*\.[0-9]*\.[0-9]*\.[0-9]* /) {
+ next;
+ }
+
+ if(/^14./) {
+ $toc=0;
+ }
+ if(/^\.bp/) {
+ next;
+ }
+ print;
+ }
+
+ if($memo) {
+ if(/^\.bp/) {
+ next;
+ }
+
+ if(/^Copyright Notice/) {
+ print ".fi\n";
+ print "This memo provides information for the Internet community. It does\n";
+ print "not specify an Internet standard of any kind. Distribution of this\n";
+ print "memo is unlimited.\n";
+ print "\n.ti 0\n";
+
+ print;
+
+ $memo=0;
+ next;
+ }
+ }
+}
diff --git a/doc/utils/man2html.script b/doc/utils/man2html.script
new file mode 100755
index 000000000..515911c81
--- /dev/null
+++ b/doc/utils/man2html.script
@@ -0,0 +1,48 @@
+#!/bin/sh
+
+# Assumes man2html command in path
+# That is a Perl script downloadable from
+# http://www.oac.uci.edu/indiv/ehood/man2html.html
+
+# also uses our man_xref utility
+
+case $# in
+2) ;;
+*) echo "Usage: $0 mantree destdir" >&2 ; exit 2 ;;
+esac
+
+mkdir -p $2
+rm -f $2/*
+
+# handle all sections just in case
+# only 3 5 8 expected
+for i in `find $1 -name 'ipsec*.[1-9]'`
+do
+ b=`basename $i`
+ # then parse that into section number s
+ # and name n
+ case $b in
+ *.1) s=1 ;;
+ *.2) s=2 ;;
+ *.3) s=3 ;;
+ *.4) s=4 ;;
+ *.5) s=5 ;;
+ *.6) s=6 ;;
+ *.7) s=7 ;;
+ *.8) s=8 ;;
+ *.9) s=9 ;;
+ *) echo "$0 has lost its mind" ; exit 1 ;;
+ esac
+ n=`basename $b \.$s`
+ # the echos are a kluge
+ # without them, the first section head is not tagged
+ (echo ; echo ; man $s $n ) | man2html > $2/$b.html
+done
+# man2html doesn't convert man page cross-references such as
+# ipsec.conf(5) into HTML links
+# So post-process to do that.
+for i in $2/*.html
+do
+ ../utils/man_xref $i > temp
+ mv temp $i
+done
diff --git a/doc/utils/man_xref.c b/doc/utils/man_xref.c
new file mode 100644
index 000000000..fc3afb696
--- /dev/null
+++ b/doc/utils/man_xref.c
@@ -0,0 +1,125 @@
+#include <stdio.h>
+#include <ctype.h>
+#include <assert.h>
+
+/*
+ look through HTMLized man pages
+ convert references like man(1) into HTML links
+
+ somewhat quick & dirty code
+ various dubious assumptions made:
+
+ [a-zA-Z0-9\-_\.]* defines legal characters in name
+ pagename(x) corresponds to pagename.x.html
+ (Fine *if* it's been converted by my scripts)
+ x in the above must be a single digit
+ (or we ignore it, which does no damage)
+ Lazy parsing: malloc() enough RAM to read in whole file
+ Limited syntax: exactly one input file, results to stdout
+
+ Sandy Harris
+*/
+
+int do_file( char *, char *) ;
+
+main(int argc, char **argv)
+{
+ FILE *in ;
+ char *progname;
+ long lsize ;
+ size_t size, nread;
+ char *buffer, *bufend ;
+ progname = *argv ;
+ if( argc != 2 ) {
+ fprintf(stderr,"usage: %s input-file\n", progname);
+ exit(1) ;
+ }
+ if( (in = fopen(argv[1],"r")) == NULL ) {
+ fprintf(stderr,"%s Can't open input file\n", progname);
+ exit(2) ;
+ }
+ if( (lsize = fseek(in, 0L, SEEK_END)) < 0L ) {
+ fprintf(stderr,"%s fseek() fails\n", progname);
+ exit(3) ;
+ }
+ lsize = ftell(in) ;
+ rewind(in) ;
+ size = (size_t) lsize ;
+ if( lsize != (long) size ) {
+ fprintf(stderr,"%s file too large\n", progname);
+ exit(4) ;
+ }
+ if( (buffer = (char *) malloc(size)) == NULL) {
+ fprintf(stderr,"%s malloc() failed\n", progname);
+ exit(5) ;
+ }
+ bufend = buffer + size ;
+ if( (nread = fread(buffer, size, 1, in)) != 1) {
+ fprintf(stderr,"%s fread() failed\n", progname);
+ exit(6) ;
+ }
+ do_file(buffer,bufend);
+}
+
+do_file(char *start, char *end)
+{
+ /* p is where to start parsing, one past last output */
+ /* q is how far we've parsed */
+ char *p, *q ;
+ int value ;
+ for( p = q = start ; p < end ; q = (q<end) ? (q+1) : q ) {
+ /* if p is beyond q, catch up */
+ if( q < p )
+ continue ;
+ /* move q ahead until we know if we've got manpage name */
+ if( isalnum(*q) )
+ continue ;
+ switch(*q) {
+ /* can appear in manpage name */
+ case '.':
+ case '_':
+ case '-':
+ case '(':
+ continue ;
+ break ;
+ /* whatever's between p and q
+ is not a manpage name
+ so output it
+ */
+ default:
+ /* leave p one past output */
+ for( ; p <= q ; p++ )
+ putchar(*p);
+ break ;
+ /* we may have a manpage name */
+ case ')':
+ value = do_name(p,q);
+ if(value) {
+ p = q ;
+ p++ ;
+ }
+ /* unreached with current do_name() */
+ else
+ for( ; p <= q ; p++ )
+ putchar(*p);
+ break ;
+} } }
+
+do_name(char *p, char *q)
+{
+ *q = '\0' ;
+ /* if end of string matches RE ([0-9])
+ with at least one legal character before it
+ add HTML xref stuff
+ */
+ if( (q-p > 3) && isdigit(q[-1]) && (q[-2]=='(')) {
+ q[-2] = '\0' ;
+ q-- ;
+ printf("<a href=\"%s.%s.html\">", p, q);
+ printf("%s(%s)", p, q);
+ printf("</a>");
+ }
+ // otherwise just print string
+ else printf("%s)", p);
+ return 1 ;
+}
diff --git a/doc/utils/mkhtmlman b/doc/utils/mkhtmlman
new file mode 100755
index 000000000..6d73bd1f2
--- /dev/null
+++ b/doc/utils/mkhtmlman
@@ -0,0 +1,44 @@
+#!/bin/sh
+# gathers manpages up into dir, converts them to HTML, including interlinking
+# Assumes RedHat6.0 man2html available.
+
+PATH=/usr/local/bin:/bin:/usr/bin:/usr/contrib/bin:$PATH ; export PATH
+
+# note, this is always run from freeswan/doc.
+
+TOPDIR=..
+
+case $# in
+1) exit 0 ;;
+0) echo "Usage: $0 destdir manpage ..." >&2 ; exit 1 ;;
+esac
+
+dir=$1
+shift
+mkdir -p $dir
+rm -f $dir/*
+
+for f
+do
+ b=`basename $f`
+ case $b in
+ ipsec*) ;; # ipsec.8, ipsec.conf.5, etc.
+ *) b="ipsec_$b" ;;
+ esac
+ cp $f $dir/$b
+ $TOPDIR/packaging/utils/manlink $f | while read from to
+ do
+ (cd $dir; ln -s ../$f $to)
+ done
+done
+
+# build the html (sed mess fixes overly-smart man2html's crud)
+refpat='"http://localhost/cgi-bin/man/man2html?\([1-8]\)+\([^"]*\)"'
+for f in $dir/*.[1-8]
+do
+ echo Processing $f
+ man2html <$f | sed 's;'"$refpat"';"\2.\1.html";g' >$f.html
+done
+
+# remove the source files (must wait until after all builds, due to symlinks)
+rm -f $dir/*.[1-8]
diff --git a/doc/utils/perm1.awk b/doc/utils/perm1.awk
new file mode 100644
index 000000000..d9f8f5565
--- /dev/null
+++ b/doc/utils/perm1.awk
@@ -0,0 +1 @@
+{ print $4 "\t<a href=\"" $1 "#" $2 "\">" }
diff --git a/doc/utils/perm2.awk b/doc/utils/perm2.awk
new file mode 100644
index 000000000..3c55fef11
--- /dev/null
+++ b/doc/utils/perm2.awk
@@ -0,0 +1,46 @@
+BEGIN {
+ print "<html>\n<body>"
+ print "<h2>Permuted Index of HTML headers in FreeS/WAN documents</h2>"
+ print "<h3>Jump to a letter</h3>"
+ print "<center><big><strong>"
+ print "<a href=\"#0\">numeric</a>"
+ print "<a href=\"#a\">A</a>"
+ print "<a href=\"#b\">B</a>"
+ print "<a href=\"#c\">C</a>"
+ print "<a href=\"#d\">D</a>"
+ print "<a href=\"#e\">E</a>"
+ print "<a href=\"#f\">F</a>"
+ print "<a href=\"#g\">G</a>"
+ print "<a href=\"#h\">H</a>"
+ print "<a href=\"#i\">I</a>"
+ print "<a href=\"#j\">J</a>"
+ print "<a href=\"#k\">K</a>"
+ print "<a href=\"#l\">L</a>"
+ print "<a href=\"#m\">M</a>"
+ print "<a href=\"#n\">N</a>"
+ print "<a href=\"#o\">O</a>"
+ print "<a href=\"#p\">P</a>"
+ print "<a href=\"#q\">Q</a>"
+ print "<a href=\"#r\">R</a>"
+ print "<a href=\"#s\">S</a>"
+ print "<a href=\"#t\">T</a>"
+ print "<a href=\"#u\">U</a>"
+ print "<a href=\"#v\">V</a>"
+ print "<a href=\"#w\">W</a>"
+ print "<a href=\"#x\">X</a>"
+ print "<a href=\"#y\">Y</a>"
+ print "<a href=\"#z\">Z</a>"
+ print "</strong></big></center>"
+ print "<hr>"
+ print "<pre>"
+ print "<a name=0>"
+ old =""
+ }
+{ x = tolower(substr($1,1,1))
+ if( (x ~ /[a-zA-Z]/) && (x != old) )
+ print "<a name=" x ">" $2
+ else
+ print $2
+ old = x
+ }
+END { print "</pre>\n</html>" }
diff --git a/doc/utils/rfc_pg.c b/doc/utils/rfc_pg.c
new file mode 100644
index 000000000..448cc1a36
--- /dev/null
+++ b/doc/utils/rfc_pg.c
@@ -0,0 +1,76 @@
+/*
+ * $Header: /var/cvsroot/strongswan/doc/utils/rfc_pg.c,v 1.1 2004/03/15 20:35:24 as Exp $
+ *
+ * from 2-nroff.template file.
+ *
+ * Remove N lines following any line that contains a form feed (^L).
+ * (Why can't this be done with awk or sed?)
+ *
+ * OPTION:
+ * -n# Number of lines to delete following each ^L (0 default).
+ * $Log: rfc_pg.c,v $
+ * Revision 1.1 2004/03/15 20:35:24 as
+ * added files from freeswan-2.04-x509-1.5.3
+ *
+ * Revision 1.1 2002/07/23 18:42:43 mcr
+ * required utility from IETF to help with formatting of drafts.
+ *
+ */
+#include <stdio.h>
+
+#define FORM_FEED '\f'
+#define OPTION "n:N:" /* for getopt() */
+
+extern char *optarg;
+extern int optind;
+
+main(argc, argv)
+int argc;
+char *argv[];
+{
+ int c, /* next input char */
+ nlines = 0; /* lines to delete after ^L */
+ void print_and_delete(); /* print line starting with ^L,
+ then delete N lines */
+
+/*********************** Process option (-nlines) ***********************/
+
+ while ((c = getopt(argc, argv, OPTION)) != EOF)
+ switch(c)
+ {
+ case 'n' :
+ case 'N' : nlines = atoi(optarg);
+ break;
+ }
+/************************* READ AND PROCESS CHARS **********************/
+
+ while ((c = getchar()) != EOF)
+ if (c == FORM_FEED)
+ print_and_delete(nlines); /* remove N lines after this one */
+ else
+ putchar(c); /* we write the form feed */
+ exit(0);
+}
+
+
+/*
+ * Print rest of line, then delete next N lines.
+ */
+void print_and_delete(n)
+int n; /* nbr of lines to delete */
+{
+ int c, /* next input char */
+ cntr = 0; /* count of deleted lines */
+
+ while ((c = getchar()) != '\n') /* finish current line */
+ putchar(c);
+ putchar('\n'); /* write the last CR */
+ putchar(FORM_FEED);
+
+ for ( ; cntr < n; cntr++)
+ while ((c = getchar()) != '\n')
+ if (c == EOF)
+ exit(0); /* exit on EOF */
+ putchar(c); /* write that last CR */
+}
+
diff --git a/doc/utils/xref.sed b/doc/utils/xref.sed
new file mode 100644
index 000000000..8c3b442cc
--- /dev/null
+++ b/doc/utils/xref.sed
@@ -0,0 +1,56 @@
+# turn end-of xref tags into <*>
+# Copyright (C) 1999 Sandy Harris.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# RCSID $Id: xref.sed,v 1.1 2004/03/15 20:35:24 as Exp $
+s/<\/a>/<*>/g
+# delete all xrefs that point
+# within our document set
+s/<a href="..\/Internet-docs\/rfc....\.txt">//
+# in same doc
+s/<a href="#[a-zA-Z0-9\.]*">//
+# pointer into another doc
+s/<a href="DES.html#[a-zA-Z0-9\.]*">//
+s/<a href="RFCs.html#[a-zA-Z0-9\.]*">//
+s/<a href="WWWref.html#[a-zA-Z0-9\.]*">//
+s/<a href="bibliography.html#[a-zA-Z0-9\.]*">//
+s/<a href="compatibility.html#[a-zA-Z0-9\.]*">//
+s/<a href="configuration.html#[a-zA-Z0-9\.]*">//
+s/<a href="contents.html#[a-zA-Z0-9\.]*">//
+s/<a href="debugging.html#[a-zA-Z0-9\.]*">//
+s/<a href="exportlaws.html#[a-zA-Z0-9\.]*">//
+s/<a href="glossary.html#[a-zA-Z0-9\.]*">//
+s/<a href="index.html#[a-zA-Z0-9\.]*">//
+s/<a href="overview.html#[a-zA-Z0-9\.]*">//
+s/<a href="roadmap.html#[a-zA-Z0-9\.]*">//
+s/<a href="testbed.html#[a-zA-Z0-9\.]*">//
+s/<a href="setup.html#[a-zA-Z0-9\.]*">//
+# pointer to head of doc
+s/<a href="DES.html">//
+s/<a href="RFCs.html">//
+s/<a href="WWWref.html">//
+s/<a href="bibliography.html">//
+s/<a href="compatibility.html">//
+s/<a href="configuration.html">//
+s/<a href="contents.html">//
+s/<a href="debugging.html">//
+s/<a href="exportlaws.html">//
+s/<a href="glossary.html">//
+s/<a href="index.html">//
+s/<a href="overview.html">//
+s/<a href="roadmap.html">//
+s/<a href="testbed.html">//
+s/<a href="setup.html">//
+# xref to non-HTML files
+s/<a href="standards">//
+s/<a href="impl.notes">//
+s/<a href="prob.report">//