/* URL handling.
- Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.
-This file is part of Wget.
+This file is part of GNU Wget.
-This program is free software; you can redistribute it and/or modify
+GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
+the Free Software Foundation; either version 2 of the License, or (at
+your option) any later version.
-This program is distributed in the hope that it will be useful,
+GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
+along with Wget; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#include <config.h>
#else
# include <strings.h>
#endif
-#include <ctype.h>
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#include "utils.h"
#include "url.h"
#include "host.h"
-#include "html.h"
#ifndef errno
extern int errno;
#endif
-/* Default port definitions */
-#define DEFAULT_HTTP_PORT 80
-#define DEFAULT_FTP_PORT 21
-
-/* URL separator (for findurl) */
-#define URL_SEPARATOR "!\"#'(),>`{}|<>"
-
-/* A list of unsafe characters for encoding, as per RFC1738. '@' and
- ':' (not listed in RFC) were added because of user/password
- encoding. */
-
-#ifndef WINDOWS
-# define URL_UNSAFE_CHARS "<>\"#%{}|\\^~[]`@:"
-#else /* WINDOWS */
-# define URL_UNSAFE_CHARS "<>\"%{}|\\^[]`"
-#endif /* WINDOWS */
-
-#define UNSAFE_CHAR(c) ( ((unsigned char)(c) <= ' ') /* ASCII 32 */ \
- || ((unsigned char)(c) > '~') /* ASCII 127 */ \
- || strchr (URL_UNSAFE_CHARS, c))
-
-/* If S contains unsafe characters, free it and replace it with a
- version that doesn't. */
-#define URL_CLEANSE(s) do \
-{ \
- if (contains_unsafe (s)) \
- { \
- char *uc_tmp = encode_string (s); \
- free (s); \
- (s) = uc_tmp; \
- } \
-} while (0)
-
-/* Is a directory "."? */
+/* Is X "."? */
#define DOTP(x) ((*(x) == '.') && (!*(x + 1)))
-/* Is a directory ".."? */
+/* Is X ".."? */
#define DDOTP(x) ((*(x) == '.') && (*(x + 1) == '.') && (!*(x + 2)))
-/* NULL-terminated list of strings to be recognized as prototypes (URL
- schemes). Note that recognized doesn't mean supported -- only HTTP
- and FTP are currently supported.
+static int urlpath_length PARAMS ((const char *));
- However, a string that does not match anything in the list will be
- considered a relative URL. Thus it's important that this list has
- anything anyone could think of being legal.
-
- There are wild things here. :-) Take a look at
- <URL:http://www.w3.org/pub/WWW/Addressing/schemes.html> for more
- fun. */
-static char *protostrings[] =
+struct scheme_data
{
- "cid:",
- "clsid:",
- "file:",
- "finger:",
- "ftp:",
- "gopher:",
- "hdl:",
- "http:",
- "https:",
- "ilu:",
- "ior:",
- "irc:",
- "java:",
- "javascript:",
- "lifn:",
- "mailto:",
- "mid:",
- "news:",
- "nntp:",
- "path:",
- "prospero:",
- "rlogin:",
- "service:",
- "shttp:",
- "snews:",
- "stanf:",
- "telnet:",
- "tn3270:",
- "wais:",
- "whois++:",
- NULL
+ char *leading_string;
+ int default_port;
};
-struct proto
+/* Supported schemes: */
+static struct scheme_data supported_schemes[] =
{
- char *name;
- uerr_t ind;
- unsigned short port;
-};
+ { "http://", DEFAULT_HTTP_PORT },
+#ifdef HAVE_SSL
+ { "https://", DEFAULT_HTTPS_PORT },
+#endif
+ { "ftp://", DEFAULT_FTP_PORT },
-/* Similar to former, but for supported protocols: */
-static struct proto sup_protos[] =
-{
- { "http://", URLHTTP, DEFAULT_HTTP_PORT },
- { "ftp://", URLFTP, DEFAULT_FTP_PORT },
- /*{ "file://", URLFILE, DEFAULT_FTP_PORT },*/
+ /* SCHEME_INVALID */
+ { NULL, -1 }
};
-static void parse_dir PARAMS ((const char *, char **, char **));
-static uerr_t parse_uname PARAMS ((const char *, char **, char **));
-static char *construct PARAMS ((const char *, const char *, int , int));
static char *construct_relative PARAMS ((const char *, const char *));
-static char process_ftp_type PARAMS ((char *));
\f
-/* Returns the number of characters to be skipped if the first thing
- in a URL is URL: (which is 0 or 4+). The optional spaces after
- URL: are also skipped. */
-int
-skip_url (const char *url)
-{
- int i;
+/* Support for encoding and decoding of URL strings. We determine
+ whether a character is unsafe through static table lookup. This
+ code assumes ASCII character set and 8-bit chars. */
- if (TOUPPER (url[0]) == 'U'
- && TOUPPER (url[1]) == 'R'
- && TOUPPER (url[2]) == 'L'
- && url[3] == ':')
- {
- /* Skip blanks. */
- for (i = 4; url[i] && ISSPACE (url[i]); i++);
- return i;
- }
- else
- return 0;
-}
+enum {
+ urlchr_reserved = 1,
+ urlchr_unsafe = 2
+};
-/* Returns 1 if the string contains unsafe characters, 0 otherwise. */
-int
-contains_unsafe (const char *s)
+#define R urlchr_reserved
+#define U urlchr_unsafe
+#define RU R|U
+
+#define urlchr_test(c, mask) (urlchr_table[(unsigned char)(c)] & (mask))
+
+/* rfc1738 reserved chars, preserved from encoding. */
+
+#define RESERVED_CHAR(c) urlchr_test(c, urlchr_reserved)
+
+/* rfc1738 unsafe chars, plus some more. */
+
+#define UNSAFE_CHAR(c) urlchr_test(c, urlchr_unsafe)
+
+const static unsigned char urlchr_table[256] =
{
- for (; *s; s++)
- if (UNSAFE_CHAR (*s))
- return 1;
- return 0;
-}
+ U, U, U, U, U, U, U, U, /* NUL SOH STX ETX EOT ENQ ACK BEL */
+ U, U, U, U, U, U, U, U, /* BS HT LF VT FF CR SO SI */
+ U, U, U, U, U, U, U, U, /* DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
+ U, U, U, U, U, U, U, U, /* CAN EM SUB ESC FS GS RS US */
+ U, 0, U, RU, 0, U, R, 0, /* SP ! " # $ % & ' */
+ 0, 0, 0, R, 0, 0, 0, R, /* ( ) * + , - . / */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 1 2 3 4 5 6 7 */
+ 0, 0, RU, R, U, R, U, R, /* 8 9 : ; < = > ? */
+ RU, 0, 0, 0, 0, 0, 0, 0, /* @ A B C D E F G */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* H I J K L M N O */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* P Q R S T U V W */
+ 0, 0, 0, U, U, U, U, 0, /* X Y Z [ \ ] ^ _ */
+ U, 0, 0, 0, 0, 0, 0, 0, /* ` a b c d e f g */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* h i j k l m n o */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* p q r s t u v w */
+ 0, 0, 0, U, U, U, U, U, /* x y z { | } ~ DEL */
+
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+};
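A few spot checks make the table concrete. This is an illustrative sketch only (not part of the patch), assuming it is compiled inside url.c where urlchr_table, UNSAFE_CHAR, RESERVED_CHAR and assert are all visible:

    static void
    urlchr_table_spot_check (void)
    {
      assert (UNSAFE_CHAR (' ') && !RESERVED_CHAR (' '));   /* SP: always encoded    */
      assert (!UNSAFE_CHAR ('/') && RESERVED_CHAR ('/'));   /* '/': left untouched   */
      assert (UNSAFE_CHAR ('#') && RESERVED_CHAR ('#'));    /* '#': RU, i.e. both    */
      assert (!UNSAFE_CHAR ('+') && RESERVED_CHAR ('+'));   /* '+': treated reserved */
      assert (!UNSAFE_CHAR ('a') && !RESERVED_CHAR ('a'));  /* letters pass through  */
    }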
/* Decodes the forms %xy in a URL to the character the hexadecimal
code of which is xy. xy are hexadecimal digits from
static void
decode_string (char *s)
{
- char *p = s;
+ char *t = s; /* t - tortoise */
+ char *h = s; /* h - hare */
- for (; *s; s++, p++)
+ for (; *h; h++, t++)
{
- if (*s != '%')
- *p = *s;
+ if (*h != '%')
+ {
+ copychar:
+ *t = *h;
+ }
else
{
- /* Do nothing if at the end of the string, or if the chars
- are not hex-digits. */
- if (!*(s + 1) || !*(s + 2)
- || !(ISXDIGIT (*(s + 1)) && ISXDIGIT (*(s + 2))))
- {
- *p = *s;
- continue;
- }
- *p = (ASC2HEXD (*(s + 1)) << 4) + ASC2HEXD (*(s + 2));
- s += 2;
+ /* Do nothing if '%' is not followed by two hex digits. */
+ if (!*(h + 1) || !*(h + 2)
+ || !(ISXDIGIT (*(h + 1)) && ISXDIGIT (*(h + 2))))
+ goto copychar;
+ *t = (XCHAR_TO_XDIGIT (*(h + 1)) << 4) + XCHAR_TO_XDIGIT (*(h + 2));
+ h += 2;
+ }
+ }
+ *t = '\0';
+}
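An illustrative sketch of the behavior (decode_string is static, so a call like this would have to live inside url.c; the string is hypothetical). Decoding happens in place, and a truncated or malformed escape is copied through literally:

    char buf[] = "%7Efoo%2Fbar%2";
    decode_string (buf);
    /* buf is now "~foo/bar%2": %7E and %2F were decoded, while the
       incomplete "%2" at the end was left exactly as it was. */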
+
+/* Like encode_string, but return S if there are no unsafe chars. */
+
+static char *
+encode_string_maybe (const char *s)
+{
+ const char *p1;
+ char *p2, *newstr;
+ int newlen;
+ int addition = 0;
+
+ for (p1 = s; *p1; p1++)
+ if (UNSAFE_CHAR (*p1))
+ addition += 2; /* Two more characters (hex digits) */
+
+ if (!addition)
+ return (char *)s;
+
+ newlen = (p1 - s) + addition;
+ newstr = (char *)xmalloc (newlen + 1);
+
+ p1 = s;
+ p2 = newstr;
+ while (*p1)
+ {
+ if (UNSAFE_CHAR (*p1))
+ {
+ const unsigned char c = *p1++;
+ *p2++ = '%';
+ *p2++ = XDIGIT_TO_XCHAR (c >> 4);
+ *p2++ = XDIGIT_TO_XCHAR (c & 0xf);
}
+ else
+ *p2++ = *p1++;
}
- *p = '\0';
+ *p2 = '\0';
+ assert (p2 - newstr == newlen);
+
+ return newstr;
}
-/* Encode the unsafe characters (as determined by URL_UNSAFE) in a
+/* Encode the unsafe characters (as determined by UNSAFE_CHAR) in a
given string, returning a malloc-ed %XX encoded string. */
+
char *
encode_string (const char *s)
{
- const char *b;
- char *p, *res;
- int i;
-
- b = s;
- for (i = 0; *s; s++, i++)
- if (UNSAFE_CHAR (*s))
- i += 2; /* Two more characters (hex digits) */
- res = (char *)xmalloc (i + 1);
- s = b;
- for (p = res; *s; s++)
- if (UNSAFE_CHAR (*s))
- {
- const unsigned char c = *s;
- *p++ = '%';
- *p++ = HEXD2ASC (c >> 4);
- *p++ = HEXD2ASC (c & 0xf);
- }
- else
- *p++ = *s;
- *p = '\0';
- return res;
+ char *encoded = encode_string_maybe (s);
+ if (encoded != s)
+ return encoded;
+ else
+ return xstrdup (s);
}
+
+/* Encode unsafe characters in PTR to %xx. If such encoding is done,
+ the old value of PTR is freed and PTR is made to point to the newly
+ allocated storage. */
+
+#define ENCODE(ptr) do { \
+ char *e_new = encode_string_maybe (ptr); \
+ if (e_new != ptr) \
+ { \
+ xfree (ptr); \
+ ptr = e_new; \
+ } \
+} while (0)
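The difference between the two entry points matters for memory management. A minimal sketch with made-up strings, assuming it runs inside url.c where the static encode_string_maybe is visible:

    const char *plain = "already-safe";
    char *maybe = encode_string_maybe (plain);  /* no unsafe chars: returns PLAIN itself */
    char *fresh = encode_string ("foo bar");    /* always malloc'd: "foo%20bar" */

    if (maybe != plain)
      xfree (maybe);
    xfree (fresh);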
\f
-/* Returns the proto-type if URL's protocol is supported, or
- URLUNKNOWN if not. */
-uerr_t
-urlproto (const char *url)
-{
- int i;
+enum copy_method { CM_DECODE, CM_ENCODE, CM_PASSTHROUGH };
- url += skip_url (url);
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- if (!strncasecmp (url, sup_protos[i].name, strlen (sup_protos[i].name)))
- return sup_protos[i].ind;
- for (i = 0; url[i] && url[i] != ':' && url[i] != '/'; i++);
- if (url[i] == ':')
+/* Decide whether to encode, decode, or pass through the char at P.
+ This used to be a macro, but it got a little too convoluted. */
+static inline enum copy_method
+decide_copy_method (const char *p)
+{
+ if (*p == '%')
{
- for (++i; url[i] && url[i] != '/'; i++)
- if (!ISDIGIT (url[i]))
- return URLBADPORT;
- if (url[i - 1] == ':')
- return URLFTP;
+ if (ISXDIGIT (*(p + 1)) && ISXDIGIT (*(p + 2)))
+ {
+ /* %xx sequence: decode it, unless it would decode to an
+ unsafe or a reserved char; in that case, leave it as
+ is. */
+ char preempt = (XCHAR_TO_XDIGIT (*(p + 1)) << 4) +
+ XCHAR_TO_XDIGIT (*(p + 2));
+
+ if (UNSAFE_CHAR (preempt) || RESERVED_CHAR (preempt))
+ return CM_PASSTHROUGH;
+ else
+ return CM_DECODE;
+ }
else
- return URLHTTP;
+ /* Garbled %.. sequence: encode `%'. */
+ return CM_ENCODE;
}
+ else if (UNSAFE_CHAR (*p) && !RESERVED_CHAR (*p))
+ return CM_ENCODE;
else
- return URLHTTP;
+ return CM_PASSTHROUGH;
+}
+
+/* Translate a %-quoting (but possibly non-conformant) input string S
+ into a %-quoting (and conformant) output string. If no characters
+ are encoded or decoded, return the same string S; otherwise, return
+ a freshly allocated string with the new contents.
+
+ After a URL has been run through this function, the protocols that
+ use `%' as the quote character can use the resulting string as-is,
+   while those that don't can call decode_string() to get to the intended
+ data. This function is also stable: after an input string is
+ transformed the first time, all further transformations of the
+ result yield the same result string.
+
+ Let's discuss why this function is needed.
+
+ Imagine Wget is to retrieve `http://abc.xyz/abc def'. Since a raw
+ space character would mess up the HTTP request, it needs to be
+ quoted, like this:
+
+ GET /abc%20def HTTP/1.0
+
+ So it appears that the unsafe chars need to be quoted, as with
+ encode_string. But what if we're requested to download
+ `abc%20def'? Remember that %-encoding is valid URL syntax, so what
+ the user meant was a literal space, and he was kind enough to quote
+ it. In that case, Wget should obviously leave the `%20' as is, and
+ send the same request as above. So in this case we may not call
+ encode_string.
+
+ But what if the requested URI is `abc%20 def'? If we call
+ encode_string, we end up with `/abc%2520%20def', which is almost
+ certainly not intended. If we don't call encode_string, we are
+ left with the embedded space and cannot send the request. What the
+ user meant was for Wget to request `/abc%20%20def', and this is
+ where reencode_string kicks in.
+
+ Wget used to solve this by first decoding %-quotes, and then
+ encoding all the "unsafe" characters found in the resulting string.
+ This was wrong because it didn't preserve certain URL special
+ (reserved) characters. For instance, URI containing "a%2B+b" (0x2b
+ == '+') would get translated to "a%2B%2Bb" or "a++b" depending on
+ whether we considered `+' reserved (it is). One of these results
+ is inevitable because by the second step we would lose information
+ on whether the `+' was originally encoded or not. Both results
+ were wrong because in CGI parameters + means space, while %2B means
+ literal plus. reencode_string correctly translates the above to
+ "a%2B+b", i.e. returns the original string.
+
+ This function uses an algorithm proposed by Anon Sricharoenchai:
+
+ 1. Encode all URL_UNSAFE and the "%" that are not followed by 2
+ hexdigits.
+
+ 2. Decode all "%XX" except URL_UNSAFE, URL_RESERVED (";/?:@=&") and
+ "+".
+
+ ...except that this code conflates the two steps, and decides
+ whether to encode, decode, or pass through each character in turn.
+ The function still uses two passes, but their logic is the same --
+ the first pass exists merely for the sake of allocation. Another
+   small difference is that we include `+' in URL_RESERVED.
+
+ Anon's test case:
+
+ "http://abc.xyz/%20%3F%%36%31%25aa% a?a=%61+a%2Ba&b=b%26c%3Dc"
+ ->
+ "http://abc.xyz/%20%3F%2561%25aa%25%20a?a=a+a%2Ba&b=b%26c%3Dc"
+
+ Simpler test cases:
+
+ "foo bar" -> "foo%20bar"
+ "foo%20bar" -> "foo%20bar"
+ "foo %20bar" -> "foo%20%20bar"
+ "foo%%20bar" -> "foo%25%20bar" (0x25 == '%')
+ "foo%25%20bar" -> "foo%25%20bar"
+ "foo%2%20bar" -> "foo%252%20bar"
+ "foo+bar" -> "foo+bar" (plus is reserved!)
+ "foo%2b+bar" -> "foo%2b+bar" */
+
+char *
+reencode_string (const char *s)
+{
+ const char *p1;
+ char *newstr, *p2;
+ int oldlen, newlen;
+
+ int encode_count = 0;
+ int decode_count = 0;
+
+ /* First, pass through the string to see if there's anything to do,
+ and to calculate the new length. */
+ for (p1 = s; *p1; p1++)
+ {
+ switch (decide_copy_method (p1))
+ {
+ case CM_ENCODE:
+ ++encode_count;
+ break;
+ case CM_DECODE:
+ ++decode_count;
+ break;
+ case CM_PASSTHROUGH:
+ break;
+ }
+ }
+
+ if (!encode_count && !decode_count)
+ /* The string is good as it is. */
+ return (char *)s; /* C const model sucks. */
+
+ oldlen = p1 - s;
+ /* Each encoding adds two characters (hex digits), while each
+ decoding removes two characters. */
+ newlen = oldlen + 2 * (encode_count - decode_count);
+ newstr = xmalloc (newlen + 1);
+
+ p1 = s;
+ p2 = newstr;
+
+ while (*p1)
+ {
+ switch (decide_copy_method (p1))
+ {
+ case CM_ENCODE:
+ {
+ char c = *p1++;
+ *p2++ = '%';
+ *p2++ = XDIGIT_TO_XCHAR (c >> 4);
+ *p2++ = XDIGIT_TO_XCHAR (c & 0xf);
+ }
+ break;
+ case CM_DECODE:
+ *p2++ = ((XCHAR_TO_XDIGIT (*(p1 + 1)) << 4)
+ + (XCHAR_TO_XDIGIT (*(p1 + 2))));
+ p1 += 3; /* skip %xx */
+ break;
+ case CM_PASSTHROUGH:
+ *p2++ = *p1++;
+ }
+ }
+ *p2 = '\0';
+ assert (p2 - newstr == newlen);
+ return newstr;
+}
+
+/* Run PTR_VAR through reencode_string. If a new string is consed,
+ free PTR_VAR and make it point to the new storage. Obviously,
+ PTR_VAR needs to be an lvalue. */
+
+#define REENCODE(ptr_var) do { \
+ char *rf_new = reencode_string (ptr_var); \
+ if (rf_new != ptr_var) \
+ { \
+ xfree (ptr_var); \
+ ptr_var = rf_new; \
+ } \
+} while (0)
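A short sketch of the calling convention, with a hypothetical URL: reencode_string returns its argument untouched when there is nothing to do, which is why REENCODE compares pointers before freeing.

    char *u = xstrdup ("http://host/a b%2Fc");
    REENCODE (u);
    /* u now points to freshly allocated "http://host/a%20b%2Fc": the raw
       space was encoded, the already-quoted %2F was preserved, and the old
       string was freed by the macro. */
    xfree (u);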
+\f
+/* Returns the scheme type if the scheme is supported, or
+ SCHEME_INVALID if not. */
+enum url_scheme
+url_scheme (const char *url)
+{
+ int i;
+
+ for (i = 0; supported_schemes[i].leading_string; i++)
+ if (!strncasecmp (url, supported_schemes[i].leading_string,
+ strlen (supported_schemes[i].leading_string)))
+ return (enum url_scheme)i;
+ return SCHEME_INVALID;
}
-/* Skip the protocol part of the URL, e.g. `http://'. If no protocol
- part is found, returns 0. */
+/* Return the number of characters needed to skip the scheme part of
+ the URL, e.g. `http://'. If no scheme is found, returns 0. */
int
-skip_proto (const char *url)
+url_skip_scheme (const char *url)
{
- char **s;
- int l;
+ const char *p = url;
- for (s = protostrings; *s; s++)
- if (!strncasecmp (*s, url, strlen (*s)))
- break;
- if (!*s)
+ /* Skip the scheme name. We allow `-' and `+' because of `whois++',
+ etc. */
+ while (ISALNUM (*p) || *p == '-' || *p == '+')
+ ++p;
+ if (*p != ':')
return 0;
- l = strlen (*s);
- /* HTTP and FTP protocols are expected to yield exact host names
- (i.e. the `//' part must be skipped, too). */
- if (!strcmp (*s, "http:") || !strcmp (*s, "ftp:"))
- l += 2;
- return l;
+ /* Skip ':'. */
+ ++p;
+
+ /* Skip "//" if found. */
+ if (*p == '/' && *(p + 1) == '/')
+ p += 2;
+
+ return p - url;
}
-/* Returns 1 if the URL begins with a protocol (supported or
+/* Returns 1 if the URL begins with a scheme (supported or
unsupported), 0 otherwise. */
-static int
-has_proto (const char *url)
+int
+url_has_scheme (const char *url)
{
- char **s;
+ const char *p = url;
+ while (ISALNUM (*p) || *p == '-' || *p == '+')
+ ++p;
+ return *p == ':';
+}
- url += skip_url (url);
- for (s = protostrings; *s; s++)
- if (strncasecmp (url, *s, strlen (*s)) == 0)
- return 1;
- return 0;
+int
+scheme_default_port (enum url_scheme scheme)
+{
+ return supported_schemes[scheme].default_port;
}
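Illustrative expectations for the scheme helpers, assuming the enum url_scheme values declared in url.h follow the same order as supported_schemes[] (the SCHEME_INVALID sentinel above suggests they do); the URLs are made up:

    assert (url_scheme ("http://www.gnu.org/") == SCHEME_HTTP);
    assert (url_scheme ("gopher://host/doc") == SCHEME_INVALID);   /* not supported        */
    assert (url_skip_scheme ("http://www.gnu.org/") == 7);         /* past "http://"       */
    assert (url_has_scheme ("mailto:bug-wget@gnu.org"));           /* known but unsupported */
    assert (!url_has_scheme ("www.gnu.org/index.html"));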
/* Skip the username and password, if present here. The function
should be called *not* with the complete URL, but with the part
- right after the protocol.
+ right after the scheme.
If no username and password are found, return 0. */
int
-skip_uname (const char *url)
+url_skip_uname (const char *url)
{
const char *p;
- for (p = url; *p && *p != '/'; p++)
- if (*p == '@')
- break;
- /* If a `@' was found before the first occurrence of `/', skip
- it. */
- if (*p == '@')
- return p - url + 1;
- else
+
+ /* Look for '@' that comes before '/' or '?'. */
+ p = (const char *)strpbrk (url, "/?@");
+ if (!p || *p != '@')
return 0;
-}
-\f
-/* Allocate a new urlinfo structure, fill it with default values and
- return a pointer to it. */
-struct urlinfo *
-newurl (void)
-{
- struct urlinfo *u;
- u = (struct urlinfo *)xmalloc (sizeof (struct urlinfo));
- memset (u, 0, sizeof (*u));
- u->proto = URLUNKNOWN;
- return u;
+ return p - url + 1;
}
-/* Perform a "deep" free of the urlinfo structure. The structure
- should have been created with newurl, but need not have been used.
- If free_pointer is non-0, free the pointer itself. */
-void
-freeurl (struct urlinfo *u, int complete)
-{
- assert (u != NULL);
- FREE_MAYBE (u->url);
- FREE_MAYBE (u->host);
- FREE_MAYBE (u->path);
- FREE_MAYBE (u->file);
- FREE_MAYBE (u->dir);
- FREE_MAYBE (u->user);
- FREE_MAYBE (u->passwd);
- FREE_MAYBE (u->local);
- FREE_MAYBE (u->referer);
- if (u->proxy)
- freeurl (u->proxy, 1);
- if (complete)
- free (u);
- return;
-}
-\f
-/* Extract the given URL of the form
- (http:|ftp:)// (user (:password)?@)?hostname (:port)? (/path)?
- 1. hostname (terminated with `/' or `:')
- 2. port number (terminated with `/'), or chosen for the protocol
- 3. dirname (everything after hostname)
- Most errors are handled. No allocation is done, you must supply
- pointers to allocated memory.
- ...and a host of other stuff :-)
-
- - Recognizes hostname:dir/file for FTP and
- hostname (:portnum)?/dir/file for HTTP.
- - Parses the path to yield directory and file
- - Parses the URL to yield the username and passwd (if present)
- - Decodes the strings, in case they contain "forbidden" characters
- - Writes the result to struct urlinfo
-
- If the argument STRICT is set, it recognizes only the canonical
- form. */
-uerr_t
-parseurl (const char *url, struct urlinfo *u, int strict)
+static int
+parse_uname (const char *str, int len, char **user, char **passwd)
{
- int i, l, abs_ftp;
- int recognizable; /* Recognizable URL is the one where
- the protocol name was explicitly
- named, i.e. it wasn't deduced from
- the URL format. */
- uerr_t type;
-
- DEBUGP (("parseurl (\"%s\") -> ", url));
- url += skip_url (url);
- recognizable = has_proto (url);
- if (strict && !recognizable)
- return URLUNKNOWN;
- for (i = 0, l = 0; i < ARRAY_SIZE (sup_protos); i++)
+ char *colon;
+
+ if (len == 0)
+ /* Empty user name not allowed. */
+ return 0;
+
+ colon = memchr (str, ':', len);
+ if (colon == str)
+ /* Empty user name again. */
+ return 0;
+
+ if (colon)
{
- l = strlen (sup_protos[i].name);
- if (!strncasecmp (sup_protos[i].name, url, l))
- break;
+ int pwlen = len - (colon + 1 - str);
+ *passwd = xmalloc (pwlen + 1);
+ memcpy (*passwd, colon + 1, pwlen);
+ (*passwd)[pwlen] = '\0';
+ len -= pwlen + 1;
}
- /* If protocol is recognizable, but unsupported, bail out, else
- suppose unknown. */
- if (recognizable && !sup_protos[i].name)
- return URLUNKNOWN;
- else if (i == ARRAY_SIZE (sup_protos))
- type = URLUNKNOWN;
else
- u->proto = type = sup_protos[i].ind;
-
- if (type == URLUNKNOWN)
- l = 0;
- /* Allow a username and password to be specified (i.e. just skip
- them for now). */
- if (recognizable)
- l += skip_uname (url + l);
- for (i = l; url[i] && url[i] != ':' && url[i] != '/'; i++);
- if (i == l)
- return URLBADHOST;
- /* Get the hostname. */
- u->host = strdupdelim (url + l, url + i);
- DEBUGP (("host %s -> ", u->host));
-
- /* Assume no port has been given. */
- u->port = 0;
- if (url[i] == ':')
- {
- /* We have a colon delimiting the hostname. It could mean that
- a port number is following it, or a directory. */
- if (ISDIGIT (url[++i])) /* A port number */
- {
- if (type == URLUNKNOWN)
- u->proto = type = URLHTTP;
- for (; url[i] && url[i] != '/'; i++)
- if (ISDIGIT (url[i]))
- u->port = 10 * u->port + (url[i] - '0');
- else
- return URLBADPORT;
- if (!u->port)
- return URLBADPORT;
- DEBUGP (("port %hu -> ", u->port));
- }
- else if (type == URLUNKNOWN) /* or a directory */
- u->proto = type = URLFTP;
- else /* or just a misformed port number */
- return URLBADPORT;
- }
- else if (type == URLUNKNOWN)
- u->proto = type = URLHTTP;
- if (!u->port)
+ *passwd = NULL;
+
+ *user = xmalloc (len + 1);
+ memcpy (*user, str, len);
+ (*user)[len] = '\0';
+
+ return 1;
+}
+
+/* Used by main.c: detect URLs written using the "shorthand" URL forms
+ popularized by Netscape and NcFTP. HTTP shorthands look like this:
+
+ www.foo.com[:port]/dir/file -> http://www.foo.com[:port]/dir/file
+ www.foo.com[:port] -> http://www.foo.com[:port]
+
+ FTP shorthands look like this:
+
+ foo.bar.com:dir/file -> ftp://foo.bar.com/dir/file
+ foo.bar.com:/absdir/file -> ftp://foo.bar.com//absdir/file
+
+   If the URL does not need to be rewritten, or cannot be, return NULL.  */
+char *
+rewrite_shorthand_url (const char *url)
+{
+ const char *p;
+
+ if (url_has_scheme (url))
+ return NULL;
+
+ /* Look for a ':' or '/'. The former signifies NcFTP syntax, the
+ latter Netscape. */
+ for (p = url; *p && *p != ':' && *p != '/'; p++)
+ ;
+
+ if (p == url)
+ return NULL;
+
+ if (*p == ':')
{
- int i;
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- if (sup_protos[i].ind == type)
- break;
- if (i == ARRAY_SIZE (sup_protos))
- return URLUNKNOWN;
- u->port = sup_protos[i].port;
+ const char *pp, *path;
+ char *res;
+ /* If the characters after the colon and before the next slash
+ or end of string are all digits, it's HTTP. */
+ int digits = 0;
+ for (pp = p + 1; ISDIGIT (*pp); pp++)
+ ++digits;
+ if (digits > 0
+ && (*pp == '/' || *pp == '\0'))
+ goto http;
+
+ /* Prepend "ftp://" to the entire URL... */
+ path = p + 1;
+ res = xmalloc (6 + strlen (url) + 1);
+ sprintf (res, "ftp://%s", url);
+ /* ...and replace ':' with '/'. */
+ res[6 + (p - url)] = '/';
+ return res;
}
- /* Some delimiter troubles... */
- if (url[i] == '/' && url[i - 1] != ':')
- ++i;
- if (type == URLHTTP)
- while (url[i] && url[i] == '/')
- ++i;
- u->path = (char *)xmalloc (strlen (url + i) + 8);
- strcpy (u->path, url + i);
- if (type == URLFTP)
+ else
{
- u->ftp_type = process_ftp_type (u->path);
- /* #### We don't handle type `d' correctly yet. */
- if (!u->ftp_type || TOUPPER (u->ftp_type) == 'D')
- u->ftp_type = 'I';
+ char *res;
+ http:
+ /* Just prepend "http://" to what we have. */
+ res = xmalloc (7 + strlen (url) + 1);
+ sprintf (res, "http://%s", url);
+ return res;
}
- DEBUGP (("opath %s -> ", u->path));
- /* Parse the username and password (if existing). */
- parse_uname (url, &u->user, &u->passwd);
- /* Decode the strings, as per RFC 1738. */
- decode_string (u->host);
- decode_string (u->path);
- if (u->user)
- decode_string (u->user);
- if (u->passwd)
- decode_string (u->passwd);
- /* Parse the directory. */
- parse_dir (u->path, &u->dir, &u->file);
- DEBUGP (("dir %s -> file %s -> ", u->dir, u->file));
- /* Simplify the directory. */
- path_simplify (u->dir);
- /* Remove the leading `/' in HTTP. */
- if (type == URLHTTP && *u->dir == '/')
- strcpy (u->dir, u->dir + 1);
- DEBUGP (("ndir %s\n", u->dir));
- /* Strip trailing `/'. */
- l = strlen (u->dir);
- if (l && u->dir[l - 1] == '/')
- u->dir[l - 1] = '\0';
- /* Re-create the path: */
- abs_ftp = (u->proto == URLFTP && *u->dir == '/');
- /* sprintf (u->path, "%s%s%s%s", abs_ftp ? "%2F": "/",
- abs_ftp ? (u->dir + 1) : u->dir, *u->dir ? "/" : "", u->file); */
- strcpy (u->path, abs_ftp ? "%2F" : "/");
- strcat (u->path, abs_ftp ? (u->dir + 1) : u->dir);
- strcat (u->path, *u->dir ? "/" : "");
- strcat (u->path, u->file);
- URL_CLEANSE (u->path);
- /* Create the clean URL. */
- u->url = str_url (u, 0);
- return URLOK;
}
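Sketched behavior with hypothetical host names, one call per shorthand family described above; the caller owns the returned string:

    char *h = rewrite_shorthand_url ("www.gnu.org/software/wget/");
    /* h == "http://www.gnu.org/software/wget/"  (Netscape style) */
    char *f = rewrite_shorthand_url ("ftp.gnu.org:pub/gnu/");
    /* f == "ftp://ftp.gnu.org/pub/gnu/"  (NcFTP style; the ':' became '/') */
    char *n = rewrite_shorthand_url ("http://www.gnu.org/");
    /* n == NULL: the URL already carries a scheme, nothing to rewrite */
    xfree (h);
    xfree (f);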
\f
-/* Build the directory and filename components of the path. Both
- components are *separately* malloc-ed strings! It does not change
- the contents of path.
+static void parse_path PARAMS ((const char *, char **, char **));
- If the path ends with "." or "..", they are (correctly) counted as
- directories. */
-static void
-parse_dir (const char *path, char **dir, char **file)
+static char *
+strpbrk_or_eos (const char *s, const char *accept)
+{
+ char *p = strpbrk (s, accept);
+ if (!p)
+ p = (char *)s + strlen (s);
+ return p;
+}
+
+static char *parse_errors[] = {
+#define PE_NO_ERROR 0
+ "No error",
+#define PE_UNRECOGNIZED_SCHEME 1
+ "Unrecognized scheme",
+#define PE_EMPTY_HOST 2
+ "Empty host",
+#define PE_BAD_PORT_NUMBER 3
+ "Bad port number",
+#define PE_INVALID_USER_NAME 4
+ "Invalid user name"
+};
+
+#define SETERR(p, v) do { \
+ if (p) \
+ *(p) = (v); \
+} while (0)
+
+/* Parse a URL.
+
+ Return a new struct url if successful, NULL on error. In case of
+ error, and if ERROR is not NULL, also set *ERROR to the appropriate
+ error code. */
+struct url *
+url_parse (const char *url, int *error)
{
- int i, l;
+ struct url *u;
+ const char *p;
+
+ enum url_scheme scheme;
+
+ const char *uname_b, *uname_e;
+ const char *host_b, *host_e;
+ const char *path_b, *path_e;
+ const char *params_b, *params_e;
+ const char *query_b, *query_e;
+ const char *fragment_b, *fragment_e;
+
+ int port;
+ char *user = NULL, *passwd = NULL;
- for (i = l = strlen (path); i && path[i] != '/'; i--);
- if (!i && *path != '/') /* Just filename */
+ const char *url_orig = url;
+
+ p = url = reencode_string (url);
+
+ scheme = url_scheme (url);
+ if (scheme == SCHEME_INVALID)
{
- if (DOTP (path) || DDOTP (path))
- {
- *dir = xstrdup (path);
- *file = xstrdup ("");
- }
- else
- {
- *dir = xstrdup (""); /* This is required because of FTP */
- *file = xstrdup (path);
- }
+ SETERR (error, PE_UNRECOGNIZED_SCHEME);
+ return NULL;
}
- else if (!i) /* /filename */
+
+ p += strlen (supported_schemes[scheme].leading_string);
+ uname_b = p;
+ p += url_skip_uname (p);
+ uname_e = p;
+
+ /* scheme://user:pass@host[:port]... */
+ /* ^ */
+
+ /* We attempt to break down the URL into the components path,
+ params, query, and fragment. They are ordered like this:
+
+ scheme://host[:port][/path][;params][?query][#fragment] */
+
+ params_b = params_e = NULL;
+ query_b = query_e = NULL;
+ fragment_b = fragment_e = NULL;
+
+ host_b = p;
+ p = strpbrk_or_eos (p, ":/;?#");
+ host_e = p;
+
+ if (host_b == host_e)
{
- if (DOTP (path + 1) || DDOTP (path + 1))
- {
- *dir = xstrdup (path);
- *file = xstrdup ("");
- }
- else
- {
- *dir = xstrdup ("/");
- *file = xstrdup (path + 1);
- }
+ SETERR (error, PE_EMPTY_HOST);
+ return NULL;
}
- else /* Nonempty directory with or without a filename */
+
+ port = scheme_default_port (scheme);
+ if (*p == ':')
{
- if (DOTP (path + i + 1) || DDOTP (path + i + 1))
- {
- *dir = xstrdup (path);
- *file = xstrdup ("");
- }
- else
+ const char *port_b, *port_e, *pp;
+
+ /* scheme://host:port/tralala */
+ /* ^ */
+ ++p;
+ port_b = p;
+ p = strpbrk_or_eos (p, "/;?#");
+ port_e = p;
+
+ if (port_b == port_e)
{
- *dir = strdupdelim (path, path + i);
- *file = strdupdelim (path + i + 1, path + l + 1);
+ /* http://host:/whatever */
+ /* ^ */
+ SETERR (error, PE_BAD_PORT_NUMBER);
+ return NULL;
}
- }
-}
-/* Find the optional username and password within the URL, as per
- RFC1738. The returned user and passwd char pointers are
- malloc-ed. */
-static uerr_t
-parse_uname (const char *url, char **user, char **passwd)
-{
- int l;
- const char *p, *col;
- char **where;
-
- *user = NULL;
- *passwd = NULL;
- url += skip_url (url);
- /* Look for end of protocol string. */
- l = skip_proto (url);
- if (!l)
- return URLUNKNOWN;
- /* Add protocol offset. */
- url += l;
- /* Is there an `@' character? */
- for (p = url; *p && *p != '/'; p++)
- if (*p == '@')
- break;
- /* If not, return. */
- if (*p != '@')
- return URLOK;
- /* Else find the username and password. */
- for (p = col = url; *p != '@'; p++)
- {
- if (*p == ':' && !*user)
+ for (port = 0, pp = port_b; pp < port_e; pp++)
{
- *user = (char *)xmalloc (p - url + 1);
- memcpy (*user, url, p - url);
- (*user)[p - url] = '\0';
- col = p + 1;
+ if (!ISDIGIT (*pp))
+ {
+ /* http://host:12randomgarbage/blah */
+ /* ^ */
+ SETERR (error, PE_BAD_PORT_NUMBER);
+ return NULL;
+ }
+ port = 10 * port + (*pp - '0');
}
}
- /* Decide whether you have only the username or both. */
- where = *user ? passwd : user;
- *where = (char *)xmalloc (p - col + 1);
- memcpy (*where, col, p - col);
- (*where)[p - col] = '\0';
- return URLOK;
-}
-
-/* If PATH ends with `;type=X', return the character X. */
-static char
-process_ftp_type (char *path)
-{
- int len = strlen (path);
- if (len >= 7
- && !memcmp (path + len - 7, ";type=", 6))
+ if (*p == '/')
{
- path[len - 7] = '\0';
- return path[len - 1];
+ ++p;
+ path_b = p;
+ p = strpbrk_or_eos (p, ";?#");
+ path_e = p;
}
else
- return '\0';
-}
-\f
-/* Return the URL as fine-formed string, with a proper protocol,
- optional port number, directory and optional user/password. If
- HIDE is non-zero, password will be hidden. The forbidden
- characters in the URL will be cleansed. */
-char *
-str_url (const struct urlinfo *u, int hide)
-{
- char *res, *host, *user, *passwd, *proto_name, *dir, *file;
- int i, l, ln, lu, lh, lp, lf, ld;
- unsigned short proto_default_port;
+ {
+      /* The path is never allowed to be missing, only empty. */
+ path_b = path_e = p;
+ }
- /* Look for the protocol name. */
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- if (sup_protos[i].ind == u->proto)
- break;
- if (i == ARRAY_SIZE (sup_protos))
- return NULL;
- proto_name = sup_protos[i].name;
- proto_default_port = sup_protos[i].port;
- host = CLEANDUP (u->host);
- dir = CLEANDUP (u->dir);
- file = CLEANDUP (u->file);
- user = passwd = NULL;
- if (u->user)
- user = CLEANDUP (u->user);
- if (u->passwd)
+ if (*p == ';')
{
- int i;
- passwd = CLEANDUP (u->passwd);
- if (hide)
- for (i = 0; passwd[i]; i++)
- passwd[i] = 'x';
+ ++p;
+ params_b = p;
+ p = strpbrk_or_eos (p, "?#");
+ params_e = p;
}
- if (u->proto == URLFTP && *dir == '/')
+ if (*p == '?')
{
- char *tmp = (char *)xmalloc (strlen (dir) + 3);
- /*sprintf (tmp, "%%2F%s", dir + 1);*/
- tmp[0] = '%';
- tmp[1] = '2';
- tmp[2] = 'F';
- strcpy (tmp + 3, dir + 1);
- free (dir);
- dir = tmp;
+ ++p;
+ query_b = p;
+ p = strpbrk_or_eos (p, "#");
+ query_e = p;
}
+ if (*p == '#')
+ {
+ ++p;
+ fragment_b = p;
+ p += strlen (p);
+ fragment_e = p;
+ }
+ assert (*p == 0);
- ln = strlen (proto_name);
- lu = user ? strlen (user) : 0;
- lp = passwd ? strlen (passwd) : 0;
- lh = strlen (host);
- ld = strlen (dir);
- lf = strlen (file);
- res = (char *)xmalloc (ln + lu + lp + lh + ld + lf + 20); /* safe sex */
- /* sprintf (res, "%s%s%s%s%s%s:%d/%s%s%s", proto_name,
- (user ? user : ""), (passwd ? ":" : ""),
- (passwd ? passwd : ""), (user ? "@" : ""),
- host, u->port, dir, *dir ? "/" : "", file); */
- l = 0;
- memcpy (res, proto_name, ln);
- l += ln;
- if (user)
+ if (uname_b != uname_e)
{
- memcpy (res + l, user, lu);
- l += lu;
- if (passwd)
+ /* http://user:pass@host */
+ /* ^ ^ */
+ /* uname_b uname_e */
+ if (!parse_uname (uname_b, uname_e - uname_b - 1, &user, &passwd))
{
- res[l++] = ':';
- memcpy (res + l, passwd, lp);
- l += lp;
+ SETERR (error, PE_INVALID_USER_NAME);
+ return NULL;
}
- res[l++] = '@';
}
- memcpy (res + l, host, lh);
- l += lh;
- if (u->port != proto_default_port)
- {
- res[l++] = ':';
- long_to_string (res + l, (long)u->port);
- l += numdigit (u->port);
- }
- res[l++] = '/';
- memcpy (res + l, dir, ld);
- l += ld;
- if (*dir)
- res[l++] = '/';
- strcpy (res + l, file);
- free (host);
- free (dir);
- free (file);
- FREE_MAYBE (user);
- FREE_MAYBE (passwd);
- return res;
+
+ u = (struct url *)xmalloc (sizeof (struct url));
+ memset (u, 0, sizeof (*u));
+
+ if (url == url_orig)
+ u->url = xstrdup (url);
+ else
+ u->url = (char *)url;
+
+ u->scheme = scheme;
+ u->host = strdupdelim (host_b, host_e);
+ u->port = port;
+ u->user = user;
+ u->passwd = passwd;
+
+ u->path = strdupdelim (path_b, path_e);
+ path_simplify (u->path);
+
+ if (params_b)
+ u->params = strdupdelim (params_b, params_e);
+ if (query_b)
+ u->query = strdupdelim (query_b, query_e);
+ if (fragment_b)
+ u->fragment = strdupdelim (fragment_b, fragment_e);
+
+ parse_path (u->path, &u->dir, &u->file);
+
+ return u;
}
-/* Check whether two URL-s are equivalent, i.e. pointing to the same
- location. Uses parseurl to parse them, and compares the canonical
- forms.
+const char *
+url_error (int error_code)
+{
+ assert (error_code >= 0 && error_code < ARRAY_SIZE (parse_errors));
+ return parse_errors[error_code];
+}
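A usage sketch with a made-up URL, showing how the components end up in struct url and how parse failures are reported:

    int err;
    struct url *u = url_parse ("http://luser@www.gnu.org:8001/dir/file;type=a?x=1#top",
                               &err);
    if (!u)
      logprintf (LOG_NOTQUIET, "Bad URL: %s\n", url_error (err));
    else
      {
        /* u->host   == "www.gnu.org"   u->port   == 8001
           u->user   == "luser"         u->passwd == NULL
           u->path   == "dir/file"      u->dir    == "dir"     u->file == "file"
           u->params == "type=a"        u->query  == "x=1"     u->fragment == "top" */
        url_free (u);
      }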
- Returns 1 if the URL1 is equivalent to URL2, 0 otherwise. Also
- return 0 on error. */
-int
-url_equal (const char *url1, const char *url2)
+static void
+parse_path (const char *quoted_path, char **dir, char **file)
{
- struct urlinfo *u1, *u2;
- uerr_t err;
- int res;
+ char *path, *last_slash;
+
+ STRDUP_ALLOCA (path, quoted_path);
+ decode_string (path);
- u1 = newurl ();
- err = parseurl (url1, u1, 0);
- if (err != URLOK)
+ last_slash = strrchr (path, '/');
+ if (!last_slash)
{
- freeurl (u1, 1);
- return 0;
+ *dir = xstrdup ("");
+ *file = xstrdup (path);
}
- u2 = newurl ();
- err = parseurl (url2, u2, 0);
- if (err != URLOK)
+ else
{
- freeurl (u2, 1);
- return 0;
+ *dir = strdupdelim (path, last_slash);
+ *file = xstrdup (last_slash + 1);
}
- res = !strcmp (u1->url, u2->url);
- freeurl (u1, 1);
- freeurl (u2, 1);
- return res;
}
-\f
-/* Find URL of format scheme:hostname[:port]/dir in a buffer. The
- buffer may contain pretty much anything; no errors are signaled. */
-static const char *
-findurl (const char *buf, int howmuch, int *count)
+
+/* Note: URL's "full path" is the path with the query string and
+ params appended. The "fragment" (#foo) is intentionally ignored,
+ but that might be changed. For example, if the original URL was
+ "http://host:port/foo/bar/baz;bullshit?querystring#uselessfragment",
+ the full path will be "/foo/bar/baz;bullshit?querystring". */
+
+/* Return the length of the full path, without the terminating
+ zero. */
+
+static int
+full_path_length (const struct url *url)
{
- char **prot;
- const char *s1, *s2;
-
- for (s1 = buf; howmuch; s1++, howmuch--)
- for (prot = protostrings; *prot; prot++)
- if (howmuch <= strlen (*prot))
- continue;
- else if (!strncasecmp (*prot, s1, strlen (*prot)))
- {
- for (s2 = s1, *count = 0;
- howmuch && *s2 && *s2 >= 32 && *s2 < 127 && !ISSPACE (*s2) &&
- !strchr (URL_SEPARATOR, *s2);
- s2++, (*count)++, howmuch--);
- return s1;
- }
- return NULL;
+ int len = 0;
+
+#define FROB(el) if (url->el) len += 1 + strlen (url->el)
+
+ FROB (path);
+ FROB (params);
+ FROB (query);
+
+#undef FROB
+
+ return len;
}
-/* Scans the file for signs of URL-s. Returns a vector of pointers,
- each pointer representing a URL string. The file is *not* assumed
- to be HTML. */
-urlpos *
-get_urls_file (const char *file)
+/* Write out the full path. */
+
+static void
+full_path_write (const struct url *url, char *where)
{
- long nread;
- FILE *fp;
- char *buf;
- const char *pbuf;
- int size;
- urlpos *first, *current, *old;
+#define FROB(el, chr) do { \
+ char *f_el = url->el; \
+ if (f_el) { \
+ int l = strlen (f_el); \
+ *where++ = chr; \
+ memcpy (where, f_el, l); \
+ where += l; \
+ } \
+} while (0)
+
+ FROB (path, '/');
+ FROB (params, ';');
+ FROB (query, '?');
+
+#undef FROB
+}
+
+/* Public function for getting the "full path". */
+char *
+url_full_path (const struct url *url)
+{
+ int length = full_path_length (url);
+ char *full_path = (char *)xmalloc(length + 1);
+
+ full_path_write (url, full_path);
+ full_path[length] = '\0';
+
+ return full_path;
+}
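Continuing the url_parse() sketch above: for that URL the full path comes back as a single malloc'd string, fragment excluded.

    char *fp = url_full_path (u);   /* "/dir/file;type=a?x=1" */
    xfree (fp);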
- if (file && !HYPHENP (file))
+/* Sync u->path and u->url with u->dir and u->file. */
+static void
+sync_path (struct url *url)
+{
+ char *newpath;
+
+ xfree (url->path);
+
+ if (!*url->dir)
{
- fp = fopen (file, "rb");
- if (!fp)
- {
- logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
- return NULL;
- }
+ newpath = xstrdup (url->file);
+ REENCODE (newpath);
}
else
- fp = stdin;
- /* Load the file. */
- load_file (fp, &buf, &nread);
- if (file && !HYPHENP (file))
- fclose (fp);
- DEBUGP (("Loaded %s (size %ld).\n", file, nread));
- first = current = NULL;
- /* Fill the linked list with URLs. */
- for (pbuf = buf; (pbuf = findurl (pbuf, nread - (pbuf - buf), &size));
- pbuf += size)
{
- /* Allocate the space. */
- old = current;
- current = (urlpos *)xmalloc (sizeof (urlpos));
- if (old)
- old->next = current;
- memset (current, 0, sizeof (*current));
- current->next = NULL;
- current->url = (char *)xmalloc (size + 1);
- memcpy (current->url, pbuf, size);
- current->url[size] = '\0';
- if (!first)
- first = current;
+ int dirlen = strlen (url->dir);
+ int filelen = strlen (url->file);
+
+ newpath = xmalloc (dirlen + 1 + filelen + 1);
+ memcpy (newpath, url->dir, dirlen);
+ newpath[dirlen] = '/';
+ memcpy (newpath + dirlen + 1, url->file, filelen);
+ newpath[dirlen + 1 + filelen] = '\0';
+ REENCODE (newpath);
}
- /* Free the buffer. */
- free (buf);
- return first;
+ url->path = newpath;
+
+ /* Synchronize u->url. */
+ xfree (url->url);
+ url->url = url_string (url, 0);
}
-/* Similar to get_urls_file, but for HTML files. FILE is scanned as
- an HTML document using htmlfindurl(), which see. get_urls_html()
- constructs the HTML-s from the relative href-s.
+/* Mutators. Code in ftp.c insists on changing u->dir and u->file.
+ This way we can sync u->path and u->url when they get changed. */
+
+void
+url_set_dir (struct url *url, const char *newdir)
+{
+ xfree (url->dir);
+ url->dir = xstrdup (newdir);
+ sync_path (url);
+}
+
+void
+url_set_file (struct url *url, const char *newfile)
+{
+ xfree (url->file);
+ url->file = xstrdup (newfile);
+ sync_path (url);
+}
- If SILENT is non-zero, do not barf on baseless relative links. */
+void
+url_free (struct url *url)
+{
+ xfree (url->host);
+ xfree (url->path);
+ xfree (url->url);
+
+ FREE_MAYBE (url->params);
+ FREE_MAYBE (url->query);
+ FREE_MAYBE (url->fragment);
+ FREE_MAYBE (url->user);
+ FREE_MAYBE (url->passwd);
+ FREE_MAYBE (url->dir);
+ FREE_MAYBE (url->file);
+
+ xfree (url);
+}
+\f
urlpos *
-get_urls_html (const char *file, const char *this_url, int silent,
- int dash_p_leaf_HTML)
+get_urls_file (const char *file)
{
- long nread;
- FILE *fp;
- char *orig_buf;
- const char *buf;
- int step, first_time;
- urlpos *first, *current, *old;
-
- if (file && !HYPHENP (file))
+ struct file_memory *fm;
+ urlpos *head, *tail;
+ const char *text, *text_end;
+
+ /* Load the file. */
+ fm = read_file (file);
+ if (!fm)
{
- fp = fopen (file, "rb");
- if (!fp)
- {
- logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
- return NULL;
- }
+ logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
+ return NULL;
}
- else
- fp = stdin;
- /* Load the file. */
- load_file (fp, &orig_buf, &nread);
- if (file && !HYPHENP (file))
- fclose (fp);
- DEBUGP (("Loaded HTML file %s (size %ld).\n", file, nread));
- first = current = NULL;
- first_time = 1;
- /* Iterate over the URLs in BUF, picked by htmlfindurl(). */
- for (buf = orig_buf;
- (buf = htmlfindurl (buf, nread - (buf - orig_buf), &step, first_time,
- dash_p_leaf_HTML));
- buf += step)
+ DEBUGP (("Loaded %s (size %ld).\n", file, fm->length));
+ head = tail = NULL;
+ text = fm->content;
+ text_end = fm->content + fm->length;
+ while (text < text_end)
{
- int i, no_proto;
- int size = step;
- const char *pbuf = buf;
- char *constr, *base;
- const char *cbase;
- char *needs_freeing, *url_data;
-
- first_time = 0;
-
- /* A frequent phenomenon that needs to be handled are pages
- generated by brain-damaged HTML generators, which refer to to
- URI-s as <a href="<spaces>URI<spaces>">. We simply ignore
- any spaces at the beginning or at the end of the string.
- This is probably not strictly correct, but that's what the
- browsers do, so we may follow. May the authors of "WYSIWYG"
- HTML tools burn in hell for the damage they've inflicted! */
- while ((pbuf < buf + step) && ISSPACE (*pbuf))
- {
- ++pbuf;
- --size;
- }
- while (size && ISSPACE (pbuf[size - 1]))
- --size;
- if (!size)
- break;
-
- /* It would be nice if we could avoid allocating memory in this
- loop, but I don't see an easy way. To process the entities,
- we need to either copy the data, or change it destructively.
- I choose the former.
-
- We have two pointers: needs_freeing and url_data, because the
- code below does thing like url_data += <something>, and we
- want to pass the original string to free(). */
- needs_freeing = url_data = html_decode_entities (pbuf, pbuf + size);
- size = strlen (url_data);
-
- for (i = 0; protostrings[i]; i++)
- {
- if (!strncasecmp (protostrings[i], url_data,
- MINVAL (strlen (protostrings[i]), size)))
- break;
- }
- /* Check for http:RELATIVE_URI. See below for details. */
- if (protostrings[i]
- && !(strncasecmp (url_data, "http:", 5) == 0
- && strncasecmp (url_data, "http://", 7) != 0))
- {
- no_proto = 0;
- }
+ const char *line_beg = text;
+ const char *line_end = memchr (text, '\n', text_end - text);
+ if (!line_end)
+ line_end = text_end;
else
+ ++line_end;
+ text = line_end;
+ while (line_beg < line_end
+ && ISSPACE (*line_beg))
+ ++line_beg;
+ while (line_end > line_beg + 1
+ && ISSPACE (*(line_end - 1)))
+ --line_end;
+ if (line_end > line_beg)
{
- no_proto = 1;
- /* This is for extremely brain-damaged pages that refer to
- relative URI-s as <a href="http:URL">. Just strip off the
- silly leading "http:" (as well as any leading blanks
- before it). */
- if ((size > 5) && !strncasecmp ("http:", url_data, 5))
- url_data += 5, size -= 5;
- }
- if (!no_proto)
- {
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- {
- if (!strncasecmp (sup_protos[i].name, url_data,
- MINVAL (strlen (sup_protos[i].name), size)))
- break;
- }
- /* Do *not* accept a non-supported protocol. */
- if (i == ARRAY_SIZE (sup_protos))
- {
- free (needs_freeing);
- continue;
- }
- }
- if (no_proto)
- {
- /* First, construct the base, which can be relative itself.
-
- Criteria for creating the base are:
- 1) html_base created by <base href="...">
- 2) current URL
- 3) base provided from the command line */
- cbase = html_base ();
- if (!cbase)
- cbase = this_url;
- if (!cbase)
- cbase = opt.base_href;
- if (!cbase) /* Error condition -- a baseless
- relative link. */
- {
- if (!opt.quiet && !silent)
- {
- /* Use malloc, not alloca because this is called in
- a loop. */
- char *temp = (char *)malloc (size + 1);
- strncpy (temp, url_data, size);
- temp[size] = '\0';
- logprintf (LOG_NOTQUIET,
- _("Error (%s): Link %s without a base provided.\n"),
- file, temp);
- free (temp);
- }
- free (needs_freeing);
- continue;
- }
- if (this_url)
- base = construct (this_url, cbase, strlen (cbase),
- !has_proto (cbase));
+ urlpos *entry = (urlpos *)xmalloc (sizeof (urlpos));
+ memset (entry, 0, sizeof (*entry));
+ entry->next = NULL;
+ entry->url = strdupdelim (line_beg, line_end);
+ if (!head)
+ head = entry;
else
- {
- /* Base must now be absolute, with host name and
- protocol. */
- if (!has_proto (cbase))
- {
- logprintf (LOG_NOTQUIET, _("\
-Error (%s): Base %s relative, without referer URL.\n"),
- file, cbase);
- free (needs_freeing);
- continue;
- }
- base = xstrdup (cbase);
- }
- constr = construct (base, url_data, size, no_proto);
- free (base);
- }
- else /* has proto */
- {
- constr = (char *)xmalloc (size + 1);
- strncpy (constr, url_data, size);
- constr[size] = '\0';
- }
-#ifdef DEBUG
- if (opt.debug)
- {
- char *tmp;
- const char *tmp2;
-
- tmp2 = html_base ();
- /* Use malloc, not alloca because this is called in a loop. */
- tmp = (char *)xmalloc (size + 1);
- strncpy (tmp, url_data, size);
- tmp[size] = '\0';
- logprintf (LOG_ALWAYS,
- "file %s; this_url %s; base %s\nlink: %s; constr: %s\n",
- file, this_url ? this_url : "(null)",
- tmp2 ? tmp2 : "(null)", tmp, constr);
- free (tmp);
+ tail->next = entry;
+ tail = entry;
}
-#endif
-
- /* Allocate the space. */
- old = current;
- current = (urlpos *)xmalloc (sizeof (urlpos));
- if (old)
- old->next = current;
- if (!first)
- first = current;
- /* Fill the values. */
- memset (current, 0, sizeof (*current));
- current->next = NULL;
- current->url = constr;
- current->size = step;
- current->pos = buf - orig_buf;
- /* A URL is relative if the host and protocol are not named,
- and the name does not start with `/'. */
- if (no_proto && *url_data != '/')
- current->flags |= (URELATIVE | UNOPROTO);
- else if (no_proto)
- current->flags |= UNOPROTO;
- free (needs_freeing);
}
- free (orig_buf);
-
- return first;
+ read_file_free (fm);
+ return head;
}
\f
/* Free the linked list of urlpos. */
while (l)
{
urlpos *next = l->next;
- free (l->url);
+ xfree (l->url);
FREE_MAYBE (l->local_name);
- free (l);
+ xfree (l);
l = next;
}
}
{
if (S_ISDIR (st.st_mode))
{
- free (t);
+ xfree (t);
return 0;
}
else
res = make_directory (t);
if (res != 0)
logprintf (LOG_NOTQUIET, "%s: %s", t, strerror (errno));
- free (t);
+ xfree (t);
return res;
}
/* Return the path name of the URL-equivalent file name, with a
remote-like structure of directories. */
static char *
-mkstruct (const struct urlinfo *u)
+mkstruct (const struct url *u)
{
char *host, *dir, *file, *res, *dirpref;
int l;
- assert (u->dir != NULL);
- assert (u->host != NULL);
-
if (opt.cut_dirs)
{
char *ptr = u->dir + (*u->dir == '/');
if (opt.add_hostdir && !opt.simple_check)
{
char *nhost = realhost (host);
- free (host);
+ xfree (host);
host = nhost;
}
/* Add dir_prefix and hostname (if required) to the beginning of
else
dirpref = "";
}
- free (host);
+ xfree (host);
/* If there is a prefix, prepend it. */
if (*dirpref)
sprintf (newdir, "%s%s%s", dirpref, *dir == '/' ? "" : "/", dir);
dir = newdir;
}
- dir = xstrdup (dir);
- URL_CLEANSE (dir);
+ dir = encode_string (dir);
l = strlen (dir);
if (l && dir[l - 1] == '/')
dir[l - 1] = '\0';
/* Finally, construct the full name. */
res = (char *)xmalloc (strlen (dir) + 1 + strlen (file) + 1);
sprintf (res, "%s%s%s", dir, *dir ? "/" : "", file);
- free (dir);
+ xfree (dir);
return res;
}
+/* Compose a file name out of BASE, an unescaped file name, and QUERY,
+ an escaped query string. The trick is to make sure that unsafe
+ characters in BASE are escaped, and that slashes in QUERY are also
+ escaped. */
+
+static char *
+compose_file_name (char *base, char *query)
+{
+ char result[256];
+ char *from;
+ char *to = result;
+
+  /* Copy BASE to RESULT and encode all unsafe characters, leaving
+     enough room for the three bytes a %XX escape expands to. */
+  from = base;
+  while (*from && to - result < sizeof (result) - 3)
+ {
+ if (UNSAFE_CHAR (*from))
+ {
+ const unsigned char c = *from++;
+ *to++ = '%';
+ *to++ = XDIGIT_TO_XCHAR (c >> 4);
+ *to++ = XDIGIT_TO_XCHAR (c & 0xf);
+ }
+ else
+ *to++ = *from++;
+ }
+
+ if (query && to - result < sizeof (result))
+ {
+ *to++ = '?';
+
+ /* Copy QUERY to RESULT and encode all '/' characters. */
+ from = query;
+      while (*from && to - result < sizeof (result) - 3)
+ {
+ if (*from == '/')
+ {
+ *to++ = '%';
+ *to++ = '2';
+ *to++ = 'F';
+ ++from;
+ }
+ else
+ *to++ = *from++;
+ }
+ }
+
+ if (to - result < sizeof (result))
+ *to = '\0';
+ else
+ /* Truncate input which is too long, presumably due to a huge
+ query string. */
+ result[sizeof (result) - 1] = '\0';
+
+ return xstrdup (result);
+}
+
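An illustrative call (compose_file_name is static, so this would sit inside url.c; the file name and query are invented): unsafe bytes in the base get %-escaped, and slashes in the query stop looking like directory separators.

    char *fname = compose_file_name ("foo bar.html", "q=1/2");
    /* fname == "foo%20bar.html?q=1%2F2" */
    xfree (fname);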
/* Create a unique filename, corresponding to a given URL. Calls
mkstruct if necessary. Does *not* actually create any directories. */
char *
-url_filename (const struct urlinfo *u)
+url_filename (const struct url *u)
{
char *file, *name;
int have_prefix = 0; /* whether we must prepend opt.dir_prefix */
}
else
{
- if (!*u->file)
- file = xstrdup ("index.html");
- else
- file = xstrdup (u->file);
+ char *base = *u->file ? u->file : "index.html";
+ char *query = u->query && *u->query ? u->query : NULL;
+ file = compose_file_name (base, query);
}
if (!have_prefix)
char *nfile = (char *)xmalloc (strlen (opt.dir_prefix)
+ 1 + strlen (file) + 1);
sprintf (nfile, "%s/%s", opt.dir_prefix, file);
- free (file);
+ xfree (file);
file = nfile;
}
}
/* Find a unique name. */
name = unique_name (file);
- free (file);
+ xfree (file);
return name;
}
return strlen (url);
}
+/* Find the last occurrence of character C in the range [b, e), or
+ NULL, if none are present. This is almost completely equivalent to
+   { *e = '\0'; return strrchr(b, c); }, except that it doesn't change
+ the contents of the string. */
static const char *
find_last_char (const char *b, const char *e, char c)
{
return NULL;
}
-/* Construct an absolute URL, given a (possibly) relative one. This
- gets tricky if you want to cover all the "reasonable" cases, but
- I'm satisfied with the result. */
+/* Resolve the result of "linking" a base URI (BASE) to a
+ link-specified URI (LINK).
+
+ Either of the URIs may be absolute or relative, complete with the
+ host name, or path only. This tries to behave "reasonably" in all
+ foreseeable cases. It employs little specific knowledge about
+ schemes or URL-specific stuff -- it just works on strings.
+
+   The parameter LINKLENGTH is useful if LINK is not zero-terminated.
+ See uri_merge for a gentler interface to this functionality.
+
+ #### This function should handle `./' and `../' so that the evil
+ path_simplify can go. */
static char *
-construct (const char *url, const char *sub, int subsize, int no_proto)
+uri_merge_1 (const char *base, const char *link, int linklength, int no_scheme)
{
char *constr;
- if (no_proto)
+ if (no_scheme)
{
- const char *end = url + urlpath_length (url);
+ const char *end = base + urlpath_length (base);
- if (*sub != '/')
+ if (*link != '/')
{
- /* SUB is a relative URL: we need to replace everything
- after last slash (possibly empty) with SUB.
+ /* LINK is a relative URL: we need to replace everything
+ after last slash (possibly empty) with LINK.
- So, if URL is "whatever/foo/bar", and SUB is "qux/xyzzy",
+ So, if BASE is "whatever/foo/bar", and LINK is "qux/xyzzy",
our result should be "whatever/foo/qux/xyzzy". */
int need_explicit_slash = 0;
int span;
const char *start_insert;
- const char *last_slash = find_last_char (url, end, '/'); /* the last slash. */
+ const char *last_slash = find_last_char (base, end, '/');
if (!last_slash)
{
- /* No slash found at all. Append SUB to what we have,
+ /* No slash found at all. Append LINK to what we have,
but we'll need a slash as a separator.
- Example: if url == "foo" and sub == "qux/xyzzy", then
- we cannot just append sub to url, because we'd get
+ Example: if base == "foo" and link == "qux/xyzzy", then
+ we cannot just append link to base, because we'd get
"fooqux/xyzzy", whereas what we want is
"foo/qux/xyzzy".
start_insert = end + 1;
need_explicit_slash = 1;
}
+ else if (last_slash && last_slash != base && *(last_slash - 1) == '/')
+ {
+ /* example: http://host" */
+ /* ^ */
+ start_insert = end + 1;
+ need_explicit_slash = 1;
+ }
else
{
/* example: "whatever/foo/bar" */
start_insert = last_slash + 1;
}
- span = start_insert - url;
- constr = (char *)xmalloc (span + subsize + 1);
+ span = start_insert - base;
+ constr = (char *)xmalloc (span + linklength + 1);
if (span)
- memcpy (constr, url, span);
+ memcpy (constr, base, span);
if (need_explicit_slash)
constr[span - 1] = '/';
- if (subsize)
- memcpy (constr + span, sub, subsize);
- constr[span + subsize] = '\0';
+ if (linklength)
+ memcpy (constr + span, link, linklength);
+ constr[span + linklength] = '\0';
}
- else /* *sub == `/' */
+ else /* *link == `/' */
{
- /* SUB is an absolute path: we need to replace everything
- after (and including) the FIRST slash with SUB.
+ /* LINK is an absolute path: we need to replace everything
+ after (and including) the FIRST slash with LINK.
- So, if URL is "http://host/whatever/foo/bar", and SUB is
+ So, if BASE is "http://host/whatever/foo/bar", and LINK is
"/qux/xyzzy", our result should be
"http://host/qux/xyzzy". */
int span;
- const char *slash, *start_insert;
- const char *pos = url;
+ const char *slash;
+ const char *start_insert = NULL; /* for gcc to shut up. */
+ const char *pos = base;
int seen_slash_slash = 0;
/* We're looking for the first slash, but want to ignore
double slash. */
/* At this point, SLASH is the location of the first / after
"//", or the first slash altogether. START_INSERT is the
- pointer to the location where SUB will be inserted. When
- examining the last two examples, keep in mind that SUB
+ pointer to the location where LINK will be inserted. When
+ examining the last two examples, keep in mind that LINK
begins with '/'. */
if (!slash && !seen_slash_slash)
/* example: "foo" */
/* ^ */
- start_insert = url;
+ start_insert = base;
else if (!slash && seen_slash_slash)
/* example: "http://foo" */
/* ^ */
else if (slash && !seen_slash_slash)
/* example: "foo/bar" */
/* ^ */
- start_insert = url;
+ start_insert = base;
else if (slash && seen_slash_slash)
/* example: "http://something/" */
/* ^ */
start_insert = slash;
- span = start_insert - url;
- constr = (char *)xmalloc (span + subsize + 1);
+ span = start_insert - base;
+ constr = (char *)xmalloc (span + linklength + 1);
if (span)
- memcpy (constr, url, span);
- if (subsize)
- memcpy (constr + span, sub, subsize);
- constr[span + subsize] = '\0';
+ memcpy (constr, base, span);
+ if (linklength)
+ memcpy (constr + span, link, linklength);
+ constr[span + linklength] = '\0';
}
}
- else /* !no_proto */
+ else /* !no_scheme */
{
- constr = strdupdelim (sub, sub + subsize);
+ constr = strdupdelim (link, link + linklength);
}
return constr;
}
-/* Like the function above, but with a saner caller interface. */
+/* Merge BASE with LINK and return the resulting URI. This is an
+ interface to uri_merge_1 that assumes that LINK is a
+ zero-terminated string. */
char *
-url_concat (const char *base_url, const char *new_url)
+uri_merge (const char *base, const char *link)
{
- return construct (base_url, new_url, strlen (new_url), !has_proto (new_url));
+ return uri_merge_1 (base, link, strlen (link), !url_has_scheme (link));
}
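Sketched merges with hypothetical URLs, covering the three cases uri_merge_1 distinguishes: a relative link, an absolute path, and a link that already has a scheme.

    char *a = uri_merge ("http://www.gnu.org/software/wget/", "manual.html");
    /* a == "http://www.gnu.org/software/wget/manual.html" */
    char *b = uri_merge ("http://www.gnu.org/software/wget/", "/graphics/logo.png");
    /* b == "http://www.gnu.org/graphics/logo.png" */
    char *c = uri_merge ("http://www.gnu.org/", "ftp://ftp.gnu.org/pub/");
    /* c == "ftp://ftp.gnu.org/pub/" -- LINK has a scheme, so it wins outright */
    xfree (a);
    xfree (b);
    xfree (c);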
\f
-/* Optimize URL by host, destructively replacing u->host with realhost
- (u->host). Do this regardless of opt.simple_check. */
-void
-opt_url (struct urlinfo *u)
+#define APPEND(p, s) do { \
+ int len = strlen (s); \
+ memcpy (p, s, len); \
+ p += len; \
+} while (0)
+
+/* Use this instead of the password when the actual password needs to
+   be hidden.  We intentionally use a fixed generic string so that,
+   unlike previous versions, we do not give away the number of
+   characters in the password. */
+#define HIDDEN_PASSWORD "*password*"
+
+/* Recreate the URL string from the data in URL.
+
+   If HIDE_PASSWORD is non-zero (as it is when we're calling this on a
+   URL we plan to print, but not when calling it to canonicalize a URL
+   for use within the program), the password will be hidden.  Unsafe
+ characters in the URL will be quoted. */
+
+char *
+url_string (const struct url *url, int hide_password)
{
- /* Find the "true" host. */
- char *host = realhost (u->host);
- free (u->host);
- u->host = host;
- assert (u->dir != NULL); /* the URL must have been parsed */
- /* Refresh the printed representation. */
- free (u->url);
- u->url = str_url (u, 0);
+ int size;
+ char *result, *p;
+ char *quoted_user = NULL, *quoted_passwd = NULL;
+
+ int scheme_port = supported_schemes[url->scheme].default_port;
+ char *scheme_str = supported_schemes[url->scheme].leading_string;
+ int fplen = full_path_length (url);
+
+ assert (scheme_str != NULL);
+
+ /* Make sure the user name and password are quoted. */
+ if (url->user)
+ {
+ quoted_user = encode_string_maybe (url->user);
+ if (url->passwd)
+ {
+ if (hide_password)
+ quoted_passwd = HIDDEN_PASSWORD;
+ else
+ quoted_passwd = encode_string_maybe (url->passwd);
+ }
+ }
+
+ size = (strlen (scheme_str)
+ + strlen (url->host)
+ + fplen
+ + 1);
+ if (url->port != scheme_port)
+ size += 1 + numdigit (url->port);
+ if (quoted_user)
+ {
+ size += 1 + strlen (quoted_user);
+ if (quoted_passwd)
+ size += 1 + strlen (quoted_passwd);
+ }
+
+ p = result = xmalloc (size);
+
+ APPEND (p, scheme_str);
+ if (quoted_user)
+ {
+ APPEND (p, quoted_user);
+ if (quoted_passwd)
+ {
+ *p++ = ':';
+ APPEND (p, quoted_passwd);
+ }
+ *p++ = '@';
+ }
+
+ APPEND (p, url->host);
+ if (url->port != scheme_port)
+ {
+ *p++ = ':';
+ long_to_string (p, url->port);
+ p += strlen (p);
+ }
+
+ full_path_write (url, p);
+ p += fplen;
+ *p++ = '\0';
+
+ assert (p - result == size);
+
+ if (quoted_user && quoted_user != url->user)
+ xfree (quoted_user);
+ if (quoted_passwd && !hide_password
+ && quoted_passwd != url->passwd)
+ xfree (quoted_passwd);
+
+ return result;
}
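+
+/* As an illustration (the host name and credentials below are made
+   up, and the scheme's default port is assumed to be 80): for a
+   parsed "http://luser:secret@host.example.com/dir/file",
+
+     url_string (url, 1)  =>  "http://luser:*password*@host.example.com/dir/file"
+     url_string (url, 0)  =>  "http://luser:secret@host.example.com/dir/file"
+
+   The port is printed only when it differs from the scheme's
+   default port. */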
\f
-/* Returns proxy host address, in accordance with PROTO. */
+/* Returns proxy host address, in accordance with SCHEME. */
char *
-getproxy (uerr_t proto)
+getproxy (enum url_scheme scheme)
{
- if (proto == URLHTTP)
- return opt.http_proxy ? opt.http_proxy : getenv ("http_proxy");
- else if (proto == URLFTP)
- return opt.ftp_proxy ? opt.ftp_proxy : getenv ("ftp_proxy");
- else
+ char *proxy = NULL;
+ char *rewritten_url;
+ static char rewritten_storage[1024];
+
+ switch (scheme)
+ {
+ case SCHEME_HTTP:
+ proxy = opt.http_proxy ? opt.http_proxy : getenv ("http_proxy");
+ break;
+#ifdef HAVE_SSL
+ case SCHEME_HTTPS:
+ proxy = opt.https_proxy ? opt.https_proxy : getenv ("https_proxy");
+ break;
+#endif
+ case SCHEME_FTP:
+ proxy = opt.ftp_proxy ? opt.ftp_proxy : getenv ("ftp_proxy");
+ break;
+ case SCHEME_INVALID:
+ break;
+ }
+ if (!proxy || !*proxy)
return NULL;
+
+ /* Handle shorthands. */
+ rewritten_url = rewrite_shorthand_url (proxy);
+ if (rewritten_url)
+ {
+ strncpy (rewritten_storage, rewritten_url, sizeof(rewritten_storage));
+ rewritten_storage[sizeof (rewritten_storage) - 1] = '\0';
+ proxy = rewritten_storage;
+ }
+
+ return proxy;
}
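+
+/* A note on the shorthand handling above: rewrite_shorthand_url is
+   assumed to expand proxy settings given without a scheme, so that a
+   value such as "proxy.example.com:8080" ends up being treated like
+   "http://proxy.example.com:8080".  The expanded form is kept in
+   static storage, presumably because callers of getproxy never free
+   the returned string. */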
/* Should a host be accessed through proxy, concerning no_proxy? */
return !sufmatch (no_proxy, host);
}
\f
+static void write_backup_file PARAMS ((const char *, downloaded_file_t));
+static void replace_attr PARAMS ((const char **, int, FILE *, const char *));
+
/* Change the links in an HTML document. Accepts a structure that
defines the positions of all the links. */
void
convert_links (const char *file, urlpos *l)
{
+ struct file_memory *fm;
FILE *fp;
- char *buf, *p, *p2;
+ const char *p;
downloaded_file_t downloaded_file_return;
- long size;
logprintf (LOG_VERBOSE, _("Converting %s... "), file);
- /* Read from the file.... */
- fp = fopen (file, "rb");
- if (!fp)
+
+ {
+ /* First we do a "dry run": go through the list L and see whether
+ any URL needs to be converted in the first place. If not, just
+ leave the file alone. */
+ int count = 0;
+ urlpos *dry = l;
+ for (dry = l; dry; dry = dry->next)
+ if (dry->convert != CO_NOCONVERT)
+ ++count;
+ if (!count)
+ {
+ logputs (LOG_VERBOSE, _("nothing to do.\n"));
+ return;
+ }
+ }
+
+ fm = read_file (file);
+ if (!fm)
{
logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
file, strerror (errno));
return;
}
- /* ...to a buffer. */
- load_file (fp, &buf, &size);
- fclose (fp);
-
- downloaded_file_return = downloaded_file(CHECK_FOR_FILE, file);
+ downloaded_file_return = downloaded_file (CHECK_FOR_FILE, file);
if (opt.backup_converted && downloaded_file_return)
- /* Rather than just writing over the original .html file with the converted
- version, save the former to *.orig. Note we only do this for files we've
- _successfully_ downloaded, so we don't clobber .orig files sitting around
- from previous invocations. */
- {
- /* Construct the backup filename as the original name plus ".orig". */
- size_t filename_len = strlen(file);
- char* filename_plus_orig_suffix;
- boolean already_wrote_backup_file = FALSE;
- slist* converted_file_ptr;
- static slist* converted_files = NULL;
-
- if (downloaded_file_return == FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED)
- {
- /* Just write "orig" over "html". We need to do it this way because
- when we're checking to see if we've downloaded the file before (to
- see if we can skip downloading it), we don't know if it's a
- text/html file. Therefore we don't know yet at that stage that -E
- is going to cause us to tack on ".html", so we need to compare
- vs. the original URL plus ".orig", not the original URL plus
- ".html.orig". */
- filename_plus_orig_suffix = xmalloc(filename_len + 1);
- strcpy(filename_plus_orig_suffix, file);
- strcpy((filename_plus_orig_suffix + filename_len) - 4, "orig");
- }
- else /* downloaded_file_return == FILE_DOWNLOADED_NORMALLY */
- {
- /* Append ".orig" to the name. */
- filename_plus_orig_suffix = xmalloc(filename_len + sizeof(".orig"));
- strcpy(filename_plus_orig_suffix, file);
- strcpy(filename_plus_orig_suffix + filename_len, ".orig");
- }
-
- /* We can get called twice on the same URL thanks to the
- convert_all_links() call in main(). If we write the .orig file each
- time in such a case, it'll end up containing the first-pass conversion,
- not the original file. So, see if we've already been called on this
- file. */
- converted_file_ptr = converted_files;
- while (converted_file_ptr != NULL)
- if (strcmp(converted_file_ptr->string, file) == 0)
- {
- already_wrote_backup_file = TRUE;
- break;
- }
- else
- converted_file_ptr = converted_file_ptr->next;
-
- if (!already_wrote_backup_file)
- {
- /* Rename <file> to <file>.orig before former gets written over. */
- if (rename(file, filename_plus_orig_suffix) != 0)
- logprintf (LOG_NOTQUIET, _("Cannot back up %s as %s: %s\n"),
- file, filename_plus_orig_suffix, strerror (errno));
-
- /* Remember that we've already written a .orig backup for this file.
- Note that we never free this memory since we need it till the
- convert_all_links() call, which is one of the last things the
- program does before terminating. BTW, I'm not sure if it would be
- safe to just set 'converted_file_ptr->string' to 'file' below,
- rather than making a copy of the string... Another note is that I
- thought I could just add a field to the urlpos structure saying
- that we'd written a .orig file for this URL, but that didn't work,
- so I had to make this separate list. */
- converted_file_ptr = xmalloc(sizeof(*converted_file_ptr));
- converted_file_ptr->string = xstrdup(file); /* die on out-of-mem. */
- converted_file_ptr->next = converted_files;
- converted_files = converted_file_ptr;
- }
+ write_backup_file (file, downloaded_file_return);
- free(filename_plus_orig_suffix);
+  /* Before opening the file for writing, unlink the file.  This is
+     important if the data in FM is mmaped.  In that case, truncating
+     the file, which is what the fopen ("wb") below does, would make
+     us read all zeroes from the mmaped region. */
+ if (unlink (file) < 0 && errno != ENOENT)
+ {
+ logprintf (LOG_NOTQUIET, _("Unable to delete `%s': %s\n"),
+ file, strerror (errno));
+ read_file_free (fm);
+ return;
}
/* Now open the file for writing. */
fp = fopen (file, "wb");
{
logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
file, strerror (errno));
- free (buf);
+ read_file_free (fm);
return;
}
- /* Presumably we have to loop through multiple URLs here (even though we're
- only talking about a single local file) because of the -O option. */
- for (p = buf; l; l = l->next)
+  /* Here we loop through all the URLs in the file, replacing each
+     link marked for conversion with either a relative reference to
+     the local copy or its complete URL. */
+ p = fm->content;
+ for (; l; l = l->next)
{
- if (l->pos >= size)
+ char *url_start = fm->content + l->pos;
+
+ if (l->pos >= fm->length)
{
DEBUGP (("Something strange is going on. Please investigate."));
break;
}
- /* If the URL already is relative or it is not to be converted
- for some other reason (e.g. because of not having been
- downloaded in the first place), skip it. */
- if ((l->flags & URELATIVE) || !(l->flags & UABS2REL))
+ /* If the URL is not to be converted, skip it. */
+ if (l->convert == CO_NOCONVERT)
{
- DEBUGP (("Skipping %s at position %d (flags %d).\n", l->url,
- l->pos, l->flags));
+ DEBUGP (("Skipping %s at position %d.\n", l->url, l->pos));
continue;
}
- /* Else, reach the position of the offending URL, echoing
- everything up to it to the outfile. */
- for (p2 = buf + l->pos; p < p2; p++)
- putc (*p, fp);
- if (l->flags & UABS2REL)
- /* Convert absolute URL to relative. */
+
+ /* Echo the file contents, up to the offending URL's opening
+ quote, to the outfile. */
+ fwrite (p, 1, url_start - p, fp);
+ p = url_start;
+ if (l->convert == CO_CONVERT_TO_RELATIVE)
{
+ /* Convert absolute URL to relative. */
char *newname = construct_relative (file, l->local_name);
- fprintf (fp, "%s", newname);
- DEBUGP (("ABS2REL: %s to %s at position %d in %s.\n",
+ char *quoted_newname = html_quote_string (newname);
+ replace_attr (&p, l->size, fp, quoted_newname);
+ DEBUGP (("TO_RELATIVE: %s to %s at position %d in %s.\n",
l->url, newname, l->pos, file));
- free (newname);
+ xfree (newname);
+ xfree (quoted_newname);
+ }
+ else if (l->convert == CO_CONVERT_TO_COMPLETE)
+ {
+ /* Convert the link to absolute URL. */
+ char *newlink = l->url;
+ char *quoted_newlink = html_quote_string (newlink);
+ replace_attr (&p, l->size, fp, quoted_newlink);
+ DEBUGP (("TO_COMPLETE: <something> to %s at position %d in %s.\n",
+ newlink, l->pos, file));
+ xfree (quoted_newlink);
}
- p += l->size;
}
/* Output the rest of the file. */
- if (p - buf < size)
- {
- for (p2 = buf + size; p < p2; p++)
- putc (*p, fp);
- }
+ if (p - fm->content < fm->length)
+ fwrite (p, 1, fm->length - (p - fm->content), fp);
fclose (fp);
- free (buf);
+ read_file_free (fm);
logputs (LOG_VERBOSE, _("done.\n"));
}
return t;
}
+static void
+write_backup_file (const char *file, downloaded_file_t downloaded_file_return)
+{
+ /* Rather than just writing over the original .html file with the
+ converted version, save the former to *.orig. Note we only do
+ this for files we've _successfully_ downloaded, so we don't
+ clobber .orig files sitting around from previous invocations. */
+
+ /* Construct the backup filename as the original name plus ".orig". */
+ size_t filename_len = strlen(file);
+ char* filename_plus_orig_suffix;
+ boolean already_wrote_backup_file = FALSE;
+ slist* converted_file_ptr;
+ static slist* converted_files = NULL;
+
+ if (downloaded_file_return == FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED)
+ {
+ /* Just write "orig" over "html". We need to do it this way
+ because when we're checking to see if we've downloaded the
+ file before (to see if we can skip downloading it), we don't
+ know if it's a text/html file. Therefore we don't know yet
+ at that stage that -E is going to cause us to tack on
+ ".html", so we need to compare vs. the original URL plus
+ ".orig", not the original URL plus ".html.orig". */
+ filename_plus_orig_suffix = alloca (filename_len + 1);
+ strcpy(filename_plus_orig_suffix, file);
+ strcpy((filename_plus_orig_suffix + filename_len) - 4, "orig");
+ }
+ else /* downloaded_file_return == FILE_DOWNLOADED_NORMALLY */
+ {
+ /* Append ".orig" to the name. */
+ filename_plus_orig_suffix = alloca (filename_len + sizeof(".orig"));
+ strcpy(filename_plus_orig_suffix, file);
+ strcpy(filename_plus_orig_suffix + filename_len, ".orig");
+ }
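+
+  /* To illustrate the two cases above: when -E has added ".html", a
+     file saved as "index.html" is backed up as "index.orig" (the
+     "orig" overwrites the "html"), whereas a normally downloaded
+     "index.html" is backed up as "index.html.orig". */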
+
+ /* We can get called twice on the same URL thanks to the
+ convert_all_links() call in main(). If we write the .orig file
+ each time in such a case, it'll end up containing the first-pass
+ conversion, not the original file. So, see if we've already been
+ called on this file. */
+ converted_file_ptr = converted_files;
+ while (converted_file_ptr != NULL)
+ if (strcmp(converted_file_ptr->string, file) == 0)
+ {
+ already_wrote_backup_file = TRUE;
+ break;
+ }
+ else
+ converted_file_ptr = converted_file_ptr->next;
+
+ if (!already_wrote_backup_file)
+ {
+      /* Rename <file> to <file>.orig before the former gets written over. */
+ if (rename(file, filename_plus_orig_suffix) != 0)
+ logprintf (LOG_NOTQUIET, _("Cannot back up %s as %s: %s\n"),
+ file, filename_plus_orig_suffix, strerror (errno));
+
+ /* Remember that we've already written a .orig backup for this file.
+ Note that we never free this memory since we need it till the
+ convert_all_links() call, which is one of the last things the
+ program does before terminating. BTW, I'm not sure if it would be
+ safe to just set 'converted_file_ptr->string' to 'file' below,
+ rather than making a copy of the string... Another note is that I
+ thought I could just add a field to the urlpos structure saying
+ that we'd written a .orig file for this URL, but that didn't work,
+ so I had to make this separate list.
+ -- Dan Harkless <wget@harkless.org>
+
+ This [adding a field to the urlpos structure] didn't work
+ because convert_file() is called twice: once after all its
+ sublinks have been retrieved in recursive_retrieve(), and
+ once at the end of the day in convert_all_links(). The
+ original linked list collected in recursive_retrieve() is
+ lost after the first invocation of convert_links(), and
+ convert_all_links() makes a new one (it calls get_urls_html()
+ for each file it covers.) That's why your first approach didn't
+ work. The way to make it work is perhaps to make this flag a
+ field in the `urls_html' list.
+ -- Hrvoje Niksic <hniksic@arsdigita.com>
+ */
+ converted_file_ptr = xmalloc(sizeof(*converted_file_ptr));
+ converted_file_ptr->string = xstrdup(file); /* die on out-of-mem. */
+ converted_file_ptr->next = converted_files;
+ converted_files = converted_file_ptr;
+ }
+}
+
+static int find_fragment PARAMS ((const char *, int, const char **,
+ const char **));
+
+static void
+replace_attr (const char **pp, int raw_size, FILE *fp, const char *new_str)
+{
+ const char *p = *pp;
+ int quote_flag = 0;
+ int size = raw_size;
+ char quote_char = '\"';
+ const char *frag_beg, *frag_end;
+
+ /* Structure of our string is:
+ "...old-contents..."
+ <--- l->size ---> (with quotes)
+ OR:
+ ...old-contents...
+ <--- l->size --> (no quotes) */
+
+ if (*p == '\"' || *p == '\'')
+ {
+ quote_char = *p;
+ quote_flag = 1;
+ ++p;
+ size -= 2; /* disregard opening and closing quote */
+ }
+ putc (quote_char, fp);
+ fputs (new_str, fp);
+
+ /* Look for fragment identifier, if any. */
+ if (find_fragment (p, size, &frag_beg, &frag_end))
+ fwrite (frag_beg, 1, frag_end - frag_beg, fp);
+ p += size;
+ if (quote_flag)
+ ++p;
+ putc (quote_char, fp);
+ *pp = p;
+}
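+
+/* For example, if the attribute value in the source is
+
+     "http://host/dir/page.html#section2"
+
+   (quotes included) and NEW_STR is "page.html", replace_attr emits
+   "page.html#section2": the original quoting style and fragment
+   identifier are preserved, and only the URL proper is replaced. */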
+
+/* Find the first occurrence of '#' in [BEG, BEG+SIZE) that is not
+ preceded by '&'. If the character is not found, return zero. If
+ the character is found, return 1 and set BP and EP to point to the
+ beginning and end of the region.
+
+   This is used for finding the fragment identifiers in URLs. */
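+
+/* For example, in "foo.html#sect2" the fragment "#sect2" is found,
+   whereas in "foo&#56;bar" the '#' immediately follows '&' and is
+   therefore assumed to be part of an SGML entity rather than a
+   fragment identifier. */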
+
+static int
+find_fragment (const char *beg, int size, const char **bp, const char **ep)
+{
+ const char *end = beg + size;
+ int saw_amp = 0;
+ for (; beg < end; beg++)
+ {
+ switch (*beg)
+ {
+ case '&':
+ saw_amp = 1;
+ break;
+ case '#':
+ if (!saw_amp)
+ {
+ *bp = beg;
+ *ep = end;
+ return 1;
+ }
+ /* fallthrough */
+ default:
+ saw_amp = 0;
+ }
+ }
+ return 0;
+}
+
+typedef struct _downloaded_file_list {
+ char* file;
+ downloaded_file_t download_type;
+ struct _downloaded_file_list* next;
+} downloaded_file_list;
+
+static downloaded_file_list *downloaded_files;
/* Remembers which files have been downloaded. In the standard case, should be
called with mode == FILE_DOWNLOADED_NORMALLY for each file we actually
downloaded_file_t
downloaded_file (downloaded_file_t mode, const char* file)
{
- typedef struct _downloaded_file_list
- {
- char* file;
- downloaded_file_t download_type;
- struct _downloaded_file_list* next;
- } downloaded_file_list;
-
boolean found_file = FALSE;
- static downloaded_file_list* downloaded_files = NULL;
downloaded_file_list* rover = downloaded_files;
while (rover != NULL)
return FILE_NOT_ALREADY_DOWNLOADED;
}
}
+
+void
+downloaded_files_free (void)
+{
+ downloaded_file_list* rover = downloaded_files;
+ while (rover)
+ {
+ downloaded_file_list *next = rover->next;
+ xfree (rover->file);
+ xfree (rover);
+ rover = next;
+ }
+}