X-Git-Url: http://sjero.net/git/?a=blobdiff_plain;f=src%2Furl.c;h=e89704d7b625ab21b6ea26b54270545235c2717a;hb=0967c21094580317353f0742c4836c5bbea34059;hp=857aada5aa415f9699be9abae5bcb1784a49a1cb;hpb=2ffb47eabf9fe89d513dc79bdc535e4092e1d6ee;p=wget diff --git a/src/url.c b/src/url.c index 857aada5..e89704d7 100644 --- a/src/url.c +++ b/src/url.c @@ -1,21 +1,31 @@ /* URL handling. - Copyright (C) 1995, 1996, 1997, 2000 Free Software Foundation, Inc. + Copyright (C) 2005 Free Software Foundation, Inc. -This file is part of Wget. +This file is part of GNU Wget. -This program is free software; you can redistribute it and/or modify +GNU Wget is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. -This program is distributed in the hope that it will be useful, +GNU Wget is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +along with Wget; if not, write to the Free Software +Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +In addition, as a special exception, the Free Software Foundation +gives permission to link the code of its release of Wget with the +OpenSSL project's "OpenSSL" library (or with modified versions of it +that use the same license as the "OpenSSL" library), and distribute +the linked executables. You must obey the GNU General Public License +in all respects for all of the code used other than "OpenSSL". If you +modify this file, you may extend this exception to your version of the +file, but you are not obligated to do so. 
If you do not wish to do +so, delete this exception statement from your version. */ #include @@ -26,7 +36,6 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #else # include #endif -#include #include #ifdef HAVE_UNISTD_H # include @@ -37,842 +46,1101 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "wget.h" #include "utils.h" #include "url.h" -#include "host.h" +#include "host.h" /* for is_valid_ipv6_address */ #ifndef errno extern int errno; #endif -/* Default port definitions */ -#define DEFAULT_HTTP_PORT 80 -#define DEFAULT_FTP_PORT 21 - -/* Table of Unsafe chars. This is intialized in - init_unsafe_char_table. */ - -static char unsafe_char_table[256]; +struct scheme_data +{ + const char *name; + const char *leading_string; + int default_port; + int enabled; +}; -#define UNSAFE_CHAR(c) (unsafe_char_table[(unsigned char)(c)]) +/* Supported schemes: */ +static struct scheme_data supported_schemes[] = +{ + { "http", "http://", DEFAULT_HTTP_PORT, 1 }, +#ifdef HAVE_SSL + { "https", "https://", DEFAULT_HTTPS_PORT, 1 }, +#endif + { "ftp", "ftp://", DEFAULT_FTP_PORT, 1 }, -/* If S contains unsafe characters, free it and replace it with a - version that doesn't. */ -#define URL_CLEANSE(s) do \ -{ \ - if (contains_unsafe (s)) \ - { \ - char *uc_tmp = encode_string (s); \ - xfree (s); \ - (s) = uc_tmp; \ - } \ -} while (0) + /* SCHEME_INVALID */ + { NULL, NULL, -1, 0 } +}; -/* Is a directory "."? */ -#define DOTP(x) ((*(x) == '.') && (!*(x + 1))) -/* Is a directory ".."? */ -#define DDOTP(x) ((*(x) == '.') && (*(x + 1) == '.') && (!*(x + 2))) +/* Forward declarations: */ -#if 0 -static void path_simplify_with_kludge PARAMS ((char *)); -#endif -static int urlpath_length PARAMS ((const char *)); +static int path_simplify PARAMS ((char *)); + +/* Support for escaping and unescaping of URL strings. */ + +/* Table of "reserved" and "unsafe" characters. 
Those terms are + rfc1738-speak, as such largely obsoleted by rfc2396 and later + specs, but the general idea remains. + + A reserved character is the one that you can't decode without + changing the meaning of the URL. For example, you can't decode + "/foo/%2f/bar" into "/foo///bar" because the number and contents of + path components is different. Non-reserved characters can be + changed, so "/foo/%78/bar" is safe to change to "/foo/x/bar". The + unsafe characters are loosely based on rfc1738, plus "$" and ",", + as recommended by rfc2396, and minus "~", which is very frequently + used (and sometimes unrecognized as %7E by broken servers). + + An unsafe character is the one that should be encoded when URLs are + placed in foreign environments. E.g. space and newline are unsafe + in HTTP contexts because HTTP uses them as separator and line + terminator, so they must be encoded to %20 and %0A respectively. + "*" is unsafe in shell context, etc. + + We determine whether a character is unsafe through static table + lookup. This code assumes ASCII character set and 8-bit chars. */ + +enum { + /* rfc1738 reserved chars + "$" and ",". */ + urlchr_reserved = 1, + + /* rfc1738 unsafe chars, plus non-printables. */ + urlchr_unsafe = 2 +}; -/* NULL-terminated list of strings to be recognized as prototypes (URL - schemes). Note that recognized doesn't mean supported -- only HTTP - and FTP are currently supported. +#define urlchr_test(c, mask) (urlchr_table[(unsigned char)(c)] & (mask)) +#define URL_RESERVED_CHAR(c) urlchr_test(c, urlchr_reserved) +#define URL_UNSAFE_CHAR(c) urlchr_test(c, urlchr_unsafe) - However, a string that does not match anything in the list will be - considered a relative URL. Thus it's important that this list has - anything anyone could think of being legal. +/* Shorthands for the table: */ +#define R urlchr_reserved +#define U urlchr_unsafe +#define RU R|U - There are wild things here. :-) Take a look at - for more - fun. 
*/ -static char *protostrings[] = +static const unsigned char urlchr_table[256] = { - "cid:", - "clsid:", - "file:", - "finger:", - "ftp:", - "gopher:", - "hdl:", - "http:", - "https:", - "ilu:", - "ior:", - "irc:", - "java:", - "javascript:", - "lifn:", - "mailto:", - "mid:", - "news:", - "nntp:", - "path:", - "prospero:", - "rlogin:", - "service:", - "shttp:", - "snews:", - "stanf:", - "telnet:", - "tn3270:", - "wais:", - "whois++:", - NULL + U, U, U, U, U, U, U, U, /* NUL SOH STX ETX EOT ENQ ACK BEL */ + U, U, U, U, U, U, U, U, /* BS HT LF VT FF CR SO SI */ + U, U, U, U, U, U, U, U, /* DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ + U, U, U, U, U, U, U, U, /* CAN EM SUB ESC FS GS RS US */ + U, 0, U, RU, R, U, R, 0, /* SP ! " # $ % & ' */ + 0, 0, 0, R, R, 0, 0, R, /* ( ) * + , - . / */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0 1 2 3 4 5 6 7 */ + 0, 0, RU, R, U, R, U, R, /* 8 9 : ; < = > ? */ + RU, 0, 0, 0, 0, 0, 0, 0, /* @ A B C D E F G */ + 0, 0, 0, 0, 0, 0, 0, 0, /* H I J K L M N O */ + 0, 0, 0, 0, 0, 0, 0, 0, /* P Q R S T U V W */ + 0, 0, 0, RU, U, RU, U, 0, /* X Y Z [ \ ] ^ _ */ + U, 0, 0, 0, 0, 0, 0, 0, /* ` a b c d e f g */ + 0, 0, 0, 0, 0, 0, 0, 0, /* h i j k l m n o */ + 0, 0, 0, 0, 0, 0, 0, 0, /* p q r s t u v w */ + 0, 0, 0, U, U, U, 0, U, /* x y z { | } ~ DEL */ + + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, + + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, + U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, }; +#undef R +#undef U +#undef RU -struct proto -{ - char *name; - uerr_t ind; - unsigned short port; -}; +/* URL-unescape the string S. 
-/* Similar to former, but for supported protocols: */ -static struct proto sup_protos[] = -{ - { "http://", URLHTTP, DEFAULT_HTTP_PORT }, - { "ftp://", URLFTP, DEFAULT_FTP_PORT }, - /*{ "file://", URLFILE, DEFAULT_FTP_PORT },*/ -}; + This is done by transforming the sequences "%HH" to the character + represented by the hexadecimal digits HH. If % is not followed by + two hexadecimal digits, it is inserted literally. -static void parse_dir PARAMS ((const char *, char **, char **)); -static uerr_t parse_uname PARAMS ((const char *, char **, char **)); -static char *construct PARAMS ((const char *, const char *, int , int)); -static char *construct_relative PARAMS ((const char *, const char *)); -static char process_ftp_type PARAMS ((char *)); + The transformation is done in place. If you need the original + string intact, make a copy before calling this function. */ - -/* Returns the number of characters to be skipped if the first thing - in a URL is URL: (which is 0 or 4+). The optional spaces after - URL: are also skipped. */ -int -skip_url (const char *url) +static void +url_unescape (char *s) { - int i; + char *t = s; /* t - tortoise */ + char *h = s; /* h - hare */ - if (TOUPPER (url[0]) == 'U' - && TOUPPER (url[1]) == 'R' - && TOUPPER (url[2]) == 'L' - && url[3] == ':') + for (; *h; h++, t++) { - /* Skip blanks. */ - for (i = 4; url[i] && ISSPACE (url[i]); i++); - return i; + if (*h != '%') + { + copychar: + *t = *h; + } + else + { + /* Do nothing if '%' is not followed by two hex digits. */ + if (!h[1] || !h[2] || !(ISXDIGIT (h[1]) && ISXDIGIT (h[2]))) + goto copychar; + *t = X2DIGITS_TO_NUM (h[1], h[2]); + h += 2; + } } - else - return 0; + *t = '\0'; } -/* Unsafe chars: - - anything <= 32; - - stuff from rfc1738 ("<>\"#%{}|\\^~[]`"); - - @ and :, for user/password encoding. - - everything over 127 (but we don't bother with recording those. 
*/ -void -init_unsafe_char_table (void) -{ - int i; - for (i = 0; i < 256; i++) - if (i < 32 || i >= 127 - || i == '<' - || i == '>' - || i == '\"' - || i == '#' - || i == '%' - || i == '{' - || i == '}' - || i == '|' - || i == '\\' - || i == '^' - || i == '~' - || i == '[' - || i == ']' - || i == '`') - unsafe_char_table[i] = 1; -} +/* The core of url_escape_* functions. Escapes the characters that + match the provided mask in urlchr_table. -/* Returns 1 if the string contains unsafe characters, 0 otherwise. */ -int -contains_unsafe (const char *s) + If ALLOW_PASSTHROUGH is non-zero, a string with no unsafe chars + will be returned unchanged. If ALLOW_PASSTHROUGH is zero, a + freshly allocated string will be returned in all cases. */ + +static char * +url_escape_1 (const char *s, unsigned char mask, int allow_passthrough) { - for (; *s; s++) - if (UNSAFE_CHAR (*s)) - return 1; - return 0; -} + const char *p1; + char *p2, *newstr; + int newlen; + int addition = 0; -/* Decodes the forms %xy in a URL to the character the hexadecimal - code of which is xy. xy are hexadecimal digits from - [0123456789ABCDEF] (case-insensitive). If x or y are not - hex-digits or `%' precedes `\0', the sequence is inserted - literally. */ + for (p1 = s; *p1; p1++) + if (urlchr_test (*p1, mask)) + addition += 2; /* Two more characters (hex digits) */ -static void -decode_string (char *s) -{ - char *p = s; + if (!addition) + return allow_passthrough ? (char *)s : xstrdup (s); + + newlen = (p1 - s) + addition; + newstr = (char *)xmalloc (newlen + 1); - for (; *s; s++, p++) + p1 = s; + p2 = newstr; + while (*p1) { - if (*s != '%') - *p = *s; - else + /* Quote the characters that match the test mask. */ + if (urlchr_test (*p1, mask)) { - /* Do nothing if at the end of the string, or if the chars - are not hex-digits. 
*/ - if (!*(s + 1) || !*(s + 2) - || !(ISXDIGIT (*(s + 1)) && ISXDIGIT (*(s + 2)))) - { - *p = *s; - continue; - } - *p = (ASC2HEXD (*(s + 1)) << 4) + ASC2HEXD (*(s + 2)); - s += 2; + unsigned char c = *p1++; + *p2++ = '%'; + *p2++ = XNUM_TO_DIGIT (c >> 4); + *p2++ = XNUM_TO_DIGIT (c & 0xf); } + else + *p2++ = *p1++; } - *p = '\0'; + assert (p2 - newstr == newlen); + *p2 = '\0'; + + return newstr; } -/* Encode the unsafe characters (as determined by URL_UNSAFE) in a - given string, returning a malloc-ed %XX encoded string. */ +/* URL-escape the unsafe characters (see urlchr_table) in a given + string, returning a freshly allocated string. */ + char * -encode_string (const char *s) +url_escape (const char *s) { - const char *b; - char *p, *res; - int i; + return url_escape_1 (s, urlchr_unsafe, 0); +} - b = s; - for (i = 0; *s; s++, i++) - if (UNSAFE_CHAR (*s)) - i += 2; /* Two more characters (hex digits) */ - res = (char *)xmalloc (i + 1); - s = b; - for (p = res; *s; s++) - if (UNSAFE_CHAR (*s)) - { - const unsigned char c = *s; - *p++ = '%'; - *p++ = HEXD2ASC (c >> 4); - *p++ = HEXD2ASC (c & 0xf); - } - else - *p++ = *s; - *p = '\0'; - return res; +/* URL-escape the unsafe characters (see urlchr_table) in a given + string. If no characters are unsafe, S is returned. */ + +static char * +url_escape_allow_passthrough (const char *s) +{ + return url_escape_1 (s, urlchr_unsafe, 1); } -/* Returns the proto-type if URL's protocol is supported, or - URLUNKNOWN if not. */ -uerr_t -urlproto (const char *url) -{ - int i; +enum copy_method { CM_DECODE, CM_ENCODE, CM_PASSTHROUGH }; - url += skip_url (url); - for (i = 0; i < ARRAY_SIZE (sup_protos); i++) - if (!strncasecmp (url, sup_protos[i].name, strlen (sup_protos[i].name))) - return sup_protos[i].ind; - for (i = 0; url[i] && url[i] != ':' && url[i] != '/'; i++); - if (url[i] == ':') +/* Decide whether to encode, decode, or pass through the char at P. + This used to be a macro, but it got a little too convoluted. 
*/ +static inline enum copy_method +decide_copy_method (const char *p) +{ + if (*p == '%') { - for (++i; url[i] && url[i] != '/'; i++) - if (!ISDIGIT (url[i])) - return URLBADPORT; - if (url[i - 1] == ':') - return URLFTP; + if (ISXDIGIT (*(p + 1)) && ISXDIGIT (*(p + 2))) + { + /* %xx sequence: decode it, unless it would decode to an + unsafe or a reserved char; in that case, leave it as + is. */ + char preempt = X2DIGITS_TO_NUM (*(p + 1), *(p + 2)); + if (URL_UNSAFE_CHAR (preempt) || URL_RESERVED_CHAR (preempt)) + return CM_PASSTHROUGH; + else + return CM_DECODE; + } else - return URLHTTP; + /* Garbled %.. sequence: encode `%'. */ + return CM_ENCODE; } + else if (URL_UNSAFE_CHAR (*p) && !URL_RESERVED_CHAR (*p)) + return CM_ENCODE; else - return URLHTTP; + return CM_PASSTHROUGH; } -/* Skip the protocol part of the URL, e.g. `http://'. If no protocol - part is found, returns 0. */ -int -skip_proto (const char *url) +/* Translate a %-escaped (but possibly non-conformant) input string S + into a %-escaped (and conformant) output string. If no characters + are encoded or decoded, return the same string S; otherwise, return + a freshly allocated string with the new contents. + + After a URL has been run through this function, the protocols that + use `%' as the quote character can use the resulting string as-is, + while those that don't call url_unescape() to get to the intended + data. This function is also stable: after an input string is + transformed the first time, all further transformations of the + result yield the same result string. + + Let's discuss why this function is needed. + + Imagine Wget is to retrieve `http://abc.xyz/abc def'. Since a raw + space character would mess up the HTTP request, it needs to be + quoted, like this: + + GET /abc%20def HTTP/1.0 + + It appears that the unsafe chars need to be quoted, for example + with url_escape. But what if we're requested to download + `abc%20def'? 
url_escape transforms "%" to "%25", which would leave + us with `abc%2520def'. This is incorrect -- since %-escapes are + part of URL syntax, "%20" is the correct way to denote a literal + space on the Wget command line. This leaves us in the conclusion + that in that case Wget should not call url_escape, but leave the + `%20' as is. + + And what if the requested URI is `abc%20 def'? If we call + url_escape, we end up with `/abc%2520%20def', which is almost + certainly not intended. If we don't call url_escape, we are left + with the embedded space and cannot complete the request. What the + user meant was for Wget to request `/abc%20%20def', and this is + where reencode_escapes kicks in. + + Wget used to solve this by first decoding %-quotes, and then + encoding all the "unsafe" characters found in the resulting string. + This was wrong because it didn't preserve certain URL special + (reserved) characters. For instance, URI containing "a%2B+b" (0x2b + == '+') would get translated to "a%2B%2Bb" or "a++b" depending on + whether we considered `+' reserved (it is). One of these results + is inevitable because by the second step we would lose information + on whether the `+' was originally encoded or not. Both results + were wrong because in CGI parameters + means space, while %2B means + literal plus. reencode_escapes correctly translates the above to + "a%2B+b", i.e. returns the original string. + + This function uses an algorithm proposed by Anon Sricharoenchai: + + 1. Encode all URL_UNSAFE and the "%" that are not followed by 2 + hexdigits. + + 2. Decode all "%XX" except URL_UNSAFE, URL_RESERVED (";/?:@=&") and + "+". + + ...except that this code conflates the two steps, and decides + whether to encode, decode, or pass through each character in turn. + The function still uses two passes, but their logic is the same -- + the first pass exists merely for the sake of allocation. Another + small difference is that we include `+' to URL_RESERVED. 
+ + Anon's test case: + + "http://abc.xyz/%20%3F%%36%31%25aa% a?a=%61+a%2Ba&b=b%26c%3Dc" + -> + "http://abc.xyz/%20%3F%2561%25aa%25%20a?a=a+a%2Ba&b=b%26c%3Dc" + + Simpler test cases: + + "foo bar" -> "foo%20bar" + "foo%20bar" -> "foo%20bar" + "foo %20bar" -> "foo%20%20bar" + "foo%%20bar" -> "foo%25%20bar" (0x25 == '%') + "foo%25%20bar" -> "foo%25%20bar" + "foo%2%20bar" -> "foo%252%20bar" + "foo+bar" -> "foo+bar" (plus is reserved!) + "foo%2b+bar" -> "foo%2b+bar" */ + +static char * +reencode_escapes (const char *s) { - char **s; - int l; + const char *p1; + char *newstr, *p2; + int oldlen, newlen; - for (s = protostrings; *s; s++) - if (!strncasecmp (*s, url, strlen (*s))) - break; - if (!*s) - return 0; - l = strlen (*s); - /* HTTP and FTP protocols are expected to yield exact host names - (i.e. the `//' part must be skipped, too). */ - if (!strcmp (*s, "http:") || !strcmp (*s, "ftp:")) - l += 2; - return l; + int encode_count = 0; + int decode_count = 0; + + /* First, pass through the string to see if there's anything to do, + and to calculate the new length. */ + for (p1 = s; *p1; p1++) + { + switch (decide_copy_method (p1)) + { + case CM_ENCODE: + ++encode_count; + break; + case CM_DECODE: + ++decode_count; + break; + case CM_PASSTHROUGH: + break; + } + } + + if (!encode_count && !decode_count) + /* The string is good as it is. */ + return (char *)s; /* C const model sucks. */ + + oldlen = p1 - s; + /* Each encoding adds two characters (hex digits), while each + decoding removes two characters. 
*/ + newlen = oldlen + 2 * (encode_count - decode_count); + newstr = xmalloc (newlen + 1); + + p1 = s; + p2 = newstr; + + while (*p1) + { + switch (decide_copy_method (p1)) + { + case CM_ENCODE: + { + unsigned char c = *p1++; + *p2++ = '%'; + *p2++ = XNUM_TO_DIGIT (c >> 4); + *p2++ = XNUM_TO_DIGIT (c & 0xf); + } + break; + case CM_DECODE: + *p2++ = X2DIGITS_TO_NUM (p1[1], p1[2]); + p1 += 3; /* skip %xx */ + break; + case CM_PASSTHROUGH: + *p2++ = *p1++; + } + } + *p2 = '\0'; + assert (p2 - newstr == newlen); + return newstr; } + +/* Returns the scheme type if the scheme is supported, or + SCHEME_INVALID if not. */ -/* Returns 1 if the URL begins with a protocol (supported or - unsupported), 0 otherwise. */ -int -has_proto (const char *url) +enum url_scheme +url_scheme (const char *url) { - char **s; + int i; - url += skip_url (url); - for (s = protostrings; *s; s++) - if (strncasecmp (url, *s, strlen (*s)) == 0) - return 1; - return 0; + for (i = 0; supported_schemes[i].leading_string; i++) + if (0 == strncasecmp (url, supported_schemes[i].leading_string, + strlen (supported_schemes[i].leading_string))) + { + if (supported_schemes[i].enabled) + return (enum url_scheme) i; + else + return SCHEME_INVALID; + } + + return SCHEME_INVALID; } -/* Skip the username and password, if present here. The function - should be called *not* with the complete URL, but with the part - right after the protocol. +#define SCHEME_CHAR(ch) (ISALNUM (ch) || (ch) == '-' || (ch) == '+') + +/* Return 1 if the URL begins with any "scheme", 0 otherwise. As + currently implemented, it returns true if URL begins with + [-+a-zA-Z0-9]+: . */ - If no username and password are found, return 0. */ int -skip_uname (const char *url) +url_has_scheme (const char *url) { - const char *p; - for (p = url; *p && *p != '/'; p++) - if (*p == '@') - break; - /* If a `@' was found before the first occurrence of `/', skip - it. 
*/ - if (*p == '@') - return p - url + 1; - else + const char *p = url; + + /* The first char must be a scheme char. */ + if (!*p || !SCHEME_CHAR (*p)) return 0; + ++p; + /* Followed by 0 or more scheme chars. */ + while (*p && SCHEME_CHAR (*p)) + ++p; + /* Terminated by ':'. */ + return *p == ':'; } - -/* Allocate a new urlinfo structure, fill it with default values and - return a pointer to it. */ -struct urlinfo * -newurl (void) -{ - struct urlinfo *u; - u = (struct urlinfo *)xmalloc (sizeof (struct urlinfo)); - memset (u, 0, sizeof (*u)); - u->proto = URLUNKNOWN; - return u; +int +scheme_default_port (enum url_scheme scheme) +{ + return supported_schemes[scheme].default_port; } -/* Perform a "deep" free of the urlinfo structure. The structure - should have been created with newurl, but need not have been used. - If free_pointer is non-0, free the pointer itself. */ void -freeurl (struct urlinfo *u, int complete) +scheme_disable (enum url_scheme scheme) { - assert (u != NULL); - FREE_MAYBE (u->url); - FREE_MAYBE (u->host); - FREE_MAYBE (u->path); - FREE_MAYBE (u->file); - FREE_MAYBE (u->dir); - FREE_MAYBE (u->user); - FREE_MAYBE (u->passwd); - FREE_MAYBE (u->local); - FREE_MAYBE (u->referer); - if (u->proxy) - freeurl (u->proxy, 1); - if (complete) - xfree (u); - return; + supported_schemes[scheme].enabled = 0; } - -/* Extract the given URL of the form - (http:|ftp:)// (user (:password)?@)?hostname (:port)? (/path)? - 1. hostname (terminated with `/' or `:') - 2. port number (terminated with `/'), or chosen for the protocol - 3. dirname (everything after hostname) - Most errors are handled. No allocation is done, you must supply - pointers to allocated memory. - ...and a host of other stuff :-) - - - Recognizes hostname:dir/file for FTP and - hostname (:portnum)?/dir/file for HTTP. 
- - Parses the path to yield directory and file - - Parses the URL to yield the username and passwd (if present) - - Decodes the strings, in case they contain "forbidden" characters - - Writes the result to struct urlinfo - - If the argument STRICT is set, it recognizes only the canonical - form. */ -uerr_t -parseurl (const char *url, struct urlinfo *u, int strict) + +/* Skip the username and password, if present in the URL. The + function should *not* be called with the complete URL, but with the + portion after the scheme. + + If no username and password are found, return URL. */ + +static const char * +url_skip_credentials (const char *url) +{ + /* Look for '@' that comes before terminators, such as '/', '?', + '#', or ';'. */ + const char *p = (const char *)strpbrk (url, "@/?#;"); + if (!p || *p != '@') + return url; + return p + 1; +} + +/* Parse credentials contained in [BEG, END). The region is expected + to have come from a URL and is unescaped. */ + +static int +parse_credentials (const char *beg, const char *end, char **user, char **passwd) { - int i, l, abs_ftp; - int recognizable; /* Recognizable URL is the one where - the protocol name was explicitly - named, i.e. it wasn't deduced from - the URL format. */ - uerr_t type; - - DEBUGP (("parseurl (\"%s\") -> ", url)); - url += skip_url (url); - recognizable = has_proto (url); - if (strict && !recognizable) - return URLUNKNOWN; - for (i = 0, l = 0; i < ARRAY_SIZE (sup_protos); i++) + char *colon; + const char *userend; + + if (beg == end) + return 0; /* empty user name */ + + colon = memchr (beg, ':', end - beg); + if (colon == beg) + return 0; /* again empty user name */ + + if (colon) { - l = strlen (sup_protos[i].name); - if (!strncasecmp (sup_protos[i].name, url, l)) - break; + *passwd = strdupdelim (colon + 1, end); + userend = colon; + url_unescape (*passwd); } - /* If protocol is recognizable, but unsupported, bail out, else - suppose unknown. 
*/ - if (recognizable && i == ARRAY_SIZE (sup_protos)) - return URLUNKNOWN; - else if (i == ARRAY_SIZE (sup_protos)) - type = URLUNKNOWN; else - u->proto = type = sup_protos[i].ind; - - if (type == URLUNKNOWN) - l = 0; - /* Allow a username and password to be specified (i.e. just skip - them for now). */ - if (recognizable) - l += skip_uname (url + l); - for (i = l; url[i] && url[i] != ':' && url[i] != '/'; i++); - if (i == l) - return URLBADHOST; - /* Get the hostname. */ - u->host = strdupdelim (url + l, url + i); - DEBUGP (("host %s -> ", u->host)); - - /* Assume no port has been given. */ - u->port = 0; - if (url[i] == ':') { - /* We have a colon delimiting the hostname. It could mean that - a port number is following it, or a directory. */ - if (ISDIGIT (url[++i])) /* A port number */ - { - if (type == URLUNKNOWN) - u->proto = type = URLHTTP; - for (; url[i] && url[i] != '/'; i++) - if (ISDIGIT (url[i])) - u->port = 10 * u->port + (url[i] - '0'); - else - return URLBADPORT; - if (!u->port) - return URLBADPORT; - DEBUGP (("port %hu -> ", u->port)); - } - else if (type == URLUNKNOWN) /* or a directory */ - u->proto = type = URLFTP; - else /* or just a misformed port number */ - return URLBADPORT; + *passwd = NULL; + userend = end; } - else if (type == URLUNKNOWN) - u->proto = type = URLHTTP; - if (!u->port) + *user = strdupdelim (beg, userend); + url_unescape (*user); + return 1; +} + +/* Used by main.c: detect URLs written using the "shorthand" URL forms + popularized by Netscape and NcFTP. HTTP shorthands look like this: + + www.foo.com[:port]/dir/file -> http://www.foo.com[:port]/dir/file + www.foo.com[:port] -> http://www.foo.com[:port] + + FTP shorthands look like this: + + foo.bar.com:dir/file -> ftp://foo.bar.com/dir/file + foo.bar.com:/absdir/file -> ftp://foo.bar.com//absdir/file + + If the URL needs not or cannot be rewritten, return NULL. 
*/ + +char * +rewrite_shorthand_url (const char *url) +{ + const char *p; + + if (url_scheme (url) != SCHEME_INVALID) + return NULL; + + /* Look for a ':' or '/'. The former signifies NcFTP syntax, the + latter Netscape. */ + for (p = url; *p && *p != ':' && *p != '/'; p++) + ; + + if (p == url) + return NULL; + + if (*p == ':') { - int i; - for (i = 0; i < ARRAY_SIZE (sup_protos); i++) - if (sup_protos[i].ind == type) - break; - if (i == ARRAY_SIZE (sup_protos)) - return URLUNKNOWN; - u->port = sup_protos[i].port; + const char *pp; + char *res; + /* If the characters after the colon and before the next slash + or end of string are all digits, it's HTTP. */ + int digits = 0; + for (pp = p + 1; ISDIGIT (*pp); pp++) + ++digits; + if (digits > 0 && (*pp == '/' || *pp == '\0')) + goto http; + + /* Prepend "ftp://" to the entire URL... */ + res = xmalloc (6 + strlen (url) + 1); + sprintf (res, "ftp://%s", url); + /* ...and replace ':' with '/'. */ + res[6 + (p - url)] = '/'; + return res; } - /* Some delimiter troubles... */ - if (url[i] == '/' && url[i - 1] != ':') - ++i; - if (type == URLHTTP) - while (url[i] && url[i] == '/') - ++i; - u->path = (char *)xmalloc (strlen (url + i) + 8); - strcpy (u->path, url + i); - if (type == URLFTP) + else { - u->ftp_type = process_ftp_type (u->path); - /* #### We don't handle type `d' correctly yet. */ - if (!u->ftp_type || TOUPPER (u->ftp_type) == 'D') - u->ftp_type = 'I'; + char *res; + http: + /* Just prepend "http://" to what we have. */ + res = xmalloc (7 + strlen (url) + 1); + sprintf (res, "http://%s", url); + return res; } - DEBUGP (("opath %s -> ", u->path)); - /* Parse the username and password (if existing). */ - parse_uname (url, &u->user, &u->passwd); - /* Decode the strings, as per RFC 1738. */ - decode_string (u->host); - decode_string (u->path); - if (u->user) - decode_string (u->user); - if (u->passwd) - decode_string (u->passwd); - /* Parse the directory. 
*/ - parse_dir (u->path, &u->dir, &u->file); - DEBUGP (("dir %s -> file %s -> ", u->dir, u->file)); - /* Simplify the directory. */ - path_simplify (u->dir); - /* Remove the leading `/' in HTTP. */ - if (type == URLHTTP && *u->dir == '/') - strcpy (u->dir, u->dir + 1); - DEBUGP (("ndir %s\n", u->dir)); - /* Strip trailing `/'. */ - l = strlen (u->dir); - if (l && u->dir[l - 1] == '/') - u->dir[l - 1] = '\0'; - /* Re-create the path: */ - abs_ftp = (u->proto == URLFTP && *u->dir == '/'); - /* sprintf (u->path, "%s%s%s%s", abs_ftp ? "%2F": "/", - abs_ftp ? (u->dir + 1) : u->dir, *u->dir ? "/" : "", u->file); */ - strcpy (u->path, abs_ftp ? "%2F" : "/"); - strcat (u->path, abs_ftp ? (u->dir + 1) : u->dir); - strcat (u->path, *u->dir ? "/" : ""); - strcat (u->path, u->file); - URL_CLEANSE (u->path); - DEBUGP (("newpath: %s\n", u->path)); - /* Create the clean URL. */ - u->url = str_url (u, 0); - return URLOK; } -/* Special versions of DOTP and DDOTP for parse_dir(). */ +static void split_path PARAMS ((const char *, char **, char **)); -#define PD_DOTP(x) ((*(x) == '.') && (!*((x) + 1) || *((x) + 1) == '?')) -#define PD_DDOTP(x) ((*(x) == '.') && (*(x) == '.') \ - && (!*((x) + 2) || *((x) + 2) == '?')) +/* Like strpbrk, with the exception that it returns the pointer to the + terminating zero (end-of-string aka "eos") if no matching character + is found. -/* Build the directory and filename components of the path. Both - components are *separately* malloc-ed strings! It does not change - the contents of path. + Although I normally balk at Gcc-specific optimizations, it probably + makes sense here: glibc has optimizations that detect strpbrk being + called with literal string as ACCEPT and inline the search. That + optimization is defeated if strpbrk is hidden within the call to + another function. (And no, making strpbrk_or_eos inline doesn't + help because the check for literal accept is in the + preprocessor.) */ - If the path ends with "." 
or "..", they are (correctly) counted as - directories. */ -static void -parse_dir (const char *path, char **dir, char **file) +#ifdef __GNUC__ + +#define strpbrk_or_eos(s, accept) ({ \ + char *SOE_p = strpbrk (s, accept); \ + if (!SOE_p) \ + SOE_p = (char *)s + strlen (s); \ + SOE_p; \ +}) + +#else /* not __GNUC__ */ + +static char * +strpbrk_or_eos (const char *s, const char *accept) { - int i, l; + char *p = strpbrk (s, accept); + if (!p) + p = (char *)s + strlen (s); + return p; +} +#endif - l = urlpath_length (path); - for (i = l; i && path[i] != '/'; i--); +/* Turn STR into lowercase; return non-zero if a character was + actually changed. */ - if (!i && *path != '/') /* Just filename */ +static int +lowercase_str (char *str) +{ + int change = 0; + for (; *str; str++) + if (ISUPPER (*str)) + { + change = 1; + *str = TOLOWER (*str); + } + return change; +} + +static const char *parse_errors[] = { +#define PE_NO_ERROR 0 + N_("No error"), +#define PE_UNSUPPORTED_SCHEME 1 + N_("Unsupported scheme"), +#define PE_EMPTY_HOST 2 + N_("Empty host"), +#define PE_BAD_PORT_NUMBER 3 + N_("Bad port number"), +#define PE_INVALID_USER_NAME 4 + N_("Invalid user name"), +#define PE_UNTERMINATED_IPV6_ADDRESS 5 + N_("Unterminated IPv6 numeric address"), +#define PE_IPV6_NOT_SUPPORTED 6 + N_("IPv6 addresses not supported"), +#define PE_INVALID_IPV6_ADDRESS 7 + N_("Invalid IPv6 numeric address") +}; + +/* Parse a URL. + + Return a new struct url if successful, NULL on error. In case of + error, and if ERROR is not NULL, also set *ERROR to the appropriate + error code. 
*/ +struct url * +url_parse (const char *url, int *error) +{ + struct url *u; + const char *p; + int path_modified, host_modified; + + enum url_scheme scheme; + + const char *uname_b, *uname_e; + const char *host_b, *host_e; + const char *path_b, *path_e; + const char *params_b, *params_e; + const char *query_b, *query_e; + const char *fragment_b, *fragment_e; + + int port; + char *user = NULL, *passwd = NULL; + + char *url_encoded = NULL; + + int error_code; + + scheme = url_scheme (url); + if (scheme == SCHEME_INVALID) { - if (PD_DOTP (path) || PD_DDOTP (path)) - { - *dir = strdupdelim (path, path + l); - *file = xstrdup (path + l); /* normally empty, but could - contain ?... */ - } - else - { - *dir = xstrdup (""); /* This is required because of FTP */ - *file = xstrdup (path); - } + error_code = PE_UNSUPPORTED_SCHEME; + goto err; } - else if (!i) /* /filename */ + + url_encoded = reencode_escapes (url); + p = url_encoded; + + p += strlen (supported_schemes[scheme].leading_string); + uname_b = p; + p = url_skip_credentials (p); + uname_e = p; + + /* scheme://user:pass@host[:port]... */ + /* ^ */ + + /* We attempt to break down the URL into the components path, + params, query, and fragment. They are ordered like this: + + scheme://host[:port][/path][;params][?query][#fragment] */ + + params_b = params_e = NULL; + query_b = query_e = NULL; + fragment_b = fragment_e = NULL; + + host_b = p; + + if (*p == '[') { - if (PD_DOTP (path + 1) || PD_DDOTP (path + 1)) + /* Handle IPv6 address inside square brackets. Ideally we'd + just look for the terminating ']', but rfc2732 mandates + rejecting invalid IPv6 addresses. */ + + /* The address begins after '['. */ + host_b = p + 1; + host_e = strchr (host_b, ']'); + + if (!host_e) { - *dir = strdupdelim (path, path + l); - *file = xstrdup (path + l); /* normally empty, but could - contain ?... */ + error_code = PE_UNTERMINATED_IPV6_ADDRESS; + goto err; } - else + +#ifdef ENABLE_IPV6 + /* Check if the IPv6 address is valid. 
*/ + if (!is_valid_ipv6_address(host_b, host_e)) { - *dir = xstrdup ("/"); - *file = xstrdup (path + 1); + error_code = PE_INVALID_IPV6_ADDRESS; + goto err; } + + /* Continue parsing after the closing ']'. */ + p = host_e + 1; +#else + error_code = PE_IPV6_NOT_SUPPORTED; + goto err; +#endif + } + else + { + p = strpbrk_or_eos (p, ":/;?#"); + host_e = p; } - else /* Nonempty directory with or without a filename */ + + if (host_b == host_e) + { + error_code = PE_EMPTY_HOST; + goto err; + } + + port = scheme_default_port (scheme); + if (*p == ':') { - if (PD_DOTP (path + i + 1) || PD_DDOTP (path + i + 1)) + const char *port_b, *port_e, *pp; + + /* scheme://host:port/tralala */ + /* ^ */ + ++p; + port_b = p; + p = strpbrk_or_eos (p, "/;?#"); + port_e = p; + + /* Allow empty port, as per rfc2396. */ + if (port_b != port_e) { - *dir = strdupdelim (path, path + l); - *file = xstrdup (path + l); /* normally empty, but could - contain ?... */ + for (port = 0, pp = port_b; pp < port_e; pp++) + { + if (!ISDIGIT (*pp)) + { + /* http://host:12randomgarbage/blah */ + /* ^ */ + error_code = PE_BAD_PORT_NUMBER; + goto err; + } + port = 10 * port + (*pp - '0'); + /* Check for too large port numbers here, before we have + a chance to overflow on bogus port values. */ + if (port > 65535) + { + error_code = PE_BAD_PORT_NUMBER; + goto err; + } + } } - else + } + + if (*p == '/') + { + ++p; + path_b = p; + p = strpbrk_or_eos (p, ";?#"); + path_e = p; + } + else + { + /* Path is not allowed not to exist. */ + path_b = path_e = p; + } + + if (*p == ';') + { + ++p; + params_b = p; + p = strpbrk_or_eos (p, "?#"); + params_e = p; + } + if (*p == '?') + { + ++p; + query_b = p; + p = strpbrk_or_eos (p, "#"); + query_e = p; + + /* Hack that allows users to use '?' (a wildcard character) in + FTP URLs without it being interpreted as a query string + delimiter. 
*/ + if (scheme == SCHEME_FTP) { - *dir = strdupdelim (path, path + i); - *file = xstrdup (path + i + 1); + query_b = query_e = NULL; + path_e = p; } } -} + if (*p == '#') + { + ++p; + fragment_b = p; + p += strlen (p); + fragment_e = p; + } + assert (*p == 0); -/* Find the optional username and password within the URL, as per - RFC1738. The returned user and passwd char pointers are - malloc-ed. */ -static uerr_t -parse_uname (const char *url, char **user, char **passwd) -{ - int l; - const char *p, *col; - char **where; - - *user = NULL; - *passwd = NULL; - url += skip_url (url); - /* Look for end of protocol string. */ - l = skip_proto (url); - if (!l) - return URLUNKNOWN; - /* Add protocol offset. */ - url += l; - /* Is there an `@' character? */ - for (p = url; *p && *p != '/'; p++) - if (*p == '@') - break; - /* If not, return. */ - if (*p != '@') - return URLOK; - /* Else find the username and password. */ - for (p = col = url; *p != '@'; p++) + if (uname_b != uname_e) { - if (*p == ':' && !*user) + /* http://user:pass@host */ + /* ^ ^ */ + /* uname_b uname_e */ + if (!parse_credentials (uname_b, uname_e - 1, &user, &passwd)) { - *user = (char *)xmalloc (p - url + 1); - memcpy (*user, url, p - url); - (*user)[p - url] = '\0'; - col = p + 1; + error_code = PE_INVALID_USER_NAME; + goto err; } } - /* Decide whether you have only the username or both. */ - where = *user ? passwd : user; - *where = (char *)xmalloc (p - col + 1); - memcpy (*where, col, p - col); - (*where)[p - col] = '\0'; - return URLOK; -} -/* If PATH ends with `;type=X', return the character X. 
*/ -static char -process_ftp_type (char *path) -{ - int len = strlen (path); + u = xnew0 (struct url); + u->scheme = scheme; + u->host = strdupdelim (host_b, host_e); + u->port = port; + u->user = user; + u->passwd = passwd; - if (len >= 7 - && !memcmp (path + len - 7, ";type=", 6)) + u->path = strdupdelim (path_b, path_e); + path_modified = path_simplify (u->path); + split_path (u->path, &u->dir, &u->file); + + host_modified = lowercase_str (u->host); + + /* Decode %HH sequences in host name. This is important not so much + to support %HH sequences, but to support binary characters (which + will have been converted to %HH by reencode_escapes). */ + if (strchr (u->host, '%')) + { + url_unescape (u->host); + host_modified = 1; + } + + if (params_b) + u->params = strdupdelim (params_b, params_e); + if (query_b) + u->query = strdupdelim (query_b, query_e); + if (fragment_b) + u->fragment = strdupdelim (fragment_b, fragment_e); + + if (path_modified || u->fragment || host_modified || path_b == path_e) { - path[len - 7] = '\0'; - return path[len - 1]; + /* If we suspect that a transformation has rendered what + url_string might return different from URL_ENCODED, rebuild + u->url using url_string. */ + u->url = url_string (u, 0); + + if (url_encoded != url) + xfree ((char *) url_encoded); } else - return '\0'; + { + if (url_encoded == url) + u->url = xstrdup (url); + else + u->url = url_encoded; + } + url_encoded = NULL; + + return u; + + err: + /* Cleanup in case of error: */ + if (url_encoded && url_encoded != url) + xfree (url_encoded); + + /* Transmit the error code to the caller, if the caller wants to + know. */ + if (error) + *error = error_code; + return NULL; } - -/* Return the URL as fine-formed string, with a proper protocol, - optional port number, directory and optional user/password. If - HIDE is non-zero, password will be hidden. The forbidden - characters in the URL will be cleansed. 
*/ -char * -str_url (const struct urlinfo *u, int hide) + +/* Return the error message string from ERROR_CODE, which should have + been retrieved from url_parse. The error message is translated. */ + +const char * +url_error (int error_code) { - char *res, *host, *user, *passwd, *proto_name, *dir, *file; - int i, l, ln, lu, lh, lp, lf, ld; - unsigned short proto_default_port; - - /* Look for the protocol name. */ - for (i = 0; i < ARRAY_SIZE (sup_protos); i++) - if (sup_protos[i].ind == u->proto) - break; - if (i == ARRAY_SIZE (sup_protos)) - return NULL; - proto_name = sup_protos[i].name; - proto_default_port = sup_protos[i].port; - host = CLEANDUP (u->host); - dir = CLEANDUP (u->dir); - file = CLEANDUP (u->file); - user = passwd = NULL; - if (u->user) - user = CLEANDUP (u->user); - if (u->passwd) + assert (error_code >= 0 && error_code < countof (parse_errors)); + return _(parse_errors[error_code]); +} + +/* Split PATH into DIR and FILE. PATH comes from the URL and is + expected to be URL-escaped. + + The path is split into directory (the part up to the last slash) + and file (the part after the last slash), which are subsequently + unescaped. Examples: + + PATH DIR FILE + "foo/bar/baz" "foo/bar" "baz" + "foo/bar/" "foo/bar" "" + "foo" "" "foo" + "foo/bar/baz%2fqux" "foo/bar" "baz/qux" (!) + + DIR and FILE are freshly allocated. 
*/ + +static void +split_path (const char *path, char **dir, char **file) +{ + char *last_slash = strrchr (path, '/'); + if (!last_slash) { - int i; - passwd = CLEANDUP (u->passwd); - if (hide) - for (i = 0; passwd[i]; i++) - passwd[i] = 'x'; + *dir = xstrdup (""); + *file = xstrdup (path); } - if (u->proto == URLFTP && *dir == '/') + else { - char *tmp = (char *)xmalloc (strlen (dir) + 3); - /*sprintf (tmp, "%%2F%s", dir + 1);*/ - tmp[0] = '%'; - tmp[1] = '2'; - tmp[2] = 'F'; - strcpy (tmp + 3, dir + 1); - xfree (dir); - dir = tmp; + *dir = strdupdelim (path, last_slash); + *file = xstrdup (last_slash + 1); } + url_unescape (*dir); + url_unescape (*file); +} + +/* Note: URL's "full path" is the path with the query string and + params appended. The "fragment" (#foo) is intentionally ignored, + but that might be changed. For example, if the original URL was + "http://host:port/foo/bar/baz;bullshit?querystring#uselessfragment", + the full path will be "/foo/bar/baz;bullshit?querystring". */ + +/* Return the length of the full path, without the terminating + zero. */ + +static int +full_path_length (const struct url *url) +{ + int len = 0; + +#define FROB(el) if (url->el) len += 1 + strlen (url->el) + + FROB (path); + FROB (params); + FROB (query); + +#undef FROB + + return len; +} + +/* Write out the full path. */ + +static void +full_path_write (const struct url *url, char *where) +{ +#define FROB(el, chr) do { \ + char *f_el = url->el; \ + if (f_el) { \ + int l = strlen (f_el); \ + *where++ = chr; \ + memcpy (where, f_el, l); \ + where += l; \ + } \ +} while (0) + + FROB (path, '/'); + FROB (params, ';'); + FROB (query, '?'); + +#undef FROB +} + +/* Public function for getting the "full path". E.g. if u->path is + "foo/bar" and u->query is "param=value", full_path will be + "/foo/bar?param=value". 
*/ + +char * +url_full_path (const struct url *url) +{ + int length = full_path_length (url); + char *full_path = (char *) xmalloc (length + 1); - ln = strlen (proto_name); - lu = user ? strlen (user) : 0; - lp = passwd ? strlen (passwd) : 0; - lh = strlen (host); - ld = strlen (dir); - lf = strlen (file); - res = (char *)xmalloc (ln + lu + lp + lh + ld + lf + 20); /* safe sex */ - /* sprintf (res, "%s%s%s%s%s%s:%d/%s%s%s", proto_name, - (user ? user : ""), (passwd ? ":" : ""), - (passwd ? passwd : ""), (user ? "@" : ""), - host, u->port, dir, *dir ? "/" : "", file); */ - l = 0; - memcpy (res, proto_name, ln); - l += ln; - if (user) + full_path_write (url, full_path); + full_path[length] = '\0'; + + return full_path; +} + +/* Unescape CHR in an otherwise escaped STR. Used to selectively + escaping of certain characters, such as "/" and ":". Returns a + count of unescaped chars. */ + +static void +unescape_single_char (char *str, char chr) +{ + const char c1 = XNUM_TO_DIGIT (chr >> 4); + const char c2 = XNUM_TO_DIGIT (chr & 0xf); + char *h = str; /* hare */ + char *t = str; /* tortoise */ + for (; *h; h++, t++) { - memcpy (res + l, user, lu); - l += lu; - if (passwd) + if (h[0] == '%' && h[1] == c1 && h[2] == c2) { - res[l++] = ':'; - memcpy (res + l, passwd, lp); - l += lp; + *t = chr; + h += 2; } - res[l++] = '@'; + else + *t = *h; } - memcpy (res + l, host, lh); - l += lh; - if (u->port != proto_default_port) + *t = '\0'; +} + +/* Escape unsafe and reserved characters, except for the slash + characters. */ + +static char * +url_escape_dir (const char *dir) +{ + char *newdir = url_escape_1 (dir, urlchr_unsafe | urlchr_reserved, 1); + if (newdir == dir) + return (char *)dir; + + unescape_single_char (newdir, '/'); + return newdir; +} + +/* Sync u->path and u->url with u->dir and u->file. Called after + u->file or u->dir have been changed, typically by the FTP code. 
*/ + +static void +sync_path (struct url *u) +{ + char *newpath, *efile, *edir; + + xfree (u->path); + + /* u->dir and u->file are not escaped. URL-escape them before + reassembling them into u->path. That way, if they contain + separators like '?' or even if u->file contains slashes, the + path will be correctly assembled. (u->file can contain slashes + if the URL specifies it with %2f, or if an FTP server returns + it.) */ + edir = url_escape_dir (u->dir); + efile = url_escape_1 (u->file, urlchr_unsafe | urlchr_reserved, 1); + + if (!*edir) + newpath = xstrdup (efile); + else { - res[l++] = ':'; - long_to_string (res + l, (long)u->port); - l += numdigit (u->port); + int dirlen = strlen (edir); + int filelen = strlen (efile); + + /* Copy "DIR/FILE" to newpath. */ + char *p = newpath = xmalloc (dirlen + 1 + filelen + 1); + memcpy (p, edir, dirlen); + p += dirlen; + *p++ = '/'; + memcpy (p, efile, filelen); + p += filelen; + *p++ = '\0'; } - res[l++] = '/'; - memcpy (res + l, dir, ld); - l += ld; - if (*dir) - res[l++] = '/'; - strcpy (res + l, file); - xfree (host); - xfree (dir); - xfree (file); - FREE_MAYBE (user); - FREE_MAYBE (passwd); - return res; + + u->path = newpath; + + if (edir != u->dir) + xfree (edir); + if (efile != u->file) + xfree (efile); + + /* Regenerate u->url as well. */ + xfree (u->url); + u->url = url_string (u, 0); } -/* Check whether two URL-s are equivalent, i.e. pointing to the same - location. Uses parseurl to parse them, and compares the canonical - forms. +/* Mutators. Code in ftp.c insists on changing u->dir and u->file. + This way we can sync u->path and u->url when they get changed. */ - Returns 1 if the URL1 is equivalent to URL2, 0 otherwise. Also - return 0 on error. 
*/ -int -url_equal (const char *url1, const char *url2) +void +url_set_dir (struct url *url, const char *newdir) { - struct urlinfo *u1, *u2; - uerr_t err; - int res; - - u1 = newurl (); - err = parseurl (url1, u1, 0); - if (err != URLOK) - { - freeurl (u1, 1); - return 0; - } - u2 = newurl (); - err = parseurl (url2, u2, 0); - if (err != URLOK) - { - freeurl (u2, 1); - return 0; - } - res = !strcmp (u1->url, u2->url); - freeurl (u1, 1); - freeurl (u2, 1); - return res; + xfree (url->dir); + url->dir = xstrdup (newdir); + sync_path (url); } - -urlpos * -get_urls_file (const char *file) -{ - struct file_memory *fm; - urlpos *head, *tail; - const char *text, *text_end; - /* Load the file. */ - fm = read_file (file); - if (!fm) - { - logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno)); - return NULL; - } - DEBUGP (("Loaded %s (size %ld).\n", file, fm->length)); - head = tail = NULL; - text = fm->content; - text_end = fm->content + fm->length; - while (text < text_end) - { - const char *line_beg = text; - const char *line_end = memchr (text, '\n', text_end - text); - if (!line_end) - line_end = text_end; - else - ++line_end; - text = line_end; - while (line_beg < line_end - && ISSPACE (*line_beg)) - ++line_beg; - while (line_end > line_beg + 1 - && ISSPACE (*(line_end - 1))) - --line_end; - if (line_end > line_beg) - { - urlpos *entry = (urlpos *)xmalloc (sizeof (urlpos)); - memset (entry, 0, sizeof (*entry)); - entry->next = NULL; - entry->url = strdupdelim (line_beg, line_end); - if (!head) - head = entry; - else - tail->next = entry; - tail = entry; - } - } - read_file_free (fm); - return head; -} - -/* Free the linked list of urlpos. 
*/ void -free_urlpos (urlpos *l) +url_set_file (struct url *url, const char *newfile) { - while (l) - { - urlpos *next = l->next; - xfree (l->url); - FREE_MAYBE (l->local_name); - xfree (l); - l = next; - } + xfree (url->file); + url->file = xstrdup (newfile); + sync_path (url); } -/* Rotate FNAME opt.backups times */ void -rotate_backups(const char *fname) +url_free (struct url *url) { - int maxlen = strlen (fname) + 1 + numdigit (opt.backups) + 1; - char *from = (char *)alloca (maxlen); - char *to = (char *)alloca (maxlen); - struct stat sb; - int i; + xfree (url->host); + xfree (url->path); + xfree (url->url); - if (stat (fname, &sb) == 0) - if (S_ISREG (sb.st_mode) == 0) - return; + xfree_null (url->params); + xfree_null (url->query); + xfree_null (url->fragment); + xfree_null (url->user); + xfree_null (url->passwd); - for (i = opt.backups; i > 1; i--) - { - sprintf (from, "%s.%d", fname, i - 1); - sprintf (to, "%s.%d", fname, i); - /* #### This will fail on machines without the rename() system - call. */ - rename (from, to); - } + xfree (url->dir); + xfree (url->file); - sprintf (to, "%s.%d", fname, 1); - rename(fname, to); + xfree (url); } - + /* Create all the necessary directories for PATH (a file). Calls mkdirhier() internally. */ int @@ -880,15 +1148,18 @@ mkalldirs (const char *path) { const char *p; char *t; - struct stat st; + struct_stat st; int res; p = path + strlen (path); - for (; *p != '/' && p != path; p--); + for (; *p != '/' && p != path; p--) + ; + /* Don't create if it's just a file. */ if ((p == path) && (*p != '/')) return 0; t = strdupdelim (path, p); + /* Check whether the directory exists. */ if ((stat (t, &st) == 0)) { @@ -921,141 +1192,318 @@ mkalldirs (const char *path) xfree (t); return res; } + +/* Functions for constructing the file name out of URL components. */ + +/* A growable string structure, used by url_file_name and friends. + This should perhaps be moved to utils.c. 
+ + The idea is to have a convenient and efficient way to construct a + string by having various functions append data to it. Instead of + passing the obligatory BASEVAR, SIZEVAR and TAILPOS to all the + functions in questions, we pass the pointer to this struct. */ + +struct growable { + char *base; + int size; + int tail; +}; + +/* Ensure that the string can accept APPEND_COUNT more characters past + the current TAIL position. If necessary, this will grow the string + and update its allocated size. If the string is already large + enough to take TAIL+APPEND_COUNT characters, this does nothing. */ +#define GROW(g, append_size) do { \ + struct growable *G_ = g; \ + DO_REALLOC (G_->base, G_->size, G_->tail + append_size, char); \ +} while (0) + +/* Return the tail position of the string. */ +#define TAIL(r) ((r)->base + (r)->tail) + +/* Move the tail position by APPEND_COUNT characters. */ +#define TAIL_INCR(r, append_count) ((r)->tail += append_count) + +/* Append the string STR to DEST. NOTICE: the string in DEST is not + terminated. */ + +static void +append_string (const char *str, struct growable *dest) +{ + int l = strlen (str); + GROW (dest, l); + memcpy (TAIL (dest), str, l); + TAIL_INCR (dest, l); +} -static int -count_slashes (const char *s) +/* Append CH to DEST. For example, append_char (0, DEST) + zero-terminates DEST. */ + +static void +append_char (char ch, struct growable *dest) { - int i = 0; - while (*s) - if (*s++ == '/') - ++i; - return i; + GROW (dest, 1); + *TAIL (dest) = ch; + TAIL_INCR (dest, 1); } -/* Return the path name of the URL-equivalent file name, with a - remote-like structure of directories. */ -static char * -mkstruct (const struct urlinfo *u) +enum { + filechr_not_unix = 1, /* unusable on Unix, / and \0 */ + filechr_not_windows = 2, /* unusable on Windows, one of \|/<>?:*" */ + filechr_control = 4 /* a control character, e.g. 
0-31 */ +}; + +#define FILE_CHAR_TEST(c, mask) (filechr_table[(unsigned char)(c)] & (mask)) + +/* Shorthands for the table: */ +#define U filechr_not_unix +#define W filechr_not_windows +#define C filechr_control + +#define UW U|W +#define UWC U|W|C + +/* Table of characters unsafe under various conditions (see above). + + Arguably we could also claim `%' to be unsafe, since we use it as + the escape character. If we ever want to be able to reliably + translate file name back to URL, this would become important + crucial. Right now, it's better to be minimal in escaping. */ + +static const unsigned char filechr_table[256] = { - char *host, *dir, *file, *res, *dirpref; - int l; +UWC, C, C, C, C, C, C, C, /* NUL SOH STX ETX EOT ENQ ACK BEL */ + C, C, C, C, C, C, C, C, /* BS HT LF VT FF CR SO SI */ + C, C, C, C, C, C, C, C, /* DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ + C, C, C, C, C, C, C, C, /* CAN EM SUB ESC FS GS RS US */ + 0, 0, W, 0, 0, 0, 0, 0, /* SP ! " # $ % & ' */ + 0, 0, W, 0, 0, 0, 0, UW, /* ( ) * + , - . / */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0 1 2 3 4 5 6 7 */ + 0, 0, W, 0, W, 0, W, W, /* 8 9 : ; < = > ? 
*/ + 0, 0, 0, 0, 0, 0, 0, 0, /* @ A B C D E F G */ + 0, 0, 0, 0, 0, 0, 0, 0, /* H I J K L M N O */ + 0, 0, 0, 0, 0, 0, 0, 0, /* P Q R S T U V W */ + 0, 0, 0, 0, W, 0, 0, 0, /* X Y Z [ \ ] ^ _ */ + 0, 0, 0, 0, 0, 0, 0, 0, /* ` a b c d e f g */ + 0, 0, 0, 0, 0, 0, 0, 0, /* h i j k l m n o */ + 0, 0, 0, 0, 0, 0, 0, 0, /* p q r s t u v w */ + 0, 0, 0, 0, 0, 0, 0, 0, /* x y z { | } ~ DEL */ + + C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, /* 128-143 */ + C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, /* 144-159 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; +#undef U +#undef W +#undef C +#undef UW +#undef UWC + +/* FN_PORT_SEP is the separator between host and port in file names + for non-standard port numbers. On Unix this is normally ':', as in + "www.xemacs.org:4001/index.html". Under Windows, we set it to + + because Windows can't handle ':' in file names. */ +#define FN_PORT_SEP (opt.restrict_files_os != restrict_windows ? ':' : '+') + +/* FN_QUERY_SEP is the separator between the file name and the URL + query, normally '?'. Since Windows cannot handle '?' as part of + file name, we use '@' instead there. */ +#define FN_QUERY_SEP (opt.restrict_files_os != restrict_windows ? '?' : '@') + +/* Quote path element, characters in [b, e), as file name, and append + the quoted string to DEST. Each character is quoted as per + file_unsafe_char and the corresponding table. + + If ESCAPED_P is non-zero, the path element is considered to be + URL-escaped and will be unescaped prior to inspection. 
*/ + +static void +append_uri_pathel (const char *b, const char *e, int escaped_p, + struct growable *dest) +{ + const char *p; + int quoted, outlen; - assert (u->dir != NULL); - assert (u->host != NULL); + int mask; + if (opt.restrict_files_os == restrict_unix) + mask = filechr_not_unix; + else + mask = filechr_not_windows; + if (opt.restrict_files_ctrl) + mask |= filechr_control; - if (opt.cut_dirs) + /* Copy [b, e) to PATHEL and URL-unescape it. */ + if (escaped_p) { - char *ptr = u->dir + (*u->dir == '/'); - int slash_count = 1 + count_slashes (ptr); - int cut = MINVAL (opt.cut_dirs, slash_count); - for (; cut && *ptr; ptr++) - if (*ptr == '/') - --cut; - STRDUP_ALLOCA (dir, ptr); + char *unescaped; + BOUNDED_TO_ALLOCA (b, e, unescaped); + url_unescape (unescaped); + b = unescaped; + e = unescaped + strlen (unescaped); } - else - dir = u->dir + (*u->dir == '/'); - host = xstrdup (u->host); - /* Check for the true name (or at least a consistent name for saving - to directory) of HOST, reusing the hlist if possible. */ - if (opt.add_hostdir && !opt.simple_check) + /* Defang ".." when found as component of path. Remember that path + comes from the URL and might contain malicious input. */ + if (e - b == 2 && b[0] == '.' && b[1] == '.') + { + b = "%2E%2E"; + e = b + 6; + } + + /* Walk the PATHEL string and check how many characters we'll need + to quote. */ + quoted = 0; + for (p = b; p < e; p++) + if (FILE_CHAR_TEST (*p, mask)) + ++quoted; + + /* Calculate the length of the output string. e-b is the input + string length. Each quoted char introduces two additional + characters in the string, hence 2*quoted. */ + outlen = (e - b) + (2 * quoted); + GROW (dest, outlen); + + if (!quoted) { - char *nhost = realhost (host); - xfree (host); - host = nhost; + /* If there's nothing to quote, we can simply append the string + without processing it again. */ + memcpy (TAIL (dest), b, outlen); } - /* Add dir_prefix and hostname (if required) to the beginning of - dir. 
*/ - if (opt.add_hostdir) + else { - if (!DOTP (opt.dir_prefix)) + char *q = TAIL (dest); + for (p = b; p < e; p++) { - dirpref = (char *)alloca (strlen (opt.dir_prefix) + 1 - + strlen (host) + 1); - sprintf (dirpref, "%s/%s", opt.dir_prefix, host); + if (!FILE_CHAR_TEST (*p, mask)) + *q++ = *p; + else + { + unsigned char ch = *p; + *q++ = '%'; + *q++ = XNUM_TO_DIGIT (ch >> 4); + *q++ = XNUM_TO_DIGIT (ch & 0xf); + } } - else - STRDUP_ALLOCA (dirpref, host); - } - else /* not add_hostdir */ - { - if (!DOTP (opt.dir_prefix)) - dirpref = opt.dir_prefix; - else - dirpref = ""; + assert (q - TAIL (dest) == outlen); } - xfree (host); + TAIL_INCR (dest, outlen); +} + +/* Append to DEST the directory structure that corresponds the + directory part of URL's path. For example, if the URL is + http://server/dir1/dir2/file, this appends "/dir1/dir2". + + Each path element ("dir1" and "dir2" in the above example) is + examined, url-unescaped, and re-escaped as file name element. + + Additionally, it cuts as many directories from the path as + specified by opt.cut_dirs. For example, if opt.cut_dirs is 1, it + will produce "bar" for the above example. For 2 or more, it will + produce "". - /* If there is a prefix, prepend it. */ - if (*dirpref) + Each component of the path is quoted for use as file name. */ + +static void +append_dir_structure (const struct url *u, struct growable *dest) +{ + char *pathel, *next; + int cut = opt.cut_dirs; + + /* Go through the path components, de-URL-quote them, and quote them + (if necessary) as file names. */ + + pathel = u->path; + for (; (next = strchr (pathel, '/')) != NULL; pathel = next + 1) { - char *newdir = (char *)alloca (strlen (dirpref) + 1 + strlen (dir) + 2); - sprintf (newdir, "%s%s%s", dirpref, *dir == '/' ? "" : "/", dir); - dir = newdir; + if (cut-- > 0) + continue; + if (pathel == next) + /* Ignore empty pathels. 
*/ + continue; + + if (dest->tail) + append_char ('/', dest); + append_uri_pathel (pathel, next, 1, dest); } - dir = xstrdup (dir); - URL_CLEANSE (dir); - l = strlen (dir); - if (l && dir[l - 1] == '/') - dir[l - 1] = '\0'; - - if (!*u->file) - file = "index.html"; - else - file = u->file; - - /* Finally, construct the full name. */ - res = (char *)xmalloc (strlen (dir) + 1 + strlen (file) + 1); - sprintf (res, "%s%s%s", dir, *dir ? "/" : "", file); - xfree (dir); - return res; } -/* Create a unique filename, corresponding to a given URL. Calls - mkstruct if necessary. Does *not* actually create any directories. */ +/* Return a unique file name that matches the given URL as good as + possible. Does not create directories on the file system. */ + char * -url_filename (const struct urlinfo *u) +url_file_name (const struct url *u) { - char *file, *name; - int have_prefix = 0; /* whether we must prepend opt.dir_prefix */ + struct growable fnres; /* stands for "file name result" */ + + const char *u_file, *u_query; + char *fname, *unique; + + fnres.base = NULL; + fnres.size = 0; + fnres.tail = 0; + + /* Start with the directory prefix, if specified. */ + if (opt.dir_prefix) + append_string (opt.dir_prefix, &fnres); + /* If "dirstruct" is turned on (typically the case with -r), add + the host and port (unless those have been turned off) and + directory structure. */ if (opt.dirstruct) { - file = mkstruct (u); - have_prefix = 1; - } - else - { - if (!*u->file) - file = xstrdup ("index.html"); - else - file = xstrdup (u->file); + if (opt.protocol_directories) + { + if (fnres.tail) + append_char ('/', &fnres); + append_string (supported_schemes[u->scheme].name, &fnres); + } + if (opt.add_hostdir) + { + if (fnres.tail) + append_char ('/', &fnres); + if (0 != strcmp (u->host, "..")) + append_string (u->host, &fnres); + else + /* Host name can come from the network; malicious DNS may + allow ".." to be resolved, causing us to write to + "../". Defang such host names. 
*/ + append_string ("%2E%2E", &fnres); + if (u->port != scheme_default_port (u->scheme)) + { + char portstr[24]; + number_to_string (portstr, u->port); + append_char (FN_PORT_SEP, &fnres); + append_string (portstr, &fnres); + } + } + + append_dir_structure (u, &fnres); } - if (!have_prefix) + /* Add the file name. */ + if (fnres.tail) + append_char ('/', &fnres); + u_file = *u->file ? u->file : "index.html"; + append_uri_pathel (u_file, u_file + strlen (u_file), 0, &fnres); + + /* Append "?query" to the file name. */ + u_query = u->query && *u->query ? u->query : NULL; + if (u_query) { - /* Check whether the prefix directory is something other than "." - before prepending it. */ - if (!DOTP (opt.dir_prefix)) - { - char *nfile = (char *)xmalloc (strlen (opt.dir_prefix) - + 1 + strlen (file) + 1); - sprintf (nfile, "%s/%s", opt.dir_prefix, file); - xfree (file); - file = nfile; - } + append_char (FN_QUERY_SEP, &fnres); + append_uri_pathel (u_query, u_query + strlen (u_query), 1, &fnres); } - /* DOS-ish file systems don't like `%' signs in them; we change it - to `@'. */ -#ifdef WINDOWS - { - char *p = file; - for (p = file; *p; p++) - if (*p == '%') - *p = '@'; - } -#endif /* WINDOWS */ + + /* Zero-terminate the file name. */ + append_char ('\0', &fnres); + + fname = fnres.base; /* Check the cases in which the unique extensions are not used: 1) Clobbering is turned off (-nc). @@ -1064,574 +1512,526 @@ url_filename (const struct urlinfo *u) 4) Hierarchy is built. The exception is the case when file does exist and is a - directory (actually support for bad httpd-s). */ + directory (see `mkalldirs' for explanation). */ + if ((opt.noclobber || opt.always_rest || opt.timestamping || opt.dirstruct) - && !(file_exists_p (file) && !file_non_directory_p (file))) - return file; + && !(file_exists_p (fname) && !file_non_directory_p (fname))) + return fname; - /* Find a unique name. 
*/ - name = unique_name (file); - xfree (file); - return name; + unique = unique_name (fname, 1); + if (unique != fname) + xfree (fname); + return unique; } + +/* Resolve "." and ".." elements of PATH by destructively modifying + PATH and return non-zero if PATH has been modified, zero otherwise. -/* Like strlen(), but allow the URL to be ended with '?'. */ -static int -urlpath_length (const char *url) -{ - const char *q = strchr (url, '?'); - if (q) - return q - url; - return strlen (url); -} + The algorithm is in spirit similar to the one described in rfc1808, + although implemented differently, in one pass. To recap, path + elements containing only "." are removed, and ".." is taken to mean + "back up one element". Single leading and trailing slashes are + preserved. -/* Find the last occurrence of character C in the range [b, e), or - NULL, if none are present. This is almost completely equivalent to - { *e = '\0'; return strrchr(b); }, except that it doesn't change - the contents of the string. */ -static const char * -find_last_char (const char *b, const char *e, char c) -{ - for (; e > b; e--) - if (*e == c) - return e; - return NULL; -} + This function does not handle URL escapes explicitly. If you're + passing paths from URLs, make sure to unquote "%2e" and "%2E" to + ".", so that this function can find the dots. (Wget's URL parser + calls reencode_escapes, which see.) -/* Construct a URL by concatenating an absolute URL and a path, which - may or may not be absolute. This tries to behave "reasonably" in - all foreseeable cases. It employs little specific knowledge about - protocols or URL-specific stuff -- it just works on strings. */ -static char * -construct (const char *url, const char *sub, int subsize, int no_proto) + For example, "a/b/c/./../d/.." will yield "a/b/". More exhaustive + test examples are provided below. If you change anything in this + function, run test_path_simplify to make sure you haven't broken a + test case. 
*/ + +static int +path_simplify (char *path) { - char *constr; + char *h = path; /* hare */ + char *t = path; /* tortoise */ + char *beg = path; /* boundary for backing the tortoise */ + char *end = path + strlen (path); - if (no_proto) + while (h < end) { - const char *end = url + urlpath_length (url); + /* Hare should be at the beginning of a path element. */ - if (*sub != '/') + if (h[0] == '.' && (h[1] == '/' || h[1] == '\0')) { - /* SUB is a relative URL: we need to replace everything - after last slash (possibly empty) with SUB. - - So, if URL is "whatever/foo/bar", and SUB is "qux/xyzzy", - our result should be "whatever/foo/qux/xyzzy". */ - int need_explicit_slash = 0; - int span; - const char *start_insert; - const char *last_slash = find_last_char (url, end, '/'); /* the last slash. */ - if (!last_slash) - { - /* No slash found at all. Append SUB to what we have, - but we'll need a slash as a separator. - - Example: if url == "foo" and sub == "qux/xyzzy", then - we cannot just append sub to url, because we'd get - "fooqux/xyzzy", whereas what we want is - "foo/qux/xyzzy". - - To make sure the / gets inserted, we set - need_explicit_slash to 1. We also set start_insert - to end + 1, so that the length calculations work out - correctly for one more (slash) character. Accessing - that character is fine, since it will be the - delimiter, '\0' or '?'. */ - /* example: "foo?..." */ - /* ^ ('?' gets changed to '/') */ - start_insert = end + 1; - need_explicit_slash = 1; - } - else if (last_slash && last_slash != url && *(last_slash - 1) == '/') + /* Ignore "./". */ + h += 2; + } + else if (h[0] == '.' && h[1] == '.' && (h[2] == '/' || h[2] == '\0')) + { + /* Handle "../" by retreating the tortoise by one path + element -- but not past beggining. */ + if (t > beg) { - /* example: http://host" */ - /* ^ */ - start_insert = end + 1; - need_explicit_slash = 1; + /* Move backwards until T hits the beginning of the + previous path element or the beginning of path. 
 */
+	      for (--t; t > beg && t[-1] != '/'; t--)
+		;
 	    }
 	  else
 	    {
-	      /* example: "whatever/foo/bar" */
-	      /*                        ^    */
-	      start_insert = last_slash + 1;
+	      /* If we're at the beginning, copy the "../" literally
+		 and move the beginning so a later ".." doesn't remove
+		 it.  */
+	      beg = t + 3;
+	      goto regular;
 	    }
-
-	  span = start_insert - url;
-	  constr = (char *)xmalloc (span + subsize + 1);
-	  if (span)
-	    memcpy (constr, url, span);
-	  if (need_explicit_slash)
-	    constr[span - 1] = '/';
-	  if (subsize)
-	    memcpy (constr + span, sub, subsize);
-	  constr[span + subsize] = '\0';
+	  h += 3;
 	}
-      else /* *sub == `/' */
+      else
 	{
-	  /* SUB is an absolute path: we need to replace everything
-	     after (and including) the FIRST slash with SUB.
-
-	     So, if URL is "http://host/whatever/foo/bar", and SUB is
-	     "/qux/xyzzy", our result should be
-	     "http://host/qux/xyzzy".  */
-	  int span;
-	  const char *slash;
-	  const char *start_insert = NULL; /* for gcc to shut up. */
-	  const char *pos = url;
-	  int seen_slash_slash = 0;
-	  /* We're looking for the first slash, but want to ignore
-	     double slash. */
-	again:
-	  slash = memchr (pos, '/', end - pos);
-	  if (slash && !seen_slash_slash)
-	    if (*(slash + 1) == '/')
-	      {
-		pos = slash + 2;
-		seen_slash_slash = 1;
-		goto again;
-	      }
-
-	  /* At this point, SLASH is the location of the first / after
-	     "//", or the first slash altogether.  START_INSERT is the
-	     pointer to the location where SUB will be inserted.  When
-	     examining the last two examples, keep in mind that SUB
-	     begins with '/'. 
*/ - - if (!slash && !seen_slash_slash) - /* example: "foo" */ - /* ^ */ - start_insert = url; - else if (!slash && seen_slash_slash) - /* example: "http://foo" */ - /* ^ */ - start_insert = end; - else if (slash && !seen_slash_slash) - /* example: "foo/bar" */ - /* ^ */ - start_insert = url; - else if (slash && seen_slash_slash) - /* example: "http://something/" */ - /* ^ */ - start_insert = slash; - - span = start_insert - url; - constr = (char *)xmalloc (span + subsize + 1); - if (span) - memcpy (constr, url, span); - if (subsize) - memcpy (constr + span, sub, subsize); - constr[span + subsize] = '\0'; + regular: + /* A regular path element. If H hasn't advanced past T, + simply skip to the next path element. Otherwise, copy + the path element until the next slash. */ + if (t == h) + { + /* Skip the path element, including the slash. */ + while (h < end && *h != '/') + t++, h++; + if (h < end) + t++, h++; + } + else + { + /* Copy the path element, including the final slash. */ + while (h < end && *h != '/') + *t++ = *h++; + if (h < end) + *t++ = *h++; + } } } - else /* !no_proto */ - { - constr = strdupdelim (sub, sub + subsize); - } - return constr; -} -/* Like the function above, but with a saner caller interface. */ -char * -url_concat (const char *base_url, const char *new_url) -{ - return construct (base_url, new_url, strlen (new_url), !has_proto (new_url)); + if (t != h) + *t = '\0'; + + return t != h; } -/* Optimize URL by host, destructively replacing u->host with realhost - (u->host). Do this regardless of opt.simple_check. */ -void -opt_url (struct urlinfo *u) +/* Return the length of URL's path. Path is considered to be + terminated by one of '?', ';', '#', or by the end of the + string. */ + +static int +path_length (const char *url) { - /* Find the "true" host. */ - char *host = realhost (u->host); - xfree (u->host); - u->host = host; - assert (u->dir != NULL); /* the URL must have been parsed */ - /* Refresh the printed representation. 
*/ - xfree (u->url); - u->url = str_url (u, 0); + const char *q = strpbrk_or_eos (url, "?;#"); + return q - url; } -/* This beautiful kludge is fortunately not needed, as I've made - parse_dir do the (almost) right thing, so that a query can never - become a part of directory. */ -#if 0 -/* Call path_simplify, but make sure that the part after the - question-mark, if any, is not destroyed by path_simplify's - "optimizations". */ -void -path_simplify_with_kludge (char *path) +/* Find the last occurrence of character C in the range [b, e), or + NULL, if none are present. We might want to use memrchr (a GNU + extension) under GNU libc. */ + +static const char * +find_last_char (const char *b, const char *e, char c) { - char *query = strchr (path, '?'); - if (query) - /* path_simplify also works destructively, so we also have the - license to write. */ - *query = '\0'; - path_simplify (path); - if (query) - { - char *newend = path + strlen (path); - *query = '?'; - if (newend != query) - memmove (newend, query, strlen (query) + 1); - } + for (; e > b; e--) + if (*e == c) + return e; + return NULL; } -#endif - -/* Returns proxy host address, in accordance with PROTO. */ + +/* Merge BASE with LINK and return the resulting URI. + + Either of the URIs may be absolute or relative, complete with the + host name, or path only. This tries to reasonably handle all + foreseeable cases. It only employs minimal URL parsing, without + knowledge of the specifics of schemes. + + I briefly considered making this function call path_simplify after + the merging process, as rfc1738 seems to suggest. This is a bad + idea for several reasons: 1) it complexifies the code, and 2) + url_parse has to simplify path anyway, so it's wasteful to boot. */ + char * -getproxy (uerr_t proto) +uri_merge (const char *base, const char *link) { - if (proto == URLHTTP) - return opt.http_proxy ? opt.http_proxy : getenv ("http_proxy"); - else if (proto == URLFTP) - return opt.ftp_proxy ? 
opt.ftp_proxy : getenv ("ftp_proxy"); - else - return NULL; -} + int linklength; + const char *end; + char *merge; -/* Should a host be accessed through proxy, concerning no_proxy? */ -int -no_proxy_match (const char *host, const char **no_proxy) -{ - if (!no_proxy) - return 1; - else - return !sufmatch (no_proxy, host); -} - -static void write_backup_file PARAMS ((const char *, downloaded_file_t)); + if (url_has_scheme (link)) + return xstrdup (link); -/* Change the links in an HTML document. Accepts a structure that - defines the positions of all the links. */ -void -convert_links (const char *file, urlpos *l) -{ - struct file_memory *fm; - FILE *fp; - char *p; - downloaded_file_t downloaded_file_return; - - logprintf (LOG_VERBOSE, _("Converting %s... "), file); - - { - /* First we do a "dry run": go through the list L and see whether - any URL needs to be converted in the first place. If not, just - leave the file alone. */ - int count = 0; - urlpos *dry = l; - for (dry = l; dry; dry = dry->next) - if (dry->convert != CO_NOCONVERT) - ++count; - if (!count) - { - logputs (LOG_VERBOSE, _("nothing to do.\n")); - return; - } - } + /* We may not examine BASE past END. */ + end = base + path_length (base); + linklength = strlen (link); - fm = read_file (file); - if (!fm) + if (!*link) { - logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"), - file, strerror (errno)); - return; + /* Empty LINK points back to BASE, query string and all. */ + return xstrdup (base); } - - downloaded_file_return = downloaded_file (CHECK_FOR_FILE, file); - if (opt.backup_converted && downloaded_file_return) - write_backup_file (file, downloaded_file_return); - - /* Before opening the file for writing, unlink the file. This is - important if the data in FM is mmaped. In such case, nulling the - file, which is what fopen() below does, would make us read all - zeroes from the mmaped region. 
*/ - if (unlink (file) < 0 && errno != ENOENT) + else if (*link == '?') { - logprintf (LOG_NOTQUIET, _("Unable to delete `%s': %s\n"), - file, strerror (errno)); - read_file_free (fm); - return; + /* LINK points to the same location, but changes the query + string. Examples: */ + /* uri_merge("path", "?new") -> "path?new" */ + /* uri_merge("path?foo", "?new") -> "path?new" */ + /* uri_merge("path?foo#bar", "?new") -> "path?new" */ + /* uri_merge("path#foo", "?new") -> "path?new" */ + int baselength = end - base; + merge = xmalloc (baselength + linklength + 1); + memcpy (merge, base, baselength); + memcpy (merge + baselength, link, linklength); + merge[baselength + linklength] = '\0'; } - /* Now open the file for writing. */ - fp = fopen (file, "wb"); - if (!fp) + else if (*link == '#') { - logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"), - file, strerror (errno)); - read_file_free (fm); - return; + /* uri_merge("path", "#new") -> "path#new" */ + /* uri_merge("path#foo", "#new") -> "path#new" */ + /* uri_merge("path?foo", "#new") -> "path?foo#new" */ + /* uri_merge("path?foo#bar", "#new") -> "path?foo#new" */ + int baselength; + const char *end1 = strchr (base, '#'); + if (!end1) + end1 = base + strlen (base); + baselength = end1 - base; + merge = xmalloc (baselength + linklength + 1); + memcpy (merge, base, baselength); + memcpy (merge + baselength, link, linklength); + merge[baselength + linklength] = '\0'; } - /* Here we loop through all the URLs in file, replacing those of - them that are downloaded with relative references. */ - p = fm->content; - for (; l; l = l->next) + else if (*link == '/' && *(link + 1) == '/') { - char *url_start = fm->content + l->pos; - if (l->pos >= fm->length) - { - DEBUGP (("Something strange is going on. Please investigate.")); - break; - } - /* If the URL is not to be converted, skip it. 
*/ - if (l->convert == CO_NOCONVERT) + /* LINK begins with "//" and so is a net path: we need to + replace everything after (and including) the double slash + with LINK. */ + + /* uri_merge("foo", "//new/bar") -> "//new/bar" */ + /* uri_merge("//old/foo", "//new/bar") -> "//new/bar" */ + /* uri_merge("http://old/foo", "//new/bar") -> "http://new/bar" */ + + int span; + const char *slash; + const char *start_insert; + + /* Look for first slash. */ + slash = memchr (base, '/', end - base); + /* If found slash and it is a double slash, then replace + from this point, else default to replacing from the + beginning. */ + if (slash && *(slash + 1) == '/') + start_insert = slash; + else + start_insert = base; + + span = start_insert - base; + merge = (char *)xmalloc (span + linklength + 1); + if (span) + memcpy (merge, base, span); + memcpy (merge + span, link, linklength); + merge[span + linklength] = '\0'; + } + else if (*link == '/') + { + /* LINK is an absolute path: we need to replace everything + after (and including) the FIRST slash with LINK. + + So, if BASE is "http://host/whatever/foo/bar", and LINK is + "/qux/xyzzy", our result should be + "http://host/qux/xyzzy". */ + int span; + const char *slash; + const char *start_insert = NULL; /* for gcc to shut up. */ + const char *pos = base; + int seen_slash_slash = 0; + /* We're looking for the first slash, but want to ignore + double slash. */ + again: + slash = memchr (pos, '/', end - pos); + if (slash && !seen_slash_slash) + if (*(slash + 1) == '/') + { + pos = slash + 2; + seen_slash_slash = 1; + goto again; + } + + /* At this point, SLASH is the location of the first / after + "//", or the first slash altogether. START_INSERT is the + pointer to the location where LINK will be inserted. When + examining the last two examples, keep in mind that LINK + begins with '/'. 
*/ + + if (!slash && !seen_slash_slash) + /* example: "foo" */ + /* ^ */ + start_insert = base; + else if (!slash && seen_slash_slash) + /* example: "http://foo" */ + /* ^ */ + start_insert = end; + else if (slash && !seen_slash_slash) + /* example: "foo/bar" */ + /* ^ */ + start_insert = base; + else if (slash && seen_slash_slash) + /* example: "http://something/" */ + /* ^ */ + start_insert = slash; + + span = start_insert - base; + merge = (char *)xmalloc (span + linklength + 1); + if (span) + memcpy (merge, base, span); + memcpy (merge + span, link, linklength); + merge[span + linklength] = '\0'; + } + else + { + /* LINK is a relative URL: we need to replace everything + after last slash (possibly empty) with LINK. + + So, if BASE is "whatever/foo/bar", and LINK is "qux/xyzzy", + our result should be "whatever/foo/qux/xyzzy". */ + int need_explicit_slash = 0; + int span; + const char *start_insert; + const char *last_slash = find_last_char (base, end, '/'); + if (!last_slash) { - DEBUGP (("Skipping %s at position %d.\n", l->url, l->pos)); - continue; + /* No slash found at all. Replace what we have with LINK. */ + start_insert = base; } - - /* Echo the file contents, up to the offending URL's opening - quote, to the outfile. */ - fwrite (p, 1, url_start - p, fp); - p = url_start; - if (l->convert == CO_CONVERT_TO_RELATIVE) + else if (last_slash && last_slash >= base + 2 + && last_slash[-2] == ':' && last_slash[-1] == '/') { - /* Convert absolute URL to relative. 
*/ - char *newname = construct_relative (file, l->local_name); - char *quoted_newname = html_quote_string (newname); - putc (*p, fp); /* quoting char */ - fputs (quoted_newname, fp); - p += l->size - 1; - putc (*p, fp); /* close quote */ - ++p; - xfree (newname); - xfree (quoted_newname); - DEBUGP (("TO_RELATIVE: %s to %s at position %d in %s.\n", - l->url, newname, l->pos, file)); + /* example: http://host" */ + /* ^ */ + start_insert = end + 1; + need_explicit_slash = 1; } - else if (l->convert == CO_CONVERT_TO_COMPLETE) + else { - /* Convert the link to absolute URL. */ - char *newlink = l->url; - char *quoted_newlink = html_quote_string (newlink); - putc (*p, fp); /* quoting char */ - fputs (quoted_newlink, fp); - p += l->size - 1; - putc (*p, fp); /* close quote */ - ++p; - xfree (quoted_newlink); - DEBUGP (("TO_COMPLETE: to %s at position %d in %s.\n", - newlink, l->pos, file)); + /* example: "whatever/foo/bar" */ + /* ^ */ + start_insert = last_slash + 1; } + + span = start_insert - base; + merge = (char *)xmalloc (span + linklength + 1); + if (span) + memcpy (merge, base, span); + if (need_explicit_slash) + merge[span - 1] = '/'; + memcpy (merge + span, link, linklength); + merge[span + linklength] = '\0'; } - /* Output the rest of the file. */ - if (p - fm->content < fm->length) - fwrite (p, 1, fm->length - (p - fm->content), fp); - fclose (fp); - read_file_free (fm); - logputs (LOG_VERBOSE, _("done.\n")); + + return merge; } + +#define APPEND(p, s) do { \ + int len = strlen (s); \ + memcpy (p, s, len); \ + p += len; \ +} while (0) -/* Construct and return a malloced copy of the relative link from two - pieces of information: local name S1 of the referring file and - local name S2 of the referred file. +/* Use this instead of password when the actual password is supposed + to be hidden. We intentionally use a generic string without giving + away the number of characters in the password, like previous + versions did. 
*/ +#define HIDDEN_PASSWORD "*password*" - So, if S1 is "jagor.srce.hr/index.html" and S2 is - "jagor.srce.hr/images/news.gif", the function will return - "images/news.gif". +/* Recreate the URL string from the data in URL. - Alternately, if S1 is "fly.cc.fer.hr/ioccc/index.html", and S2 is - "fly.cc.fer.hr/images/fly.gif", the function will return - "../images/fly.gif". + If HIDE is non-zero (as it is when we're calling this on a URL we + plan to print, but not when calling it to canonicalize a URL for + use within the program), password will be hidden. Unsafe + characters in the URL will be quoted. */ - Caveats: S1 should not begin with `/', unless S2 also begins with - '/'. S1 should not contain things like ".." and such -- - construct_relative ("fly/ioccc/../index.html", - "fly/images/fly.gif") will fail. (A workaround is to call - something like path_simplify() on S1). */ -static char * -construct_relative (const char *s1, const char *s2) +char * +url_string (const struct url *url, int hide_password) { - int i, cnt, sepdirs1; - char *res; - - if (*s2 == '/') - return xstrdup (s2); - /* S1 should *not* be absolute, if S2 wasn't. */ - assert (*s1 != '/'); - i = cnt = 0; - /* Skip the directories common to both strings. */ - while (1) + int size; + char *result, *p; + char *quoted_host, *quoted_user = NULL, *quoted_passwd = NULL; + + int scheme_port = supported_schemes[url->scheme].default_port; + const char *scheme_str = supported_schemes[url->scheme].leading_string; + int fplen = full_path_length (url); + + int brackets_around_host; + + assert (scheme_str != NULL); + + /* Make sure the user name and password are quoted. 
*/ + if (url->user) { - while (s1[i] && s2[i] - && (s1[i] == s2[i]) - && (s1[i] != '/') - && (s2[i] != '/')) - ++i; - if (s1[i] == '/' && s2[i] == '/') - cnt = ++i; - else - break; + quoted_user = url_escape_allow_passthrough (url->user); + if (url->passwd) + { + if (hide_password) + quoted_passwd = HIDDEN_PASSWORD; + else + quoted_passwd = url_escape_allow_passthrough (url->passwd); + } } - for (sepdirs1 = 0; s1[i]; i++) - if (s1[i] == '/') - ++sepdirs1; - /* Now, construct the file as of: - - ../ repeated sepdirs1 time - - all the non-mutual directories of S2. */ - res = (char *)xmalloc (3 * sepdirs1 + strlen (s2 + cnt) + 1); - for (i = 0; i < sepdirs1; i++) - memcpy (res + 3 * i, "../", 3); - strcpy (res + 3 * i, s2 + cnt); - return res; -} - -/* Add URL to the head of the list L. */ -urlpos * -add_url (urlpos *l, const char *url, const char *file) -{ - urlpos *t; - - t = (urlpos *)xmalloc (sizeof (urlpos)); - memset (t, 0, sizeof (*t)); - t->url = xstrdup (url); - t->local_name = xstrdup (file); - t->next = l; - return t; -} -static void -write_backup_file (const char *file, downloaded_file_t downloaded_file_return) -{ - /* Rather than just writing over the original .html file with the - converted version, save the former to *.orig. Note we only do - this for files we've _successfully_ downloaded, so we don't - clobber .orig files sitting around from previous invocations. */ - - /* Construct the backup filename as the original name plus ".orig". */ - size_t filename_len = strlen(file); - char* filename_plus_orig_suffix; - boolean already_wrote_backup_file = FALSE; - slist* converted_file_ptr; - static slist* converted_files = NULL; - - if (downloaded_file_return == FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED) + /* In the unlikely event that the host name contains non-printable + characters, quote it for displaying to the user. */ + quoted_host = url_escape_allow_passthrough (url->host); + + /* Undo the quoting of colons that URL escaping performs. 
IPv6 + addresses may legally contain colons, and in that case must be + placed in square brackets. */ + if (quoted_host != url->host) + unescape_single_char (quoted_host, ':'); + brackets_around_host = strchr (quoted_host, ':') != NULL; + + size = (strlen (scheme_str) + + strlen (quoted_host) + + (brackets_around_host ? 2 : 0) + + fplen + + 1); + if (url->port != scheme_port) + size += 1 + numdigit (url->port); + if (quoted_user) { - /* Just write "orig" over "html". We need to do it this way - because when we're checking to see if we've downloaded the - file before (to see if we can skip downloading it), we don't - know if it's a text/html file. Therefore we don't know yet - at that stage that -E is going to cause us to tack on - ".html", so we need to compare vs. the original URL plus - ".orig", not the original URL plus ".html.orig". */ - filename_plus_orig_suffix = alloca (filename_len + 1); - strcpy(filename_plus_orig_suffix, file); - strcpy((filename_plus_orig_suffix + filename_len) - 4, "orig"); + size += 1 + strlen (quoted_user); + if (quoted_passwd) + size += 1 + strlen (quoted_passwd); } - else /* downloaded_file_return == FILE_DOWNLOADED_NORMALLY */ + + p = result = xmalloc (size); + + APPEND (p, scheme_str); + if (quoted_user) { - /* Append ".orig" to the name. */ - filename_plus_orig_suffix = alloca (filename_len + sizeof(".orig")); - strcpy(filename_plus_orig_suffix, file); - strcpy(filename_plus_orig_suffix + filename_len, ".orig"); + APPEND (p, quoted_user); + if (quoted_passwd) + { + *p++ = ':'; + APPEND (p, quoted_passwd); + } + *p++ = '@'; } - /* We can get called twice on the same URL thanks to the - convert_all_links() call in main(). If we write the .orig file - each time in such a case, it'll end up containing the first-pass - conversion, not the original file. So, see if we've already been - called on this file. 
*/ - converted_file_ptr = converted_files; - while (converted_file_ptr != NULL) - if (strcmp(converted_file_ptr->string, file) == 0) - { - already_wrote_backup_file = TRUE; - break; - } - else - converted_file_ptr = converted_file_ptr->next; - - if (!already_wrote_backup_file) + if (brackets_around_host) + *p++ = '['; + APPEND (p, quoted_host); + if (brackets_around_host) + *p++ = ']'; + if (url->port != scheme_port) { - /* Rename to .orig before former gets written over. */ - if (rename(file, filename_plus_orig_suffix) != 0) - logprintf (LOG_NOTQUIET, _("Cannot back up %s as %s: %s\n"), - file, filename_plus_orig_suffix, strerror (errno)); - - /* Remember that we've already written a .orig backup for this file. - Note that we never free this memory since we need it till the - convert_all_links() call, which is one of the last things the - program does before terminating. BTW, I'm not sure if it would be - safe to just set 'converted_file_ptr->string' to 'file' below, - rather than making a copy of the string... Another note is that I - thought I could just add a field to the urlpos structure saying - that we'd written a .orig file for this URL, but that didn't work, - so I had to make this separate list. - - This [adding a field to the urlpos structure] didn't work - because convert_file() is called twice: once after all its - sublinks have been retrieved in recursive_retrieve(), and - once at the end of the day in convert_all_links(). The - original linked list collected in recursive_retrieve() is - lost after the first invocation of convert_links(), and - convert_all_links() makes a new one (it calls get_urls_html() - for each file it covers.) That's why your approach didn't - work. The way to make it work is perhaps to make this flag a - field in the `urls_html' list. */ - - converted_file_ptr = xmalloc(sizeof(*converted_file_ptr)); - converted_file_ptr->string = xstrdup(file); /* die on out-of-mem. 
*/ - converted_file_ptr->next = converted_files; - converted_files = converted_file_ptr; + *p++ = ':'; + p = number_to_string (p, url->port); } -} -/* Remembers which files have been downloaded. In the standard case, should be - called with mode == FILE_DOWNLOADED_NORMALLY for each file we actually - download successfully (i.e. not for ones we have failures on or that we skip - due to -N). + full_path_write (url, p); + p += fplen; + *p++ = '\0'; + + assert (p - result == size); + + if (quoted_user && quoted_user != url->user) + xfree (quoted_user); + if (quoted_passwd && !hide_password && quoted_passwd != url->passwd) + xfree (quoted_passwd); + if (quoted_host != url->host) + xfree (quoted_host); + + return result; +} + +/* Return non-zero if scheme a is similar to scheme b. + + Schemes are similar if they are equal. If SSL is supported, schemes + are also similar if one is http (SCHEME_HTTP) and the other is https + (SCHEME_HTTPS). */ +int +schemes_are_similar_p (enum url_scheme a, enum url_scheme b) +{ + if (a == b) + return 1; +#ifdef HAVE_SSL + if ((a == SCHEME_HTTP && b == SCHEME_HTTPS) + || (a == SCHEME_HTTPS && b == SCHEME_HTTP)) + return 1; +#endif + return 0; +} + +#if 0 +/* Debugging and testing support for path_simplify. */ - When we've downloaded a file and tacked on a ".html" extension due to -E, - call this function with FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED rather than - FILE_DOWNLOADED_NORMALLY. +/* Debug: run path_simplify on PATH and return the result in a new + string. Useful for calling from the debugger. */ +static char * +ps (char *path) +{ + char *copy = xstrdup (path); + path_simplify (copy); + return copy; +} - If you just want to check if a file has been previously added without adding - it, call with mode == CHECK_FOR_FILE. Please be sure to call this function - with local filenames, not remote URLs. 
*/ -downloaded_file_t -downloaded_file (downloaded_file_t mode, const char* file) +static void +run_test (char *test, char *expected_result, int expected_change) { - typedef struct _downloaded_file_list - { - char* file; - downloaded_file_t download_type; - struct _downloaded_file_list* next; - } downloaded_file_list; - - boolean found_file = FALSE; - static downloaded_file_list* downloaded_files = NULL; - downloaded_file_list* rover = downloaded_files; - - while (rover != NULL) - if (strcmp(rover->file, file) == 0) - { - found_file = TRUE; - break; - } - else - rover = rover->next; + char *test_copy = xstrdup (test); + int modified = path_simplify (test_copy); - if (found_file) - return rover->download_type; /* file had already been downloaded */ - else + if (0 != strcmp (test_copy, expected_result)) { - if (mode != CHECK_FOR_FILE) - { - rover = xmalloc(sizeof(*rover)); - rover->file = xstrdup(file); /* use xstrdup() so die on out-of-mem. */ - rover->download_type = mode; - rover->next = downloaded_files; - downloaded_files = rover; - } - - return FILE_NOT_ALREADY_DOWNLOADED; + printf ("Failed path_simplify(\"%s\"): expected \"%s\", got \"%s\".\n", + test, expected_result, test_copy); } + if (modified != expected_change) + { + if (expected_change == 1) + printf ("Expected modification with path_simplify(\"%s\").\n", + test); + else + printf ("Expected no modification with path_simplify(\"%s\").\n", + test); + } + xfree (test_copy); } - -/* Initialization of static stuff. 
*/ -void -url_init (void) + +static void +test_path_simplify (void) { - init_unsafe_char_table (); + static struct { + char *test, *result; + int should_modify; + } tests[] = { + { "", "", 0 }, + { ".", "", 1 }, + { "./", "", 1 }, + { "..", "..", 0 }, + { "../", "../", 0 }, + { "foo", "foo", 0 }, + { "foo/bar", "foo/bar", 0 }, + { "foo///bar", "foo///bar", 0 }, + { "foo/.", "foo/", 1 }, + { "foo/./", "foo/", 1 }, + { "foo./", "foo./", 0 }, + { "foo/../bar", "bar", 1 }, + { "foo/../bar/", "bar/", 1 }, + { "foo/bar/..", "foo/", 1 }, + { "foo/bar/../x", "foo/x", 1 }, + { "foo/bar/../x/", "foo/x/", 1 }, + { "foo/..", "", 1 }, + { "foo/../..", "..", 1 }, + { "foo/../../..", "../..", 1 }, + { "foo/../../bar/../../baz", "../../baz", 1 }, + { "a/b/../../c", "c", 1 }, + { "./a/../b", "b", 1 } + }; + int i; + + for (i = 0; i < countof (tests); i++) + { + char *test = tests[i].test; + char *expected_result = tests[i].result; + int expected_change = tests[i].should_modify; + run_test (test, expected_result, expected_change); + } } +#endif