/* URL handling.
- Copyright (C) 1995, 1996, 1997, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1996, 1997, 2000, 2001, 2003
+ Free Software Foundation, Inc.
-This file is part of Wget.
+This file is part of GNU Wget.
-This program is free software; you can redistribute it and/or modify
+GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
-This program is distributed in the hope that it will be useful,
+GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+along with Wget; if not, write to the Free Software
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+In addition, as a special exception, the Free Software Foundation
+gives permission to link the code of its release of Wget with the
+OpenSSL project's "OpenSSL" library (or with modified versions of it
+that use the same license as the "OpenSSL" library), and distribute
+the linked executables. You must obey the GNU General Public License
+in all respects for all of the code used other than "OpenSSL". If you
+modify this file, you may extend this exception to your version of the
+file, but you are not obligated to do so. If you do not wish to do
+so, delete this exception statement from your version. */
#include <config.h>
#else
# include <strings.h>
#endif
-#include <ctype.h>
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#include "wget.h"
#include "utils.h"
#include "url.h"
-#include "host.h"
-#include "html.h"
#ifndef errno
extern int errno;
#endif
-/* Default port definitions */
-#define DEFAULT_HTTP_PORT 80
-#define DEFAULT_FTP_PORT 21
-
-/* URL separator (for findurl) */
-#define URL_SEPARATOR "!\"#'(),>`{}|<>"
-
-/* A list of unsafe characters for encoding, as per RFC1738. '@' and
- ':' (not listed in RFC) were added because of user/password
- encoding. */
-
-#ifndef WINDOWS
-# define URL_UNSAFE_CHARS "<>\"#%{}|\\^~[]`@:"
-#else /* WINDOWS */
-# define URL_UNSAFE_CHARS "<>\"%{}|\\^[]`"
-#endif /* WINDOWS */
-
-#define UNSAFE_CHAR(c) ( ((unsigned char)(c) <= ' ') /* ASCII 32 */ \
- || ((unsigned char)(c) > '~') /* ASCII 127 */ \
- || strchr (URL_UNSAFE_CHARS, c))
-
-/* If S contains unsafe characters, free it and replace it with a
- version that doesn't. */
-#define URL_CLEANSE(s) do \
-{ \
- if (contains_unsafe (s)) \
- { \
- char *uc_tmp = encode_string (s); \
- free (s); \
- (s) = uc_tmp; \
- } \
-} while (0)
-
-/* Is a directory "."? */
-#define DOTP(x) ((*(x) == '.') && (!*(x + 1)))
-/* Is a directory ".."? */
-#define DDOTP(x) ((*(x) == '.') && (*(x + 1) == '.') && (!*(x + 2)))
+struct scheme_data
+{
+ char *leading_string;
+ int default_port;
+ int enabled;
+};
-#if 0
-static void path_simplify_with_kludge PARAMS ((char *));
+/* Supported schemes: */
+static struct scheme_data supported_schemes[] =
+{
+ { "http://", DEFAULT_HTTP_PORT, 1 },
+#ifdef HAVE_SSL
+ { "https://", DEFAULT_HTTPS_PORT, 1 },
#endif
-static int urlpath_length PARAMS ((const char *));
+ { "ftp://", DEFAULT_FTP_PORT, 1 },
-/* NULL-terminated list of strings to be recognized as prototypes (URL
- schemes). Note that recognized doesn't mean supported -- only HTTP
- and FTP are currently supported.
+ /* SCHEME_INVALID */
+ { NULL, -1, 0 }
+};
- However, a string that does not match anything in the list will be
- considered a relative URL. Thus it's important that this list has
- anything anyone could think of being legal.
+/* Forward declarations: */
- There are wild things here. :-) Take a look at
- <URL:http://www.w3.org/pub/WWW/Addressing/schemes.html> for more
- fun. */
-static char *protostrings[] =
-{
- "cid:",
- "clsid:",
- "file:",
- "finger:",
- "ftp:",
- "gopher:",
- "hdl:",
- "http:",
- "https:",
- "ilu:",
- "ior:",
- "irc:",
- "java:",
- "javascript:",
- "lifn:",
- "mailto:",
- "mid:",
- "news:",
- "nntp:",
- "path:",
- "prospero:",
- "rlogin:",
- "service:",
- "shttp:",
- "snews:",
- "stanf:",
- "telnet:",
- "tn3270:",
- "wais:",
- "whois++:",
- NULL
-};
+static int path_simplify PARAMS ((char *));
+\f
+/* Support for encoding and decoding of URL strings. We determine
+ whether a character is unsafe through static table lookup. This
+ code assumes ASCII character set and 8-bit chars. */
-struct proto
-{
- char *name;
- uerr_t ind;
- unsigned short port;
+enum {
+ /* rfc1738 reserved chars, preserved from encoding. */
+ urlchr_reserved = 1,
+
+ /* rfc1738 unsafe chars, plus some more. */
+ urlchr_unsafe = 2
};
-/* Similar to former, but for supported protocols: */
-static struct proto sup_protos[] =
+#define urlchr_test(c, mask) (urlchr_table[(unsigned char)(c)] & (mask))
+#define URL_RESERVED_CHAR(c) urlchr_test(c, urlchr_reserved)
+#define URL_UNSAFE_CHAR(c) urlchr_test(c, urlchr_unsafe)
+
+/* Shorthands for the table: */
+#define R urlchr_reserved
+#define U urlchr_unsafe
+#define RU R|U
+
+const static unsigned char urlchr_table[256] =
{
- { "http://", URLHTTP, DEFAULT_HTTP_PORT },
- { "ftp://", URLFTP, DEFAULT_FTP_PORT },
- /*{ "file://", URLFILE, DEFAULT_FTP_PORT },*/
+ U, U, U, U, U, U, U, U, /* NUL SOH STX ETX EOT ENQ ACK BEL */
+ U, U, U, U, U, U, U, U, /* BS HT LF VT FF CR SO SI */
+ U, U, U, U, U, U, U, U, /* DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
+ U, U, U, U, U, U, U, U, /* CAN EM SUB ESC FS GS RS US */
+ U, 0, U, RU, 0, U, R, 0, /* SP ! " # $ % & ' */
+ 0, 0, 0, R, 0, 0, 0, R, /* ( ) * + , - . / */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 1 2 3 4 5 6 7 */
+ 0, 0, RU, R, U, R, U, R, /* 8 9 : ; < = > ? */
+ RU, 0, 0, 0, 0, 0, 0, 0, /* @ A B C D E F G */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* H I J K L M N O */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* P Q R S T U V W */
+ 0, 0, 0, RU, U, RU, U, 0, /* X Y Z [ \ ] ^ _ */
+ U, 0, 0, 0, 0, 0, 0, 0, /* ` a b c d e f g */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* h i j k l m n o */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* p q r s t u v w */
+ 0, 0, 0, U, U, U, U, U, /* x y z { | } ~ DEL */
+
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
+ U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U,
};
+#undef R
+#undef U
+#undef RU
-static void parse_dir PARAMS ((const char *, char **, char **));
-static uerr_t parse_uname PARAMS ((const char *, char **, char **));
-static char *construct PARAMS ((const char *, const char *, int , int));
-static char *construct_relative PARAMS ((const char *, const char *));
-static char process_ftp_type PARAMS ((char *));
+/* URL-unescape the string S.
-\f
-/* Returns the number of characters to be skipped if the first thing
- in a URL is URL: (which is 0 or 4+). The optional spaces after
- URL: are also skipped. */
-int
-skip_url (const char *url)
+ This is done by transforming the sequences "%HH" to the character
+ represented by the hexadecimal digits HH. If % is not followed by
+ two hexadecimal digits, it is inserted literally.
+
+ The transformation is done in place. If you need the original
+ string intact, make a copy before calling this function. */
+
+static void
+url_unescape (char *s)
{
- int i;
+ char *t = s; /* t - tortoise */
+ char *h = s; /* h - hare */
- if (TOUPPER (url[0]) == 'U'
- && TOUPPER (url[1]) == 'R'
- && TOUPPER (url[2]) == 'L'
- && url[3] == ':')
+ for (; *h; h++, t++)
{
- /* Skip blanks. */
- for (i = 4; url[i] && ISSPACE (url[i]); i++);
- return i;
+ if (*h != '%')
+ {
+ copychar:
+ *t = *h;
+ }
+ else
+ {
+ /* Do nothing if '%' is not followed by two hex digits. */
+ if (!h[1] || !h[2] || !(ISXDIGIT (h[1]) && ISXDIGIT (h[2])))
+ goto copychar;
+ *t = X2DIGITS_TO_NUM (h[1], h[2]);
+ h += 2;
+ }
}
- else
- return 0;
+ *t = '\0';
}
-/* Returns 1 if the string contains unsafe characters, 0 otherwise. */
-int
-contains_unsafe (const char *s)
-{
- for (; *s; s++)
- if (UNSAFE_CHAR (*s))
- return 1;
- return 0;
-}
+/* The core of url_escape_* functions. Escapes the characters that
+ match the provided mask in urlchr_table.
-/* Decodes the forms %xy in a URL to the character the hexadecimal
- code of which is xy. xy are hexadecimal digits from
- [0123456789ABCDEF] (case-insensitive). If x or y are not
- hex-digits or `%' precedes `\0', the sequence is inserted
- literally. */
+ If ALLOW_PASSTHROUGH is non-zero, a string with no unsafe chars
+ will be returned unchanged. If ALLOW_PASSTHROUGH is zero, a
+ freshly allocated string will be returned in all cases. */
-static void
-decode_string (char *s)
+static char *
+url_escape_1 (const char *s, unsigned char mask, int allow_passthrough)
{
- char *p = s;
+ const char *p1;
+ char *p2, *newstr;
+ int newlen;
+ int addition = 0;
- for (; *s; s++, p++)
+ for (p1 = s; *p1; p1++)
+ if (urlchr_test (*p1, mask))
+ addition += 2; /* Two more characters (hex digits) */
+
+ if (!addition)
+ return allow_passthrough ? (char *)s : xstrdup (s);
+
+ newlen = (p1 - s) + addition;
+ newstr = (char *)xmalloc (newlen + 1);
+
+ p1 = s;
+ p2 = newstr;
+ while (*p1)
{
- if (*s != '%')
- *p = *s;
- else
+ /* Quote the characters that match the test mask. */
+ if (urlchr_test (*p1, mask))
{
- /* Do nothing if at the end of the string, or if the chars
- are not hex-digits. */
- if (!*(s + 1) || !*(s + 2)
- || !(ISXDIGIT (*(s + 1)) && ISXDIGIT (*(s + 2))))
- {
- *p = *s;
- continue;
- }
- *p = (ASC2HEXD (*(s + 1)) << 4) + ASC2HEXD (*(s + 2));
- s += 2;
+ unsigned char c = *p1++;
+ *p2++ = '%';
+ *p2++ = XNUM_TO_DIGIT (c >> 4);
+ *p2++ = XNUM_TO_DIGIT (c & 0xf);
}
+ else
+ *p2++ = *p1++;
}
- *p = '\0';
+ assert (p2 - newstr == newlen);
+ *p2 = '\0';
+
+ return newstr;
}
-/* Encode the unsafe characters (as determined by URL_UNSAFE) in a
- given string, returning a malloc-ed %XX encoded string. */
+/* URL-escape the unsafe characters (see urlchr_table) in a given
+ string, returning a freshly allocated string. */
+
char *
-encode_string (const char *s)
+url_escape (const char *s)
{
- const char *b;
- char *p, *res;
- int i;
+ return url_escape_1 (s, urlchr_unsafe, 0);
+}
- b = s;
- for (i = 0; *s; s++, i++)
- if (UNSAFE_CHAR (*s))
- i += 2; /* Two more characters (hex digits) */
- res = (char *)xmalloc (i + 1);
- s = b;
- for (p = res; *s; s++)
- if (UNSAFE_CHAR (*s))
- {
- const unsigned char c = *s;
- *p++ = '%';
- *p++ = HEXD2ASC (c >> 4);
- *p++ = HEXD2ASC (c & 0xf);
- }
- else
- *p++ = *s;
- *p = '\0';
- return res;
+/* URL-escape the unsafe characters (see urlchr_table) in a given
+ string. If no characters are unsafe, S is returned. */
+
+static char *
+url_escape_allow_passthrough (const char *s)
+{
+ return url_escape_1 (s, urlchr_unsafe, 1);
}
\f
-/* Returns the proto-type if URL's protocol is supported, or
- URLUNKNOWN if not. */
-uerr_t
-urlproto (const char *url)
-{
- int i;
+enum copy_method { CM_DECODE, CM_ENCODE, CM_PASSTHROUGH };
- url += skip_url (url);
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- if (!strncasecmp (url, sup_protos[i].name, strlen (sup_protos[i].name)))
- return sup_protos[i].ind;
- for (i = 0; url[i] && url[i] != ':' && url[i] != '/'; i++);
- if (url[i] == ':')
+/* Decide whether to encode, decode, or pass through the char at P.
+ This used to be a macro, but it got a little too convoluted. */
+static inline enum copy_method
+decide_copy_method (const char *p)
+{
+ if (*p == '%')
{
- for (++i; url[i] && url[i] != '/'; i++)
- if (!ISDIGIT (url[i]))
- return URLBADPORT;
- if (url[i - 1] == ':')
- return URLFTP;
+ if (ISXDIGIT (*(p + 1)) && ISXDIGIT (*(p + 2)))
+ {
+ /* %xx sequence: decode it, unless it would decode to an
+ unsafe or a reserved char; in that case, leave it as
+ is. */
+ char preempt = X2DIGITS_TO_NUM (*(p + 1), *(p + 2));
+ if (URL_UNSAFE_CHAR (preempt) || URL_RESERVED_CHAR (preempt))
+ return CM_PASSTHROUGH;
+ else
+ return CM_DECODE;
+ }
else
- return URLHTTP;
+ /* Garbled %.. sequence: encode `%'. */
+ return CM_ENCODE;
}
+ else if (URL_UNSAFE_CHAR (*p) && !URL_RESERVED_CHAR (*p))
+ return CM_ENCODE;
else
- return URLHTTP;
+ return CM_PASSTHROUGH;
}
-/* Skip the protocol part of the URL, e.g. `http://'. If no protocol
- part is found, returns 0. */
-int
-skip_proto (const char *url)
+/* Translate a %-escaped (but possibly non-conformant) input string S
+ into a %-escaped (and conformant) output string. If no characters
+ are encoded or decoded, return the same string S; otherwise, return
+ a freshly allocated string with the new contents.
+
+ After a URL has been run through this function, the protocols that
+ use `%' as the quote character can use the resulting string as-is,
+ while those that don't call url_unescape() to get to the intended
+ data. This function is also stable: after an input string is
+ transformed the first time, all further transformations of the
+ result yield the same result string.
+
+ Let's discuss why this function is needed.
+
+ Imagine Wget is to retrieve `http://abc.xyz/abc def'. Since a raw
+ space character would mess up the HTTP request, it needs to be
+ quoted, like this:
+
+ GET /abc%20def HTTP/1.0
+
+ It appears that the unsafe chars need to be quoted, for example
+ with url_escape. But what if we're requested to download
+ `abc%20def'? url_escape transforms "%" to "%25", which would leave
+ us with `abc%2520def'. This is incorrect -- since %-escapes are
+ part of URL syntax, "%20" is the correct way to denote a literal
+ space on the Wget command line. This leaves us in the conclusion
+ that in that case Wget should not call url_escape, but leave the
+ `%20' as is.
+
+ And what if the requested URI is `abc%20 def'? If we call
+ url_escape, we end up with `/abc%2520%20def', which is almost
+ certainly not intended. If we don't call url_escape, we are left
+ with the embedded space and cannot complete the request. What the
+ user meant was for Wget to request `/abc%20%20def', and this is
+ where reencode_escapes kicks in.
+
+ Wget used to solve this by first decoding %-quotes, and then
+ encoding all the "unsafe" characters found in the resulting string.
+ This was wrong because it didn't preserve certain URL special
+ (reserved) characters. For instance, URI containing "a%2B+b" (0x2b
+ == '+') would get translated to "a%2B%2Bb" or "a++b" depending on
+ whether we considered `+' reserved (it is). One of these results
+ is inevitable because by the second step we would lose information
+ on whether the `+' was originally encoded or not. Both results
+ were wrong because in CGI parameters + means space, while %2B means
+ literal plus. reencode_escapes correctly translates the above to
+ "a%2B+b", i.e. returns the original string.
+
+ This function uses an algorithm proposed by Anon Sricharoenchai:
+
+ 1. Encode all URL_UNSAFE and the "%" that are not followed by 2
+ hexdigits.
+
+ 2. Decode all "%XX" except URL_UNSAFE, URL_RESERVED (";/?:@=&") and
+ "+".
+
+ ...except that this code conflates the two steps, and decides
+ whether to encode, decode, or pass through each character in turn.
+ The function still uses two passes, but their logic is the same --
+ the first pass exists merely for the sake of allocation. Another
+ small difference is that we include `+' to URL_RESERVED.
+
+ Anon's test case:
+
+ "http://abc.xyz/%20%3F%%36%31%25aa% a?a=%61+a%2Ba&b=b%26c%3Dc"
+ ->
+ "http://abc.xyz/%20%3F%2561%25aa%25%20a?a=a+a%2Ba&b=b%26c%3Dc"
+
+ Simpler test cases:
+
+ "foo bar" -> "foo%20bar"
+ "foo%20bar" -> "foo%20bar"
+ "foo %20bar" -> "foo%20%20bar"
+ "foo%%20bar" -> "foo%25%20bar" (0x25 == '%')
+ "foo%25%20bar" -> "foo%25%20bar"
+ "foo%2%20bar" -> "foo%252%20bar"
+ "foo+bar" -> "foo+bar" (plus is reserved!)
+ "foo%2b+bar" -> "foo%2b+bar" */
+
+static char *
+reencode_escapes (const char *s)
{
- char **s;
- int l;
+ const char *p1;
+ char *newstr, *p2;
+ int oldlen, newlen;
- for (s = protostrings; *s; s++)
- if (!strncasecmp (*s, url, strlen (*s)))
- break;
- if (!*s)
- return 0;
- l = strlen (*s);
- /* HTTP and FTP protocols are expected to yield exact host names
- (i.e. the `//' part must be skipped, too). */
- if (!strcmp (*s, "http:") || !strcmp (*s, "ftp:"))
- l += 2;
- return l;
+ int encode_count = 0;
+ int decode_count = 0;
+
+ /* First, pass through the string to see if there's anything to do,
+ and to calculate the new length. */
+ for (p1 = s; *p1; p1++)
+ {
+ switch (decide_copy_method (p1))
+ {
+ case CM_ENCODE:
+ ++encode_count;
+ break;
+ case CM_DECODE:
+ ++decode_count;
+ break;
+ case CM_PASSTHROUGH:
+ break;
+ }
+ }
+
+ if (!encode_count && !decode_count)
+ /* The string is good as it is. */
+ return (char *)s; /* C const model sucks. */
+
+ oldlen = p1 - s;
+ /* Each encoding adds two characters (hex digits), while each
+ decoding removes two characters. */
+ newlen = oldlen + 2 * (encode_count - decode_count);
+ newstr = xmalloc (newlen + 1);
+
+ p1 = s;
+ p2 = newstr;
+
+ while (*p1)
+ {
+ switch (decide_copy_method (p1))
+ {
+ case CM_ENCODE:
+ {
+ unsigned char c = *p1++;
+ *p2++ = '%';
+ *p2++ = XNUM_TO_DIGIT (c >> 4);
+ *p2++ = XNUM_TO_DIGIT (c & 0xf);
+ }
+ break;
+ case CM_DECODE:
+ *p2++ = X2DIGITS_TO_NUM (p1[1], p1[2]);
+ p1 += 3; /* skip %xx */
+ break;
+ case CM_PASSTHROUGH:
+ *p2++ = *p1++;
+ }
+ }
+ *p2 = '\0';
+ assert (p2 - newstr == newlen);
+ return newstr;
}
+\f
+/* Returns the scheme type if the scheme is supported, or
+ SCHEME_INVALID if not. */
-/* Returns 1 if the URL begins with a protocol (supported or
- unsupported), 0 otherwise. */
-static int
-has_proto (const char *url)
+enum url_scheme
+url_scheme (const char *url)
{
- char **s;
+ int i;
- url += skip_url (url);
- for (s = protostrings; *s; s++)
- if (strncasecmp (url, *s, strlen (*s)) == 0)
- return 1;
- return 0;
+ for (i = 0; supported_schemes[i].leading_string; i++)
+ if (0 == strncasecmp (url, supported_schemes[i].leading_string,
+ strlen (supported_schemes[i].leading_string)))
+ {
+ if (supported_schemes[i].enabled)
+ return (enum url_scheme) i;
+ else
+ return SCHEME_INVALID;
+ }
+
+ return SCHEME_INVALID;
}
-/* Skip the username and password, if present here. The function
- should be called *not* with the complete URL, but with the part
- right after the protocol.
+#define SCHEME_CHAR(ch) (ISALNUM (ch) || (ch) == '-' || (ch) == '+')
+
+/* Return 1 if the URL begins with any "scheme", 0 otherwise. As
+ currently implemented, it returns true if URL begins with
+ [-+a-zA-Z0-9]+: . */
- If no username and password are found, return 0. */
int
-skip_uname (const char *url)
+url_has_scheme (const char *url)
{
- const char *p;
- for (p = url; *p && *p != '/'; p++)
- if (*p == '@')
- break;
- /* If a `@' was found before the first occurrence of `/', skip
- it. */
- if (*p == '@')
- return p - url + 1;
- else
+ const char *p = url;
+
+ /* The first char must be a scheme char. */
+ if (!*p || !SCHEME_CHAR (*p))
return 0;
+ ++p;
+ /* Followed by 0 or more scheme chars. */
+ while (*p && SCHEME_CHAR (*p))
+ ++p;
+ /* Terminated by ':'. */
+ return *p == ':';
}
-\f
-/* Allocate a new urlinfo structure, fill it with default values and
- return a pointer to it. */
-struct urlinfo *
-newurl (void)
-{
- struct urlinfo *u;
- u = (struct urlinfo *)xmalloc (sizeof (struct urlinfo));
- memset (u, 0, sizeof (*u));
- u->proto = URLUNKNOWN;
- return u;
+int
+scheme_default_port (enum url_scheme scheme)
+{
+ return supported_schemes[scheme].default_port;
}
-/* Perform a "deep" free of the urlinfo structure. The structure
- should have been created with newurl, but need not have been used.
- If free_pointer is non-0, free the pointer itself. */
void
-freeurl (struct urlinfo *u, int complete)
+scheme_disable (enum url_scheme scheme)
{
- assert (u != NULL);
- FREE_MAYBE (u->url);
- FREE_MAYBE (u->host);
- FREE_MAYBE (u->path);
- FREE_MAYBE (u->file);
- FREE_MAYBE (u->dir);
- FREE_MAYBE (u->user);
- FREE_MAYBE (u->passwd);
- FREE_MAYBE (u->local);
- FREE_MAYBE (u->referer);
- if (u->proxy)
- freeurl (u->proxy, 1);
- if (complete)
- free (u);
- return;
+ supported_schemes[scheme].enabled = 0;
}
-\f
-/* Extract the given URL of the form
- (http:|ftp:)// (user (:password)?@)?hostname (:port)? (/path)?
- 1. hostname (terminated with `/' or `:')
- 2. port number (terminated with `/'), or chosen for the protocol
- 3. dirname (everything after hostname)
- Most errors are handled. No allocation is done, you must supply
- pointers to allocated memory.
- ...and a host of other stuff :-)
-
- - Recognizes hostname:dir/file for FTP and
- hostname (:portnum)?/dir/file for HTTP.
- - Parses the path to yield directory and file
- - Parses the URL to yield the username and passwd (if present)
- - Decodes the strings, in case they contain "forbidden" characters
- - Writes the result to struct urlinfo
-
- If the argument STRICT is set, it recognizes only the canonical
- form. */
-uerr_t
-parseurl (const char *url, struct urlinfo *u, int strict)
+
+/* Skip the username and password, if present here. The function
+ should *not* be called with the complete URL, but with the part
+ right after the scheme.
+
+ If no username and password are found, return 0. */
+
+static int
+url_skip_credentials (const char *url)
{
- int i, l, abs_ftp;
- int recognizable; /* Recognizable URL is the one where
- the protocol name was explicitly
- named, i.e. it wasn't deduced from
- the URL format. */
- uerr_t type;
-
- DEBUGP (("parseurl (\"%s\") -> ", url));
- url += skip_url (url);
- recognizable = has_proto (url);
- if (strict && !recognizable)
- return URLUNKNOWN;
- for (i = 0, l = 0; i < ARRAY_SIZE (sup_protos); i++)
+ /* Look for '@' that comes before terminators, such as '/', '?',
+ '#', or ';'. */
+ const char *p = (const char *)strpbrk (url, "@/?#;");
+ if (!p || *p != '@')
+ return 0;
+ return p + 1 - url;
+}
+
+/* Parse credentials contained in [BEG, END). The region is expected
+ to have come from a URL and is unescaped. */
+
+static int
+parse_credentials (const char *beg, const char *end, char **user, char **passwd)
+{
+ char *colon;
+ const char *userend;
+
+ if (beg == end)
+ return 0; /* empty user name */
+
+ colon = memchr (beg, ':', end - beg);
+ if (colon == beg)
+ return 0; /* again empty user name */
+
+ if (colon)
{
- l = strlen (sup_protos[i].name);
- if (!strncasecmp (sup_protos[i].name, url, l))
- break;
+ *passwd = strdupdelim (colon + 1, end);
+ userend = colon;
+ url_unescape (*passwd);
}
- /* If protocol is recognizable, but unsupported, bail out, else
- suppose unknown. */
- if (recognizable && i == ARRAY_SIZE (sup_protos))
- return URLUNKNOWN;
- else if (i == ARRAY_SIZE (sup_protos))
- type = URLUNKNOWN;
else
- u->proto = type = sup_protos[i].ind;
-
- if (type == URLUNKNOWN)
- l = 0;
- /* Allow a username and password to be specified (i.e. just skip
- them for now). */
- if (recognizable)
- l += skip_uname (url + l);
- for (i = l; url[i] && url[i] != ':' && url[i] != '/'; i++);
- if (i == l)
- return URLBADHOST;
- /* Get the hostname. */
- u->host = strdupdelim (url + l, url + i);
- DEBUGP (("host %s -> ", u->host));
-
- /* Assume no port has been given. */
- u->port = 0;
- if (url[i] == ':')
{
- /* We have a colon delimiting the hostname. It could mean that
- a port number is following it, or a directory. */
- if (ISDIGIT (url[++i])) /* A port number */
- {
- if (type == URLUNKNOWN)
- u->proto = type = URLHTTP;
- for (; url[i] && url[i] != '/'; i++)
- if (ISDIGIT (url[i]))
- u->port = 10 * u->port + (url[i] - '0');
- else
- return URLBADPORT;
- if (!u->port)
- return URLBADPORT;
- DEBUGP (("port %hu -> ", u->port));
- }
- else if (type == URLUNKNOWN) /* or a directory */
- u->proto = type = URLFTP;
- else /* or just a misformed port number */
- return URLBADPORT;
+ *passwd = NULL;
+ userend = end;
}
- else if (type == URLUNKNOWN)
- u->proto = type = URLHTTP;
- if (!u->port)
+ *user = strdupdelim (beg, userend);
+ url_unescape (*user);
+ return 1;
+}
+
+/* Used by main.c: detect URLs written using the "shorthand" URL forms
+ popularized by Netscape and NcFTP. HTTP shorthands look like this:
+
+ www.foo.com[:port]/dir/file -> http://www.foo.com[:port]/dir/file
+ www.foo.com[:port] -> http://www.foo.com[:port]
+
+ FTP shorthands look like this:
+
+ foo.bar.com:dir/file -> ftp://foo.bar.com/dir/file
+ foo.bar.com:/absdir/file -> ftp://foo.bar.com//absdir/file
+
+ If the URL needs not or cannot be rewritten, return NULL. */
+
+char *
+rewrite_shorthand_url (const char *url)
+{
+ const char *p;
+
+ if (url_has_scheme (url))
+ return NULL;
+
+ /* Look for a ':' or '/'. The former signifies NcFTP syntax, the
+ latter Netscape. */
+ for (p = url; *p && *p != ':' && *p != '/'; p++)
+ ;
+
+ if (p == url)
+ return NULL;
+
+ if (*p == ':')
{
- int i;
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- if (sup_protos[i].ind == type)
- break;
- if (i == ARRAY_SIZE (sup_protos))
- return URLUNKNOWN;
- u->port = sup_protos[i].port;
+ const char *pp;
+ char *res;
+ /* If the characters after the colon and before the next slash
+ or end of string are all digits, it's HTTP. */
+ int digits = 0;
+ for (pp = p + 1; ISDIGIT (*pp); pp++)
+ ++digits;
+ if (digits > 0 && (*pp == '/' || *pp == '\0'))
+ goto http;
+
+ /* Prepend "ftp://" to the entire URL... */
+ res = xmalloc (6 + strlen (url) + 1);
+ sprintf (res, "ftp://%s", url);
+ /* ...and replace ':' with '/'. */
+ res[6 + (p - url)] = '/';
+ return res;
}
- /* Some delimiter troubles... */
- if (url[i] == '/' && url[i - 1] != ':')
- ++i;
- if (type == URLHTTP)
- while (url[i] && url[i] == '/')
- ++i;
- u->path = (char *)xmalloc (strlen (url + i) + 8);
- strcpy (u->path, url + i);
- if (type == URLFTP)
+ else
{
- u->ftp_type = process_ftp_type (u->path);
- /* #### We don't handle type `d' correctly yet. */
- if (!u->ftp_type || TOUPPER (u->ftp_type) == 'D')
- u->ftp_type = 'I';
+ char *res;
+ http:
+ /* Just prepend "http://" to what we have. */
+ res = xmalloc (7 + strlen (url) + 1);
+ sprintf (res, "http://%s", url);
+ return res;
}
- DEBUGP (("opath %s -> ", u->path));
- /* Parse the username and password (if existing). */
- parse_uname (url, &u->user, &u->passwd);
- /* Decode the strings, as per RFC 1738. */
- decode_string (u->host);
- decode_string (u->path);
- if (u->user)
- decode_string (u->user);
- if (u->passwd)
- decode_string (u->passwd);
- /* Parse the directory. */
- parse_dir (u->path, &u->dir, &u->file);
- DEBUGP (("dir %s -> file %s -> ", u->dir, u->file));
- /* Simplify the directory. */
- path_simplify (u->dir);
- /* Remove the leading `/' in HTTP. */
- if (type == URLHTTP && *u->dir == '/')
- strcpy (u->dir, u->dir + 1);
- DEBUGP (("ndir %s\n", u->dir));
- /* Strip trailing `/'. */
- l = strlen (u->dir);
- if (l && u->dir[l - 1] == '/')
- u->dir[l - 1] = '\0';
- /* Re-create the path: */
- abs_ftp = (u->proto == URLFTP && *u->dir == '/');
- /* sprintf (u->path, "%s%s%s%s", abs_ftp ? "%2F": "/",
- abs_ftp ? (u->dir + 1) : u->dir, *u->dir ? "/" : "", u->file); */
- strcpy (u->path, abs_ftp ? "%2F" : "/");
- strcat (u->path, abs_ftp ? (u->dir + 1) : u->dir);
- strcat (u->path, *u->dir ? "/" : "");
- strcat (u->path, u->file);
- URL_CLEANSE (u->path);
- DEBUGP (("newpath: %s\n", u->path));
- /* Create the clean URL. */
- u->url = str_url (u, 0);
- return URLOK;
}
\f
-/* Special versions of DOTP and DDOTP for parse_dir(). */
+static void split_path PARAMS ((const char *, char **, char **));
-#define PD_DOTP(x) ((*(x) == '.') && (!*((x) + 1) || *((x) + 1) == '?'))
-#define PD_DDOTP(x) ((*(x) == '.') && (*(x) == '.') \
- && (!*((x) + 2) || *((x) + 2) == '?'))
+/* Like strpbrk, with the exception that it returns the pointer to the
+ terminating zero (end-of-string aka "eos") if no matching character
+ is found.
-/* Build the directory and filename components of the path. Both
- components are *separately* malloc-ed strings! It does not change
- the contents of path.
+ Although I normally balk at Gcc-specific optimizations, it probably
+ makes sense here: glibc has optimizations that detect strpbrk being
+ called with literal string as ACCEPT and inline the search. That
+ optimization is defeated if strpbrk is hidden within the call to
+ another function. (And no, making strpbrk_or_eos inline doesn't
+ help because the check for literal accept is in the
+ preprocessor.) */
- If the path ends with "." or "..", they are (correctly) counted as
- directories. */
-static void
-parse_dir (const char *path, char **dir, char **file)
+#ifdef __GNUC__
+
+#define strpbrk_or_eos(s, accept) ({ \
+ char *SOE_p = strpbrk (s, accept); \
+ if (!SOE_p) \
+ SOE_p = (char *)s + strlen (s); \
+ SOE_p; \
+})
+
+#else /* not __GNUC__ */
+
+static char *
+strpbrk_or_eos (const char *s, const char *accept)
+{
+ char *p = strpbrk (s, accept);
+ if (!p)
+ p = (char *)s + strlen (s);
+ return p;
+}
+#endif
+
+/* Turn STR into lowercase; return non-zero if a character was
+ actually changed. */
+
+static int
+lowercase_str (char *str)
{
- int i, l;
+ int change = 0;
+ for (; *str; str++)
+ if (ISUPPER (*str))
+ {
+ change = 1;
+ *str = TOLOWER (*str);
+ }
+ return change;
+}
+
+static char *parse_errors[] = {
+#define PE_NO_ERROR 0
+ N_("No error"),
+#define PE_UNSUPPORTED_SCHEME 1
+ N_("Unsupported scheme"),
+#define PE_EMPTY_HOST 2
+ N_("Empty host"),
+#define PE_BAD_PORT_NUMBER 3
+ N_("Bad port number"),
+#define PE_INVALID_USER_NAME 4
+ N_("Invalid user name"),
+#define PE_UNTERMINATED_IPV6_ADDRESS 5
+ N_("Unterminated IPv6 numeric address"),
+#define PE_IPV6_NOT_SUPPORTED 6
+ N_("IPv6 addresses not supported"),
+#define PE_INVALID_IPV6_ADDRESS 7
+ N_("Invalid IPv6 numeric address")
+};
- l = urlpath_length (path);
- for (i = l; i && path[i] != '/'; i--);
+#ifdef ENABLE_IPV6
+/* The following two functions were adapted from glibc. */
- if (!i && *path != '/') /* Just filename */
+static int
+is_valid_ipv4_address (const char *str, const char *end)
+{
+ int saw_digit = 0;
+ int octets = 0;
+ int val = 0;
+
+ while (str < end)
{
- if (PD_DOTP (path) || PD_DDOTP (path))
+ int ch = *str++;
+
+ if (ch >= '0' && ch <= '9')
{
- *dir = strdupdelim (path, path + l);
- *file = xstrdup (path + l); /* normally empty, but could
- contain ?... */
+ val = val * 10 + (ch - '0');
+
+ if (val > 255)
+ return 0;
+ if (saw_digit == 0)
+ {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
}
- else
+ else if (ch == '.' && saw_digit == 1)
{
- *dir = xstrdup (""); /* This is required because of FTP */
- *file = xstrdup (path);
+ if (octets == 4)
+ return 0;
+ val = 0;
+ saw_digit = 0;
}
+ else
+ return 0;
}
- else if (!i) /* /filename */
+ if (octets < 4)
+ return 0;
+
+ return 1;
+}
+
+static int
+is_valid_ipv6_address (const char *str, const char *end)
+{
+ enum {
+ NS_INADDRSZ = 4,
+ NS_IN6ADDRSZ = 16,
+ NS_INT16SZ = 2
+ };
+
+ const char *curtok;
+ int tp;
+ const char *colonp;
+ int saw_xdigit;
+ unsigned int val;
+
+ tp = 0;
+ colonp = NULL;
+
+ if (str == end)
+ return 0;
+
+ /* Leading :: requires some special handling. */
+ if (*str == ':')
{
- if (PD_DOTP (path + 1) || PD_DDOTP (path + 1))
+ ++str;
+ if (str == end || *str != ':')
+ return 0;
+ }
+
+ curtok = str;
+ saw_xdigit = 0;
+ val = 0;
+
+ while (str < end)
+ {
+ int ch = *str++;
+
+ /* if ch is a number, add it to val. */
+ if (ISXDIGIT (ch))
{
- *dir = strdupdelim (path, path + l);
- *file = xstrdup (path + l); /* normally empty, but could
- contain ?... */
+ val <<= 4;
+ val |= XDIGIT_TO_NUM (ch);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ continue;
}
- else
+
+ /* if ch is a colon ... */
+ if (ch == ':')
{
- *dir = xstrdup ("/");
- *file = xstrdup (path + 1);
+ curtok = str;
+ if (saw_xdigit == 0)
+ {
+ if (colonp != NULL)
+ return 0;
+ colonp = str + tp;
+ continue;
+ }
+ else if (str == end)
+ return 0;
+ if (tp > NS_IN6ADDRSZ - NS_INT16SZ)
+ return 0;
+ tp += NS_INT16SZ;
+ saw_xdigit = 0;
+ val = 0;
+ continue;
+ }
+
+ /* if ch is a dot ... */
+ if (ch == '.' && (tp <= NS_IN6ADDRSZ - NS_INADDRSZ)
+ && is_valid_ipv4_address (curtok, end) == 1)
+ {
+ tp += NS_INADDRSZ;
+ saw_xdigit = 0;
+ break;
}
+
+ return 0;
+ }
+
+ if (saw_xdigit == 1)
+ {
+ if (tp > NS_IN6ADDRSZ - NS_INT16SZ)
+ return 0;
+ tp += NS_INT16SZ;
+ }
+
+ if (colonp != NULL)
+ {
+ if (tp == NS_IN6ADDRSZ)
+ return 0;
+ tp = NS_IN6ADDRSZ;
}
- else /* Nonempty directory with or without a filename */
+
+ if (tp != NS_IN6ADDRSZ)
+ return 0;
+
+ return 1;
+}
+#endif
+
+/* Parse a URL.
+
+ Return a new struct url if successful, NULL on error. In case of
+ error, and if ERROR is not NULL, also set *ERROR to the appropriate
+ error code. */
+struct url *
+url_parse (const char *url, int *error)
+{
+ struct url *u;
+ const char *p;
+ int path_modified, host_modified;
+
+ enum url_scheme scheme;
+
+ const char *uname_b, *uname_e;
+ const char *host_b, *host_e;
+ const char *path_b, *path_e;
+ const char *params_b, *params_e;
+ const char *query_b, *query_e;
+ const char *fragment_b, *fragment_e;
+
+ int port;
+ char *user = NULL, *passwd = NULL;
+
+ char *url_encoded = NULL;
+
+ int error_code;
+
+ scheme = url_scheme (url);
+ if (scheme == SCHEME_INVALID)
+ {
+ error_code = PE_UNSUPPORTED_SCHEME;
+ goto error;
+ }
+
+ url_encoded = reencode_escapes (url);
+ p = url_encoded;
+
+ p += strlen (supported_schemes[scheme].leading_string);
+ uname_b = p;
+ p += url_skip_credentials (p);
+ uname_e = p;
+
+ /* scheme://user:pass@host[:port]... */
+ /* ^ */
+
+ /* We attempt to break down the URL into the components path,
+ params, query, and fragment. They are ordered like this:
+
+ scheme://host[:port][/path][;params][?query][#fragment] */
+
+ params_b = params_e = NULL;
+ query_b = query_e = NULL;
+ fragment_b = fragment_e = NULL;
+
+ host_b = p;
+
+ if (*p == '[')
{
- if (PD_DOTP (path + i + 1) || PD_DDOTP (path + i + 1))
+ /* Handle IPv6 address inside square brackets. Ideally we'd
+ just look for the terminating ']', but rfc2732 mandates
+ rejecting invalid IPv6 addresses. */
+
+ /* The address begins after '['. */
+ host_b = p + 1;
+ host_e = strchr (host_b, ']');
+
+ if (!host_e)
{
- *dir = strdupdelim (path, path + l);
- *file = xstrdup (path + l); /* normally empty, but could
- contain ?... */
+ error_code = PE_UNTERMINATED_IPV6_ADDRESS;
+ goto error;
}
- else
+
+#ifdef ENABLE_IPV6
+ /* Check if the IPv6 address is valid. */
+ if (!is_valid_ipv6_address(host_b, host_e))
{
- *dir = strdupdelim (path, path + i);
- *file = xstrdup (path + i + 1);
+ error_code = PE_INVALID_IPV6_ADDRESS;
+ goto error;
}
+
+ /* Continue parsing after the closing ']'. */
+ p = host_e + 1;
+#else
+ error_code = PE_IPV6_NOT_SUPPORTED;
+ goto error;
+#endif
+ }
+ else
+ {
+ p = strpbrk_or_eos (p, ":/;?#");
+ host_e = p;
}
-}
-/* Find the optional username and password within the URL, as per
- RFC1738. The returned user and passwd char pointers are
- malloc-ed. */
-static uerr_t
-parse_uname (const char *url, char **user, char **passwd)
-{
- int l;
- const char *p, *col;
- char **where;
-
- *user = NULL;
- *passwd = NULL;
- url += skip_url (url);
- /* Look for end of protocol string. */
- l = skip_proto (url);
- if (!l)
- return URLUNKNOWN;
- /* Add protocol offset. */
- url += l;
- /* Is there an `@' character? */
- for (p = url; *p && *p != '/'; p++)
- if (*p == '@')
- break;
- /* If not, return. */
- if (*p != '@')
- return URLOK;
- /* Else find the username and password. */
- for (p = col = url; *p != '@'; p++)
+ if (host_b == host_e)
+ {
+ error_code = PE_EMPTY_HOST;
+ goto error;
+ }
+
+ port = scheme_default_port (scheme);
+ if (*p == ':')
{
- if (*p == ':' && !*user)
+ const char *port_b, *port_e, *pp;
+
+ /* scheme://host:port/tralala */
+ /* ^ */
+ ++p;
+ port_b = p;
+ p = strpbrk_or_eos (p, "/;?#");
+ port_e = p;
+
+ if (port_b == port_e)
{
- *user = (char *)xmalloc (p - url + 1);
- memcpy (*user, url, p - url);
- (*user)[p - url] = '\0';
- col = p + 1;
+ /* http://host:/whatever */
+ /* ^ */
+ error_code = PE_BAD_PORT_NUMBER;
+ goto error;
}
- }
- /* Decide whether you have only the username or both. */
- where = *user ? passwd : user;
- *where = (char *)xmalloc (p - col + 1);
- memcpy (*where, col, p - col);
- (*where)[p - col] = '\0';
- return URLOK;
-}
-/* If PATH ends with `;type=X', return the character X. */
-static char
-process_ftp_type (char *path)
-{
- int len = strlen (path);
+ for (port = 0, pp = port_b; pp < port_e; pp++)
+ {
+ if (!ISDIGIT (*pp))
+ {
+ /* http://host:12randomgarbage/blah */
+ /* ^ */
+ error_code = PE_BAD_PORT_NUMBER;
+ goto error;
+ }
+
+ port = 10 * port + (*pp - '0');
+ }
+ }
- if (len >= 7
- && !memcmp (path + len - 7, ";type=", 6))
+ if (*p == '/')
{
- path[len - 7] = '\0';
- return path[len - 1];
+ ++p;
+ path_b = p;
+ p = strpbrk_or_eos (p, ";?#");
+ path_e = p;
}
else
- return '\0';
-}
-\f
-/* Return the URL as fine-formed string, with a proper protocol,
- optional port number, directory and optional user/password. If
- HIDE is non-zero, password will be hidden. The forbidden
- characters in the URL will be cleansed. */
-char *
-str_url (const struct urlinfo *u, int hide)
-{
- char *res, *host, *user, *passwd, *proto_name, *dir, *file;
- int i, l, ln, lu, lh, lp, lf, ld;
- unsigned short proto_default_port;
-
- /* Look for the protocol name. */
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- if (sup_protos[i].ind == u->proto)
- break;
- if (i == ARRAY_SIZE (sup_protos))
- return NULL;
- proto_name = sup_protos[i].name;
- proto_default_port = sup_protos[i].port;
- host = CLEANDUP (u->host);
- dir = CLEANDUP (u->dir);
- file = CLEANDUP (u->file);
- user = passwd = NULL;
- if (u->user)
- user = CLEANDUP (u->user);
- if (u->passwd)
{
- int i;
- passwd = CLEANDUP (u->passwd);
- if (hide)
- for (i = 0; passwd[i]; i++)
- passwd[i] = 'x';
+ /* The path must always exist, so represent a missing one as empty. */
+ path_b = path_e = p;
}
- if (u->proto == URLFTP && *dir == '/')
+
+ if (*p == ';')
{
- char *tmp = (char *)xmalloc (strlen (dir) + 3);
- /*sprintf (tmp, "%%2F%s", dir + 1);*/
- tmp[0] = '%';
- tmp[1] = '2';
- tmp[2] = 'F';
- strcpy (tmp + 3, dir + 1);
- free (dir);
- dir = tmp;
+ ++p;
+ params_b = p;
+ p = strpbrk_or_eos (p, "?#");
+ params_e = p;
}
-
- ln = strlen (proto_name);
- lu = user ? strlen (user) : 0;
- lp = passwd ? strlen (passwd) : 0;
- lh = strlen (host);
- ld = strlen (dir);
- lf = strlen (file);
- res = (char *)xmalloc (ln + lu + lp + lh + ld + lf + 20); /* safe sex */
- /* sprintf (res, "%s%s%s%s%s%s:%d/%s%s%s", proto_name,
- (user ? user : ""), (passwd ? ":" : ""),
- (passwd ? passwd : ""), (user ? "@" : ""),
- host, u->port, dir, *dir ? "/" : "", file); */
- l = 0;
- memcpy (res, proto_name, ln);
- l += ln;
- if (user)
+ if (*p == '?')
{
- memcpy (res + l, user, lu);
- l += lu;
- if (passwd)
+ ++p;
+ query_b = p;
+ p = strpbrk_or_eos (p, "#");
+ query_e = p;
+
+ /* Hack that allows users to use '?' (a wildcard character) in
+ FTP URLs without it being interpreted as a query string
+ delimiter. */
+ if (scheme == SCHEME_FTP)
{
- res[l++] = ':';
- memcpy (res + l, passwd, lp);
- l += lp;
+ query_b = query_e = NULL;
+ path_e = p;
}
- res[l++] = '@';
}
- memcpy (res + l, host, lh);
- l += lh;
- if (u->port != proto_default_port)
+ if (*p == '#')
{
- res[l++] = ':';
- long_to_string (res + l, (long)u->port);
- l += numdigit (u->port);
+ ++p;
+ fragment_b = p;
+ p += strlen (p);
+ fragment_e = p;
+ }
+ assert (*p == 0);
+
+ if (uname_b != uname_e)
+ {
+ /* http://user:pass@host */
+ /* ^ ^ */
+ /* uname_b uname_e */
+ if (!parse_credentials (uname_b, uname_e - 1, &user, &passwd))
+ {
+ error_code = PE_INVALID_USER_NAME;
+ goto error;
+ }
}
- res[l++] = '/';
- memcpy (res + l, dir, ld);
- l += ld;
- if (*dir)
- res[l++] = '/';
- strcpy (res + l, file);
- free (host);
- free (dir);
- free (file);
- FREE_MAYBE (user);
- FREE_MAYBE (passwd);
- return res;
-}
-/* Check whether two URL-s are equivalent, i.e. pointing to the same
- location. Uses parseurl to parse them, and compares the canonical
- forms.
+ u = xnew0 (struct url);
+ u->scheme = scheme;
+ u->host = strdupdelim (host_b, host_e);
+ u->port = port;
+ u->user = user;
+ u->passwd = passwd;
- Returns 1 if the URL1 is equivalent to URL2, 0 otherwise. Also
- return 0 on error. */
-int
-url_equal (const char *url1, const char *url2)
-{
- struct urlinfo *u1, *u2;
- uerr_t err;
- int res;
+ u->path = strdupdelim (path_b, path_e);
+ path_modified = path_simplify (u->path);
+ split_path (u->path, &u->dir, &u->file);
- u1 = newurl ();
- err = parseurl (url1, u1, 0);
- if (err != URLOK)
+ host_modified = lowercase_str (u->host);
+
+ if (params_b)
+ u->params = strdupdelim (params_b, params_e);
+ if (query_b)
+ u->query = strdupdelim (query_b, query_e);
+ if (fragment_b)
+ u->fragment = strdupdelim (fragment_b, fragment_e);
+
+ if (path_modified || u->fragment || host_modified || path_b == path_e)
{
- freeurl (u1, 1);
- return 0;
+ /* If we suspect that a transformation has made what url_string
+ would return differ from URL_ENCODED, rebuild u->url using
+ url_string. */
+ u->url = url_string (u, 0);
+
+ if (url_encoded != url)
+ xfree ((char *) url_encoded);
}
- u2 = newurl ();
- err = parseurl (url2, u2, 0);
- if (err != URLOK)
+ else
{
- freeurl (u2, 1);
- return 0;
+ if (url_encoded == url)
+ u->url = xstrdup (url);
+ else
+ u->url = url_encoded;
}
- res = !strcmp (u1->url, u2->url);
- freeurl (u1, 1);
- freeurl (u2, 1);
- return res;
-}
-\f
-/* Find URL of format scheme:hostname[:port]/dir in a buffer. The
- buffer may contain pretty much anything; no errors are signaled. */
-static const char *
-findurl (const char *buf, int howmuch, int *count)
-{
- char **prot;
- const char *s1, *s2;
+ url_encoded = NULL;
- for (s1 = buf; howmuch; s1++, howmuch--)
- for (prot = protostrings; *prot; prot++)
- if (howmuch <= strlen (*prot))
- continue;
- else if (!strncasecmp (*prot, s1, strlen (*prot)))
- {
- for (s2 = s1, *count = 0;
- howmuch && *s2 && *s2 >= 32 && *s2 < 127 && !ISSPACE (*s2) &&
- !strchr (URL_SEPARATOR, *s2);
- s2++, (*count)++, howmuch--);
- return s1;
- }
+ return u;
+
+ error:
+ /* Cleanup in case of error: */
+ if (url_encoded && url_encoded != url)
+ xfree (url_encoded);
+
+ /* Transmit the error code to the caller, if the caller wants to
+ know. */
+ if (error)
+ *error = error_code;
return NULL;
}
-/* Scans the file for signs of URL-s. Returns a vector of pointers,
- each pointer representing a URL string. The file is *not* assumed
- to be HTML. */
-urlpos *
-get_urls_file (const char *file)
+/* Return the error message string from ERROR_CODE, which should have
+ been retrieved from url_parse. The error message is translated. */
+
+const char *
+url_error (int error_code)
{
- long nread;
- FILE *fp;
- char *buf;
- const char *pbuf;
- int size;
- urlpos *first, *current, *old;
+ assert (error_code >= 0 && error_code < countof (parse_errors));
+ return _(parse_errors[error_code]);
+}
- if (file && !HYPHENP (file))
+/* Split PATH into DIR and FILE. PATH comes from the URL and is
+ expected to be URL-escaped.
+
+ The path is split into directory (the part up to the last slash)
+ and file (the part after the last slash), which are subsequently
+ unescaped. Examples:
+
+ PATH DIR FILE
+ "foo/bar/baz" "foo/bar" "baz"
+ "foo/bar/" "foo/bar" ""
+ "foo" "" "foo"
+ "foo/bar/baz%2fqux" "foo/bar" "baz/qux" (!)
+
+ DIR and FILE are freshly allocated. */
+
+static void
+split_path (const char *path, char **dir, char **file)
+{
+ char *last_slash = strrchr (path, '/');
+ if (!last_slash)
{
- fp = fopen (file, "rb");
- if (!fp)
- {
- logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
- return NULL;
- }
+ *dir = xstrdup ("");
+ *file = xstrdup (path);
}
else
- fp = stdin;
- /* Load the file. */
- load_file (fp, &buf, &nread);
- if (file && !HYPHENP (file))
- fclose (fp);
- DEBUGP (("Loaded %s (size %ld).\n", file, nread));
- first = current = NULL;
- /* Fill the linked list with URLs. */
- for (pbuf = buf; (pbuf = findurl (pbuf, nread - (pbuf - buf), &size));
- pbuf += size)
{
- /* Allocate the space. */
- old = current;
- current = (urlpos *)xmalloc (sizeof (urlpos));
- if (old)
- old->next = current;
- memset (current, 0, sizeof (*current));
- current->next = NULL;
- current->url = (char *)xmalloc (size + 1);
- memcpy (current->url, pbuf, size);
- current->url[size] = '\0';
- if (!first)
- first = current;
+ *dir = strdupdelim (path, last_slash);
+ *file = xstrdup (last_slash + 1);
}
- /* Free the buffer. */
- free (buf);
+ url_unescape (*dir);
+ url_unescape (*file);
+}
+
+/* Note: URL's "full path" is the path with the query string and
+ params appended. The "fragment" (#foo) is intentionally ignored,
+ but that might be changed. For example, if the original URL was
+ "http://host:port/foo/bar/baz;bullshit?querystring#uselessfragment",
+ the full path will be "/foo/bar/baz;bullshit?querystring". */
+
+/* Return the length of the full path, without the terminating
+ zero. */
+
+static int
+full_path_length (const struct url *url)
+{
+ int len = 0;
+
+#define FROB(el) if (url->el) len += 1 + strlen (url->el)
- return first;
+ FROB (path);
+ FROB (params);
+ FROB (query);
+
+#undef FROB
+
+ return len;
}
-/* Similar to get_urls_file, but for HTML files. FILE is scanned as
- an HTML document using htmlfindurl(), which see. get_urls_html()
- constructs the HTML-s from the relative href-s.
+/* Write out the full path. */
- If SILENT is non-zero, do not barf on baseless relative links. */
-urlpos *
-get_urls_html (const char *file, const char *this_url, int silent,
- int dash_p_leaf_HTML)
+static void
+full_path_write (const struct url *url, char *where)
{
- long nread;
- FILE *fp;
- char *orig_buf;
- const char *buf;
- int step, first_time;
- urlpos *first, *current, *old;
-
- if (file && !HYPHENP (file))
- {
- fp = fopen (file, "rb");
- if (!fp)
- {
- logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
- return NULL;
- }
- }
- else
- fp = stdin;
- /* Load the file. */
- load_file (fp, &orig_buf, &nread);
- if (file && !HYPHENP (file))
- fclose (fp);
- DEBUGP (("Loaded HTML file %s (size %ld).\n", file, nread));
- first = current = NULL;
- first_time = 1;
- /* Iterate over the URLs in BUF, picked by htmlfindurl(). */
- for (buf = orig_buf;
- (buf = htmlfindurl (buf, nread - (buf - orig_buf), &step, first_time,
- dash_p_leaf_HTML));
- buf += step)
- {
- int i, no_proto;
- int size = step;
- const char *pbuf = buf;
- char *constr, *base;
- const char *cbase;
- char *needs_freeing, *url_data;
-
- first_time = 0;
-
- /* A frequent phenomenon that needs to be handled are pages
- generated by brain-damaged HTML generators, which refer to to
- URI-s as <a href="<spaces>URI<spaces>">. We simply ignore
- any spaces at the beginning or at the end of the string.
- This is probably not strictly correct, but that's what the
- browsers do, so we may follow. May the authors of "WYSIWYG"
- HTML tools burn in hell for the damage they've inflicted! */
- while ((pbuf < buf + step) && ISSPACE (*pbuf))
- {
- ++pbuf;
- --size;
- }
- while (size && ISSPACE (pbuf[size - 1]))
- --size;
- if (!size)
- break;
-
- /* It would be nice if we could avoid allocating memory in this
- loop, but I don't see an easy way. To process the entities,
- we need to either copy the data, or change it destructively.
- I choose the former.
-
- We have two pointers: needs_freeing and url_data, because the
- code below does thing like url_data += <something>, and we
- want to pass the original string to free(). */
- needs_freeing = url_data = html_decode_entities (pbuf, pbuf + size);
- size = strlen (url_data);
-
- for (i = 0; protostrings[i]; i++)
- {
- if (!strncasecmp (protostrings[i], url_data,
- MINVAL (strlen (protostrings[i]), size)))
- break;
- }
- /* Check for http:RELATIVE_URI. See below for details. */
- if (protostrings[i]
- && !(strncasecmp (url_data, "http:", 5) == 0
- && strncasecmp (url_data, "http://", 7) != 0))
- {
- no_proto = 0;
- }
- else
- {
- no_proto = 1;
- /* This is for extremely brain-damaged pages that refer to
- relative URI-s as <a href="http:URL">. Just strip off the
- silly leading "http:" (as well as any leading blanks
- before it). */
- if ((size > 5) && !strncasecmp ("http:", url_data, 5))
- url_data += 5, size -= 5;
- }
- if (!no_proto)
- {
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- {
- if (!strncasecmp (sup_protos[i].name, url_data,
- MINVAL (strlen (sup_protos[i].name), size)))
- break;
- }
- /* Do *not* accept a non-supported protocol. */
- if (i == ARRAY_SIZE (sup_protos))
- {
- free (needs_freeing);
- continue;
- }
- }
- if (no_proto)
- {
- /* First, construct the base, which can be relative itself.
-
- Criteria for creating the base are:
- 1) html_base created by <base href="...">
- 2) current URL
- 3) base provided from the command line */
- cbase = html_base ();
- if (!cbase)
- cbase = this_url;
- if (!cbase)
- cbase = opt.base_href;
- if (!cbase) /* Error condition -- a baseless
- relative link. */
- {
- if (!opt.quiet && !silent)
- {
- /* Use malloc, not alloca because this is called in
- a loop. */
- char *temp = (char *)malloc (size + 1);
- strncpy (temp, url_data, size);
- temp[size] = '\0';
- logprintf (LOG_NOTQUIET,
- _("Error (%s): Link %s without a base provided.\n"),
- file, temp);
- free (temp);
- }
- free (needs_freeing);
- continue;
- }
- if (this_url)
- base = construct (this_url, cbase, strlen (cbase),
- !has_proto (cbase));
- else
- {
- /* Base must now be absolute, with host name and
- protocol. */
- if (!has_proto (cbase))
- {
- logprintf (LOG_NOTQUIET, _("\
-Error (%s): Base %s relative, without referer URL.\n"),
- file, cbase);
- free (needs_freeing);
- continue;
- }
- base = xstrdup (cbase);
- }
- constr = construct (base, url_data, size, no_proto);
- free (base);
- }
- else /* has proto */
- {
- constr = (char *)xmalloc (size + 1);
- strncpy (constr, url_data, size);
- constr[size] = '\0';
- }
-#ifdef DEBUG
- if (opt.debug)
- {
- char *tmp;
- const char *tmp2;
-
- tmp2 = html_base ();
- /* Use malloc, not alloca because this is called in a loop. */
- tmp = (char *)xmalloc (size + 1);
- strncpy (tmp, url_data, size);
- tmp[size] = '\0';
- logprintf (LOG_ALWAYS,
- "file %s; this_url %s; base %s\nlink: %s; constr: %s\n",
- file, this_url ? this_url : "(null)",
- tmp2 ? tmp2 : "(null)", tmp, constr);
- free (tmp);
- }
-#endif
+#define FROB(el, chr) do { \
+ char *f_el = url->el; \
+ if (f_el) { \
+ int l = strlen (f_el); \
+ *where++ = chr; \
+ memcpy (where, f_el, l); \
+ where += l; \
+ } \
+} while (0)
+
+ FROB (path, '/');
+ FROB (params, ';');
+ FROB (query, '?');
+
+#undef FROB
+}
+
+/* Public function for getting the "full path". E.g. if u->path is
+ "foo/bar" and u->query is "param=value", full_path will be
+ "/foo/bar?param=value". */
+
+char *
+url_full_path (const struct url *url)
+{
+ int length = full_path_length (url);
+ char *full_path = (char *)xmalloc(length + 1);
- /* Allocate the space. */
- old = current;
- current = (urlpos *)xmalloc (sizeof (urlpos));
- if (old)
- old->next = current;
- if (!first)
- first = current;
- /* Fill the values. */
- memset (current, 0, sizeof (*current));
- current->next = NULL;
- current->url = constr;
- current->size = step;
- current->pos = buf - orig_buf;
- /* A URL is relative if the host and protocol are not named,
- and the name does not start with `/'. */
- if (no_proto && *url_data != '/')
- current->flags |= (URELATIVE | UNOPROTO);
- else if (no_proto)
- current->flags |= UNOPROTO;
- free (needs_freeing);
- }
- free (orig_buf);
+ full_path_write (url, full_path);
+ full_path[length] = '\0';
- return first;
+ return full_path;
}
-\f
-/* Free the linked list of urlpos. */
-void
-free_urlpos (urlpos *l)
+
+/* Escape unsafe and reserved characters, except for the slash
+ characters. */
+
+static char *
+url_escape_dir (const char *dir)
{
- while (l)
+ char *newdir = url_escape_1 (dir, urlchr_unsafe | urlchr_reserved, 1);
+ char *h, *t;
+ if (newdir == dir)
+ return (char *)dir;
+
+ /* Unescape slashes in NEWDIR. */
+
+ h = newdir; /* hare */
+ t = newdir; /* tortoise */
+
+ for (; *h; h++, t++)
{
- urlpos *next = l->next;
- free (l->url);
- FREE_MAYBE (l->local_name);
- free (l);
- l = next;
+ /* url_escape_1 converts '/' to "%2F" exactly, so undo that here. */
+ if (*h == '%' && h[1] == '2' && h[2] == 'F')
+ {
+ *t = '/';
+ h += 2;
+ }
+ else
+ *t = *h;
}
+ *t = '\0';
+
+ return newdir;
}
-/* Rotate FNAME opt.backups times */
-void
-rotate_backups(const char *fname)
+/* Sync u->path and u->url with u->dir and u->file. Called after
+ u->file or u->dir have been changed, typically by the FTP code. */
+
+static void
+sync_path (struct url *u)
{
- int maxlen = strlen (fname) + 1 + numdigit (opt.backups) + 1;
- char *from = (char *)alloca (maxlen);
- char *to = (char *)alloca (maxlen);
- struct stat sb;
- int i;
+ char *newpath, *efile, *edir;
+
+ xfree (u->path);
- if (stat (fname, &sb) == 0)
- if (S_ISREG (sb.st_mode) == 0)
- return;
+ /* u->dir and u->file are not escaped. URL-escape them before
+ reassembling them into u->path. That way, if they contain
+ separators like '?' or even if u->file contains slashes, the
+ path will be correctly assembled. (u->file can contain slashes
+ if the URL specifies it with %2f, or if an FTP server returns
+ it.) */
+ edir = url_escape_dir (u->dir);
+ efile = url_escape_1 (u->file, urlchr_unsafe | urlchr_reserved, 1);
- for (i = opt.backups; i > 1; i--)
+ if (!*edir)
+ newpath = xstrdup (efile);
+ else
{
- sprintf (from, "%s.%d", fname, i - 1);
- sprintf (to, "%s.%d", fname, i);
- /* #### This will fail on machines without the rename() system
- call. */
- rename (from, to);
+ int dirlen = strlen (edir);
+ int filelen = strlen (efile);
+
+ /* Copy "DIR/FILE" to newpath. */
+ char *p = newpath = xmalloc (dirlen + 1 + filelen + 1);
+ memcpy (p, edir, dirlen);
+ p += dirlen;
+ *p++ = '/';
+ memcpy (p, efile, filelen);
+ p += filelen;
+ *p++ = '\0';
}
- sprintf (to, "%s.%d", fname, 1);
- rename(fname, to);
+ u->path = newpath;
+
+ if (edir != u->dir)
+ xfree (edir);
+ if (efile != u->file)
+ xfree (efile);
+
+ /* Regenerate u->url as well. */
+ xfree (u->url);
+ u->url = url_string (u, 0);
+}
+
+/* Mutators. Code in ftp.c insists on changing u->dir and u->file.
+ This way we can sync u->path and u->url when they get changed. */
+
+void
+url_set_dir (struct url *url, const char *newdir)
+{
+ xfree (url->dir);
+ url->dir = xstrdup (newdir);
+ sync_path (url);
+}
+
+void
+url_set_file (struct url *url, const char *newfile)
+{
+ xfree (url->file);
+ url->file = xstrdup (newfile);
+ sync_path (url);
}
+void
+url_free (struct url *url)
+{
+ xfree (url->host);
+ xfree (url->path);
+ xfree (url->url);
+
+ xfree_null (url->params);
+ xfree_null (url->query);
+ xfree_null (url->fragment);
+ xfree_null (url->user);
+ xfree_null (url->passwd);
+
+ xfree (url->dir);
+ xfree (url->file);
+
+ xfree (url);
+}
+\f
/* Create all the necessary directories for PATH (a file). Calls
mkdirhier() internally. */
int
int res;
p = path + strlen (path);
- for (; *p != '/' && p != path; p--);
+ for (; *p != '/' && p != path; p--)
+ ;
+
/* Don't create if it's just a file. */
if ((p == path) && (*p != '/'))
return 0;
t = strdupdelim (path, p);
+
/* Check whether the directory exists. */
if ((stat (t, &st) == 0))
{
if (S_ISDIR (st.st_mode))
{
- free (t);
+ xfree (t);
return 0;
}
else
res = make_directory (t);
if (res != 0)
logprintf (LOG_NOTQUIET, "%s: %s", t, strerror (errno));
- free (t);
+ xfree (t);
return res;
}
+\f
+/* Functions for constructing the file name out of URL components. */
-static int
-count_slashes (const char *s)
+/* A growable string structure, used by url_file_name and friends.
+ This should perhaps be moved to utils.c.
+
+ The idea is to have a convenient and efficient way to construct a
+ string by having various functions append data to it. Instead of
+ passing the obligatory BASEVAR, SIZEVAR and TAILPOS to all the
+ functions in question, we pass a pointer to this struct. */
+
+struct growable {
+ char *base;
+ int size;
+ int tail;
+};
+
+/* Ensure that the string can accept APPEND_COUNT more characters past
+ the current TAIL position. If necessary, this will grow the string
+ and update its allocated size. If the string is already large
+ enough to take TAIL+APPEND_COUNT characters, this does nothing. */
+#define GROW(g, append_size) do { \
+ struct growable *G_ = g; \
+ DO_REALLOC (G_->base, G_->size, G_->tail + append_size, char); \
+} while (0)
+
+/* Return the tail position of the string. */
+#define TAIL(r) ((r)->base + (r)->tail)
+
+/* Move the tail position by APPEND_COUNT characters. */
+#define TAIL_INCR(r, append_count) ((r)->tail += append_count)
+
+/* Append the string STR to DEST. NOTICE: the string in DEST is not
+ terminated. */
+
+static void
+append_string (const char *str, struct growable *dest)
{
- int i = 0;
- while (*s)
- if (*s++ == '/')
- ++i;
- return i;
+ int l = strlen (str);
+ GROW (dest, l);
+ memcpy (TAIL (dest), str, l);
+ TAIL_INCR (dest, l);
}
-/* Return the path name of the URL-equivalent file name, with a
- remote-like structure of directories. */
-static char *
-mkstruct (const struct urlinfo *u)
+/* Append CH to DEST. For example, append_char (0, DEST)
+ zero-terminates DEST. */
+
+static void
+append_char (char ch, struct growable *dest)
+{
+ GROW (dest, 1);
+ *TAIL (dest) = ch;
+ TAIL_INCR (dest, 1);
+}
+
+enum {
+ filechr_not_unix = 1, /* unusable on Unix, / and \0 */
+ filechr_not_windows = 2, /* unusable on Windows, one of \|/<>?:*" */
+ filechr_control = 4 /* a control character, e.g. 0-31 */
+};
+
+#define FILE_CHAR_TEST(c, mask) (filechr_table[(unsigned char)(c)] & (mask))
+
+/* Shorthands for the table: */
+#define U filechr_not_unix
+#define W filechr_not_windows
+#define C filechr_control
+
+#define UW U|W
+#define UWC U|W|C
+
+/* Table of characters unsafe under various conditions (see above).
+
+ Arguably we could also claim `%' to be unsafe, since we use it as
+ the escape character. If we ever want to be able to reliably
+ translate file name back to URL, this would become crucial.
+ Right now, it's better to be minimal in escaping. */
+
+const static unsigned char filechr_table[256] =
+{
+UWC, C, C, C, C, C, C, C, /* NUL SOH STX ETX EOT ENQ ACK BEL */
+ C, C, C, C, C, C, C, C, /* BS HT LF VT FF CR SO SI */
+ C, C, C, C, C, C, C, C, /* DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
+ C, C, C, C, C, C, C, C, /* CAN EM SUB ESC FS GS RS US */
+ 0, 0, W, 0, 0, 0, 0, 0, /* SP ! " # $ % & ' */
+ 0, 0, W, 0, 0, 0, 0, UW, /* ( ) * + , - . / */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 1 2 3 4 5 6 7 */
+ 0, 0, W, 0, W, 0, W, W, /* 8 9 : ; < = > ? */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* @ A B C D E F G */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* H I J K L M N O */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* P Q R S T U V W */
+ 0, 0, 0, 0, W, 0, 0, 0, /* X Y Z [ \ ] ^ _ */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* ` a b c d e f g */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* h i j k l m n o */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* p q r s t u v w */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* x y z { | } ~ DEL */
+
+ C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, /* 128-143 */
+ C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, /* 144-159 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+#undef U
+#undef W
+#undef C
+#undef UW
+#undef UWC
+
+/* FN_PORT_SEP is the separator between host and port in file names
+ for non-standard port numbers. On Unix this is normally ':', as in
+ "www.xemacs.org:4001/index.html". Under Windows, we set it to +
+ because Windows can't handle ':' in file names. */
+#define FN_PORT_SEP (opt.restrict_files_os != restrict_windows ? ':' : '+')
+
+/* FN_QUERY_SEP is the separator between the file name and the URL
+ query, normally '?'. Since Windows cannot handle '?' as part of
+ file name, we use '@' instead there. */
+#define FN_QUERY_SEP (opt.restrict_files_os != restrict_windows ? '?' : '@')
+
+/* Quote path element, characters in [b, e), as file name, and append
+ the quoted string to DEST. Each character is quoted as per
+ file_unsafe_char and the corresponding table.
+
+ If ESCAPED_P is non-zero, the path element is considered to be
+ URL-escaped and will be unescaped prior to inspection. */
+
+static void
+append_uri_pathel (const char *b, const char *e, int escaped_p,
+ struct growable *dest)
{
- char *host, *dir, *file, *res, *dirpref;
- int l;
+ const char *p;
+ int quoted, outlen;
- assert (u->dir != NULL);
- assert (u->host != NULL);
+ int mask;
+ if (opt.restrict_files_os == restrict_unix)
+ mask = filechr_not_unix;
+ else
+ mask = filechr_not_windows;
+ if (opt.restrict_files_ctrl)
+ mask |= filechr_control;
- if (opt.cut_dirs)
+ /* Copy [b, e) to PATHEL and URL-unescape it. */
+ if (escaped_p)
{
- char *ptr = u->dir + (*u->dir == '/');
- int slash_count = 1 + count_slashes (ptr);
- int cut = MINVAL (opt.cut_dirs, slash_count);
- for (; cut && *ptr; ptr++)
- if (*ptr == '/')
- --cut;
- STRDUP_ALLOCA (dir, ptr);
+ char *unescaped;
+ BOUNDED_TO_ALLOCA (b, e, unescaped);
+ url_unescape (unescaped);
+ b = unescaped;
+ e = unescaped + strlen (unescaped);
}
- else
- dir = u->dir + (*u->dir == '/');
- host = xstrdup (u->host);
- /* Check for the true name (or at least a consistent name for saving
- to directory) of HOST, reusing the hlist if possible. */
- if (opt.add_hostdir && !opt.simple_check)
+ /* Walk the PATHEL string and check how many characters we'll need
+ to add for file quoting. */
+ quoted = 0;
+ for (p = b; p < e; p++)
+ if (FILE_CHAR_TEST (*p, mask))
+ ++quoted;
+
+ /* e-b is the string length. Each quoted char means two additional
+ characters in the string, hence 2*quoted. */
+ outlen = (e - b) + (2 * quoted);
+ GROW (dest, outlen);
+
+ if (!quoted)
{
- char *nhost = realhost (host);
- free (host);
- host = nhost;
+ /* If there's nothing to quote, we don't need to go through the
+ string the second time. */
+ memcpy (TAIL (dest), b, outlen);
}
- /* Add dir_prefix and hostname (if required) to the beginning of
- dir. */
- if (opt.add_hostdir)
+ else
{
- if (!DOTP (opt.dir_prefix))
+ char *q = TAIL (dest);
+ for (p = b; p < e; p++)
{
- dirpref = (char *)alloca (strlen (opt.dir_prefix) + 1
- + strlen (host) + 1);
- sprintf (dirpref, "%s/%s", opt.dir_prefix, host);
+ if (!FILE_CHAR_TEST (*p, mask))
+ *q++ = *p;
+ else
+ {
+ unsigned char ch = *p;
+ *q++ = '%';
+ *q++ = XNUM_TO_DIGIT (ch >> 4);
+ *q++ = XNUM_TO_DIGIT (ch & 0xf);
+ }
}
- else
- STRDUP_ALLOCA (dirpref, host);
- }
- else /* not add_hostdir */
- {
- if (!DOTP (opt.dir_prefix))
- dirpref = opt.dir_prefix;
- else
- dirpref = "";
+ assert (q - TAIL (dest) == outlen);
}
- free (host);
+ TAIL_INCR (dest, outlen);
+}
+
+/* Append to DEST the directory structure that corresponds to the
+ directory part of URL's path. For example, if the URL is
+ http://server/dir1/dir2/file, this appends "/dir1/dir2".
+
+ Each path element ("dir1" and "dir2" in the above example) is
+ examined, url-unescaped, and re-escaped as file name element.
+
+ Additionally, it cuts as many directories from the path as
+ specified by opt.cut_dirs. For example, if opt.cut_dirs is 1, it
+ will produce "dir2" for the above example. For 2 or more, it will
+ produce "".
- /* If there is a prefix, prepend it. */
- if (*dirpref)
+ Each component of the path is quoted for use as file name. */
+
+static void
+append_dir_structure (const struct url *u, struct growable *dest)
+{
+ char *pathel, *next;
+ int cut = opt.cut_dirs;
+
+ /* Go through the path components, de-URL-quote them, and quote them
+ (if necessary) as file names. */
+
+ pathel = u->path;
+ for (; (next = strchr (pathel, '/')) != NULL; pathel = next + 1)
{
- char *newdir = (char *)alloca (strlen (dirpref) + 1 + strlen (dir) + 2);
- sprintf (newdir, "%s%s%s", dirpref, *dir == '/' ? "" : "/", dir);
- dir = newdir;
- }
- dir = xstrdup (dir);
- URL_CLEANSE (dir);
- l = strlen (dir);
- if (l && dir[l - 1] == '/')
- dir[l - 1] = '\0';
-
- if (!*u->file)
- file = "index.html";
- else
- file = u->file;
+ if (cut-- > 0)
+ continue;
+ if (pathel == next)
+ /* Ignore empty pathels. */
+ continue;
- /* Finally, construct the full name. */
- res = (char *)xmalloc (strlen (dir) + 1 + strlen (file) + 1);
- sprintf (res, "%s%s%s", dir, *dir ? "/" : "", file);
- free (dir);
- return res;
+ if (dest->tail)
+ append_char ('/', dest);
+ append_uri_pathel (pathel, next, 1, dest);
+ }
}
-/* Create a unique filename, corresponding to a given URL. Calls
- mkstruct if necessary. Does *not* actually create any directories. */
+/* Return a unique file name that matches the given URL as well as
+ possible. Does not create directories on the file system. */
+
char *
-url_filename (const struct urlinfo *u)
+url_file_name (const struct url *u)
{
- char *file, *name;
- int have_prefix = 0; /* whether we must prepend opt.dir_prefix */
+ struct growable fnres;
+ char *u_file, *u_query;
+ char *fname, *unique;
+
+ fnres.base = NULL;
+ fnres.size = 0;
+ fnres.tail = 0;
+
+ /* Start with the directory prefix, if specified. */
+ if (opt.dir_prefix)
+ append_string (opt.dir_prefix, &fnres);
+
+ /* If "dirstruct" is turned on (typically the case with -r), add
+ the host and port (unless those have been turned off) and
+ directory structure. */
if (opt.dirstruct)
{
- file = mkstruct (u);
- have_prefix = 1;
- }
- else
- {
- if (!*u->file)
- file = xstrdup ("index.html");
- else
- file = xstrdup (u->file);
+ if (opt.add_hostdir)
+ {
+ if (fnres.tail)
+ append_char ('/', &fnres);
+ append_string (u->host, &fnres);
+ if (u->port != scheme_default_port (u->scheme))
+ {
+ char portstr[24];
+ number_to_string (portstr, u->port);
+ append_char (FN_PORT_SEP, &fnres);
+ append_string (portstr, &fnres);
+ }
+ }
+
+ append_dir_structure (u, &fnres);
}
- if (!have_prefix)
+ /* Add the file name. */
+ if (fnres.tail)
+ append_char ('/', &fnres);
+ u_file = *u->file ? u->file : "index.html";
+ append_uri_pathel (u_file, u_file + strlen (u_file), 0, &fnres);
+
+ /* Append "?query" to the file name. */
+ u_query = u->query && *u->query ? u->query : NULL;
+ if (u_query)
{
- /* Check whether the prefix directory is something other than "."
- before prepending it. */
- if (!DOTP (opt.dir_prefix))
- {
- char *nfile = (char *)xmalloc (strlen (opt.dir_prefix)
- + 1 + strlen (file) + 1);
- sprintf (nfile, "%s/%s", opt.dir_prefix, file);
- free (file);
- file = nfile;
- }
+ append_char (FN_QUERY_SEP, &fnres);
+ append_uri_pathel (u_query, u_query + strlen (u_query), 1, &fnres);
}
- /* DOS-ish file systems don't like `%' signs in them; we change it
- to `@'. */
-#ifdef WINDOWS
- {
- char *p = file;
- for (p = file; *p; p++)
- if (*p == '%')
- *p = '@';
- }
-#endif /* WINDOWS */
+
+ /* Zero-terminate the file name. */
+ append_char ('\0', &fnres);
+
+ fname = fnres.base;
/* Check the cases in which the unique extensions are not used:
1) Clobbering is turned off (-nc).
4) Hierarchy is built.
The exception is the case when file does exist and is a
- directory (actually support for bad httpd-s). */
+ directory (see `mkalldirs' for explanation). */
+
if ((opt.noclobber || opt.always_rest || opt.timestamping || opt.dirstruct)
- && !(file_exists_p (file) && !file_non_directory_p (file)))
- return file;
+ && !(file_exists_p (fname) && !file_non_directory_p (fname)))
+ return fname;
- /* Find a unique name. */
- name = unique_name (file);
- free (file);
- return name;
+ unique = unique_name (fname, 1);
+ if (unique != fname)
+ xfree (fname);
+ return unique;
}
-/* Like strlen(), but allow the URL to be ended with '?'. */
+/* Return the length of URL's path. Path is considered to be
+ terminated by one of '?', ';', '#', or by the end of the
+ string. */
static int
-urlpath_length (const char *url)
+path_length (const char *url)
{
- const char *q = strchr (url, '?');
- if (q)
- return q - url;
- return strlen (url);
+ const char *q = strpbrk_or_eos (url, "?;#");
+ return q - url;
}
+/* Find the last occurrence of character C in the range [b, e), or
+ NULL, if none are present. This is equivalent to strrchr(b, c),
+ except that it accepts an END argument instead of requiring the
+ string to be zero-terminated. Why is there no memrchr()? */
static const char *
find_last_char (const char *b, const char *e, char c)
{
return e;
return NULL;
}
+\f
+/* Resolve "." and ".." elements of PATH by destructively modifying
+ PATH and return non-zero if PATH has been modified, zero otherwise.
-/* Construct an absolute URL, given a (possibly) relative one. This
- gets tricky if you want to cover all the "reasonable" cases, but
- I'm satisfied with the result. */
-static char *
-construct (const char *url, const char *sub, int subsize, int no_proto)
+ The algorithm is in spirit similar to the one described in rfc1808,
+ although implemented differently, in one pass. To recap, path
+ elements containing only "." are removed, and ".." is taken to mean
+ "back up one element". Single leading and trailing slashes are
+ preserved.
+
+ This function does not handle URL escapes explicitly. If you're
+ passing paths from URLs, make sure to unquote "%2e" and "%2E" to
+ ".", so that this function can find the dots. (Wget's URL parser
+ calls reencode_escapes, which see.)
+
+ For example, "a/b/c/./../d/.." will yield "a/b/". More exhaustive
+ test examples are provided below. If you change anything in this
+ function, run test_path_simplify to make sure you haven't broken a
+ test case. */
+
+static int
+path_simplify (char *path)
{
- char *constr;
+ char *h, *t, *end;
- if (no_proto)
+ /* Preserve the leading '/'. */
+ if (path[0] == '/')
+ ++path;
+
+ h = path; /* hare */
+ t = path; /* tortoise */
+ end = path + strlen (path);
+
+ while (h < end)
{
- const char *end = url + urlpath_length (url);
+ /* Hare should be at the beginning of a path element. */
- if (*sub != '/')
+ if (h[0] == '.' && (h[1] == '/' || h[1] == '\0'))
+ {
+ /* Ignore "./". */
+ h += 2;
+ }
+ else if (h[0] == '.' && h[1] == '.' && (h[2] == '/' || h[2] == '\0'))
{
- /* SUB is a relative URL: we need to replace everything
- after last slash (possibly empty) with SUB.
-
- So, if URL is "whatever/foo/bar", and SUB is "qux/xyzzy",
- our result should be "whatever/foo/qux/xyzzy". */
- int need_explicit_slash = 0;
- int span;
- const char *start_insert;
- const char *last_slash = find_last_char (url, end, '/'); /* the last slash. */
- if (!last_slash)
+	      /* Handle "../" by retreating the tortoise by one path
+		 element -- but not past beginning of PATH. */
+ if (t > path)
{
- /* No slash found at all. Append SUB to what we have,
- but we'll need a slash as a separator.
-
- Example: if url == "foo" and sub == "qux/xyzzy", then
- we cannot just append sub to url, because we'd get
- "fooqux/xyzzy", whereas what we want is
- "foo/qux/xyzzy".
-
- To make sure the / gets inserted, we set
- need_explicit_slash to 1. We also set start_insert
- to end + 1, so that the length calculations work out
- correctly for one more (slash) character. Accessing
- that character is fine, since it will be the
- delimiter, '\0' or '?'. */
- /* example: "foo?..." */
- /* ^ ('?' gets changed to '/') */
- start_insert = end + 1;
- need_explicit_slash = 1;
+ /* Move backwards until T hits the beginning of the
+ previous path element or the beginning of path. */
+ for (--t; t > path && t[-1] != '/'; t--)
+ ;
+ }
+ h += 3;
+ }
+ else if (*h == '/')
+ {
+ /* Ignore empty path elements. Supporting them well is hard
+ (where do you save "http://x.com///y.html"?), and they
+ don't bring any practical gain. Plus, they break our
+ filesystem-influenced assumptions: allowing them would
+ make "x/y//../z" simplify to "x/y/z", whereas most people
+ would expect "x/z". */
+ ++h;
+ }
+ else
+ {
+ /* A regular path element. If H hasn't advanced past T,
+ simply skip to the next path element. Otherwise, copy
+ the path element until the next slash. */
+ if (t == h)
+ {
+ /* Skip the path element, including the slash. */
+ while (h < end && *h != '/')
+ t++, h++;
+ if (h < end)
+ t++, h++;
}
else
{
- /* example: "whatever/foo/bar" */
- /* ^ */
- start_insert = last_slash + 1;
+ /* Copy the path element, including the final slash. */
+ while (h < end && *h != '/')
+ *t++ = *h++;
+ if (h < end)
+ *t++ = *h++;
}
-
- span = start_insert - url;
- constr = (char *)xmalloc (span + subsize + 1);
- if (span)
- memcpy (constr, url, span);
- if (need_explicit_slash)
- constr[span - 1] = '/';
- if (subsize)
- memcpy (constr + span, sub, subsize);
- constr[span + subsize] = '\0';
}
- else /* *sub == `/' */
- {
- /* SUB is an absolute path: we need to replace everything
- after (and including) the FIRST slash with SUB.
-
- So, if URL is "http://host/whatever/foo/bar", and SUB is
- "/qux/xyzzy", our result should be
- "http://host/qux/xyzzy". */
- int span;
- const char *slash, *start_insert;
- const char *pos = url;
- int seen_slash_slash = 0;
- /* We're looking for the first slash, but want to ignore
- double slash. */
- again:
- slash = memchr (pos, '/', end - pos);
- if (slash && !seen_slash_slash)
- if (*(slash + 1) == '/')
- {
- pos = slash + 2;
- seen_slash_slash = 1;
- goto again;
- }
-
- /* At this point, SLASH is the location of the first / after
- "//", or the first slash altogether. START_INSERT is the
- pointer to the location where SUB will be inserted. When
- examining the last two examples, keep in mind that SUB
- begins with '/'. */
-
- if (!slash && !seen_slash_slash)
- /* example: "foo" */
- /* ^ */
- start_insert = url;
- else if (!slash && seen_slash_slash)
- /* example: "http://foo" */
- /* ^ */
- start_insert = end;
- else if (slash && !seen_slash_slash)
- /* example: "foo/bar" */
- /* ^ */
- start_insert = url;
- else if (slash && seen_slash_slash)
- /* example: "http://something/" */
- /* ^ */
- start_insert = slash;
-
- span = start_insert - url;
- constr = (char *)xmalloc (span + subsize + 1);
- if (span)
- memcpy (constr, url, span);
- if (subsize)
- memcpy (constr + span, sub, subsize);
- constr[span + subsize] = '\0';
- }
- }
- else /* !no_proto */
- {
- constr = strdupdelim (sub, sub + subsize);
}
- return constr;
-}
-/* Like the function above, but with a saner caller interface. */
-char *
-url_concat (const char *base_url, const char *new_url)
-{
- return construct (base_url, new_url, strlen (new_url), !has_proto (new_url));
-}
-\f
-/* Optimize URL by host, destructively replacing u->host with realhost
- (u->host). Do this regardless of opt.simple_check. */
-void
-opt_url (struct urlinfo *u)
-{
- /* Find the "true" host. */
- char *host = realhost (u->host);
- free (u->host);
- u->host = host;
- assert (u->dir != NULL); /* the URL must have been parsed */
- /* Refresh the printed representation. */
- free (u->url);
- u->url = str_url (u, 0);
-}
+ if (t != h)
+ *t = '\0';
-/* This beautiful kludge is fortunately not needed, as I've made
- parse_dir do the (almost) right thing, so that a query can never
- become a part of directory. */
-#if 0
-/* Call path_simplify, but make sure that the part after the
- question-mark, if any, is not destroyed by path_simplify's
- "optimizations". */
-void
-path_simplify_with_kludge (char *path)
-{
- char *query = strchr (path, '?');
- if (query)
- /* path_simplify also works destructively, so we also have the
- license to write. */
- *query = '\0';
- path_simplify (path);
- if (query)
- {
- char *newend = path + strlen (path);
- *query = '?';
- if (newend != query)
- memmove (newend, query, strlen (query) + 1);
- }
+ return t != h;
}
-#endif
\f
-/* Returns proxy host address, in accordance with PROTO. */
+/* Merge BASE with LINK and return the resulting URI.
+
+ Either of the URIs may be absolute or relative, complete with the
+ host name, or path only. This tries to reasonably handle all
+ foreseeable cases. It only employs minimal URL parsing, without
+ knowledge of the specifics of schemes.
+
+ Perhaps this function should call path_simplify so that the callers
+ don't have to call url_parse unconditionally. */
+
char *
-getproxy (uerr_t proto)
+uri_merge (const char *base, const char *link)
{
- if (proto == URLHTTP)
- return opt.http_proxy ? opt.http_proxy : getenv ("http_proxy");
- else if (proto == URLFTP)
- return opt.ftp_proxy ? opt.ftp_proxy : getenv ("ftp_proxy");
- else
- return NULL;
-}
+ int linklength;
+ const char *end;
+ char *merge;
-/* Should a host be accessed through proxy, concerning no_proxy? */
-int
-no_proxy_match (const char *host, const char **no_proxy)
-{
- if (!no_proxy)
- return 1;
- else
- return !sufmatch (no_proxy, host);
-}
-\f
-/* Change the links in an HTML document. Accepts a structure that
- defines the positions of all the links. */
-void
-convert_links (const char *file, urlpos *l)
-{
- FILE *fp;
- char *buf, *p, *p2;
- downloaded_file_t downloaded_file_return;
- long size;
-
- logprintf (LOG_VERBOSE, _("Converting %s... "), file);
- /* Read from the file.... */
- fp = fopen (file, "rb");
- if (!fp)
+ if (url_has_scheme (link))
+ return xstrdup (link);
+
+ /* We may not examine BASE past END. */
+ end = base + path_length (base);
+ linklength = strlen (link);
+
+ if (!*link)
{
- logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
- file, strerror (errno));
- return;
+ /* Empty LINK points back to BASE, query string and all. */
+ return xstrdup (base);
}
- /* ...to a buffer. */
- load_file (fp, &buf, &size);
- fclose (fp);
-
- downloaded_file_return = downloaded_file(CHECK_FOR_FILE, file);
+ else if (*link == '?')
+ {
+ /* LINK points to the same location, but changes the query
+ string. Examples: */
+ /* uri_merge("path", "?new") -> "path?new" */
+ /* uri_merge("path?foo", "?new") -> "path?new" */
+ /* uri_merge("path?foo#bar", "?new") -> "path?new" */
+ /* uri_merge("path#foo", "?new") -> "path?new" */
+ int baselength = end - base;
+ merge = xmalloc (baselength + linklength + 1);
+ memcpy (merge, base, baselength);
+ memcpy (merge + baselength, link, linklength);
+ merge[baselength + linklength] = '\0';
+ }
+ else if (*link == '#')
+ {
+ /* uri_merge("path", "#new") -> "path#new" */
+ /* uri_merge("path#foo", "#new") -> "path#new" */
+ /* uri_merge("path?foo", "#new") -> "path?foo#new" */
+ /* uri_merge("path?foo#bar", "#new") -> "path?foo#new" */
+ int baselength;
+ const char *end1 = strchr (base, '#');
+ if (!end1)
+ end1 = base + strlen (base);
+ baselength = end1 - base;
+ merge = xmalloc (baselength + linklength + 1);
+ memcpy (merge, base, baselength);
+ memcpy (merge + baselength, link, linklength);
+ merge[baselength + linklength] = '\0';
+ }
+ else if (*link == '/' && *(link + 1) == '/')
+ {
+ /* LINK begins with "//" and so is a net path: we need to
+ replace everything after (and including) the double slash
+ with LINK. */
+
+ /* uri_merge("foo", "//new/bar") -> "//new/bar" */
+ /* uri_merge("//old/foo", "//new/bar") -> "//new/bar" */
+ /* uri_merge("http://old/foo", "//new/bar") -> "http://new/bar" */
+
+ int span;
+ const char *slash;
+ const char *start_insert;
+
+ /* Look for first slash. */
+ slash = memchr (base, '/', end - base);
+ /* If found slash and it is a double slash, then replace
+ from this point, else default to replacing from the
+ beginning. */
+ if (slash && *(slash + 1) == '/')
+ start_insert = slash;
+ else
+ start_insert = base;
+
+ span = start_insert - base;
+ merge = (char *)xmalloc (span + linklength + 1);
+ if (span)
+ memcpy (merge, base, span);
+ memcpy (merge + span, link, linklength);
+ merge[span + linklength] = '\0';
+ }
+ else if (*link == '/')
+ {
+ /* LINK is an absolute path: we need to replace everything
+ after (and including) the FIRST slash with LINK.
+
+ So, if BASE is "http://host/whatever/foo/bar", and LINK is
+ "/qux/xyzzy", our result should be
+ "http://host/qux/xyzzy". */
+ int span;
+ const char *slash;
+ const char *start_insert = NULL; /* for gcc to shut up. */
+ const char *pos = base;
+ int seen_slash_slash = 0;
+ /* We're looking for the first slash, but want to ignore
+ double slash. */
+ again:
+ slash = memchr (pos, '/', end - pos);
+ if (slash && !seen_slash_slash)
+ if (*(slash + 1) == '/')
+ {
+ pos = slash + 2;
+ seen_slash_slash = 1;
+ goto again;
+ }
- if (opt.backup_converted && downloaded_file_return)
- /* Rather than just writing over the original .html file with the converted
- version, save the former to *.orig. Note we only do this for files we've
- _successfully_ downloaded, so we don't clobber .orig files sitting around
- from previous invocations. */
+ /* At this point, SLASH is the location of the first / after
+ "//", or the first slash altogether. START_INSERT is the
+ pointer to the location where LINK will be inserted. When
+ examining the last two examples, keep in mind that LINK
+ begins with '/'. */
+
+ if (!slash && !seen_slash_slash)
+ /* example: "foo" */
+ /* ^ */
+ start_insert = base;
+ else if (!slash && seen_slash_slash)
+ /* example: "http://foo" */
+ /* ^ */
+ start_insert = end;
+ else if (slash && !seen_slash_slash)
+ /* example: "foo/bar" */
+ /* ^ */
+ start_insert = base;
+ else if (slash && seen_slash_slash)
+ /* example: "http://something/" */
+ /* ^ */
+ start_insert = slash;
+
+ span = start_insert - base;
+ merge = (char *)xmalloc (span + linklength + 1);
+ if (span)
+ memcpy (merge, base, span);
+ memcpy (merge + span, link, linklength);
+ merge[span + linklength] = '\0';
+ }
+ else
{
- /* Construct the backup filename as the original name plus ".orig". */
- size_t filename_len = strlen(file);
- char* filename_plus_orig_suffix;
- boolean already_wrote_backup_file = FALSE;
- slist* converted_file_ptr;
- static slist* converted_files = NULL;
-
- if (downloaded_file_return == FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED)
+ /* LINK is a relative URL: we need to replace everything
+ after last slash (possibly empty) with LINK.
+
+ So, if BASE is "whatever/foo/bar", and LINK is "qux/xyzzy",
+ our result should be "whatever/foo/qux/xyzzy". */
+ int need_explicit_slash = 0;
+ int span;
+ const char *start_insert;
+ const char *last_slash = find_last_char (base, end, '/');
+ if (!last_slash)
+ {
+ /* No slash found at all. Append LINK to what we have,
+ but we'll need a slash as a separator.
+
+ Example: if base == "foo" and link == "qux/xyzzy", then
+ we cannot just append link to base, because we'd get
+ "fooqux/xyzzy", whereas what we want is
+ "foo/qux/xyzzy".
+
+ To make sure the / gets inserted, we set
+ need_explicit_slash to 1. We also set start_insert
+ to end + 1, so that the length calculations work out
+ correctly for one more (slash) character. Accessing
+ that character is fine, since it will be the
+ delimiter, '\0' or '?'. */
+ /* example: "foo?..." */
+ /* ^ ('?' gets changed to '/') */
+ start_insert = end + 1;
+ need_explicit_slash = 1;
+ }
+ else if (last_slash && last_slash >= base + 2
+ && last_slash[-2] == ':' && last_slash[-1] == '/')
{
- /* Just write "orig" over "html". We need to do it this way because
- when we're checking to see if we've downloaded the file before (to
- see if we can skip downloading it), we don't know if it's a
- text/html file. Therefore we don't know yet at that stage that -E
- is going to cause us to tack on ".html", so we need to compare
- vs. the original URL plus ".orig", not the original URL plus
- ".html.orig". */
- filename_plus_orig_suffix = xmalloc(filename_len + 1);
- strcpy(filename_plus_orig_suffix, file);
- strcpy((filename_plus_orig_suffix + filename_len) - 4, "orig");
+ /* example: http://host" */
+ /* ^ */
+ start_insert = end + 1;
+ need_explicit_slash = 1;
}
- else /* downloaded_file_return == FILE_DOWNLOADED_NORMALLY */
+ else
{
- /* Append ".orig" to the name. */
- filename_plus_orig_suffix = xmalloc(filename_len + sizeof(".orig"));
- strcpy(filename_plus_orig_suffix, file);
- strcpy(filename_plus_orig_suffix + filename_len, ".orig");
+ /* example: "whatever/foo/bar" */
+ /* ^ */
+ start_insert = last_slash + 1;
}
- /* We can get called twice on the same URL thanks to the
- convert_all_links() call in main(). If we write the .orig file each
- time in such a case, it'll end up containing the first-pass conversion,
- not the original file. So, see if we've already been called on this
- file. */
- converted_file_ptr = converted_files;
- while (converted_file_ptr != NULL)
- if (strcmp(converted_file_ptr->string, file) == 0)
- {
- already_wrote_backup_file = TRUE;
- break;
- }
- else
- converted_file_ptr = converted_file_ptr->next;
+ span = start_insert - base;
+ merge = (char *)xmalloc (span + linklength + 1);
+ if (span)
+ memcpy (merge, base, span);
+ if (need_explicit_slash)
+ merge[span - 1] = '/';
+ memcpy (merge + span, link, linklength);
+ merge[span + linklength] = '\0';
+ }
+
+ return merge;
+}
+\f
+#define APPEND(p, s) do { \
+ int len = strlen (s); \
+ memcpy (p, s, len); \
+ p += len; \
+} while (0)
+
+/* Use this instead of password when the actual password is supposed
+ to be hidden. We intentionally use a generic string without giving
+ away the number of characters in the password, like previous
+ versions did. */
+#define HIDDEN_PASSWORD "*password*"
+
+/* Recreate the URL string from the data in URL.
+
+ If HIDE is non-zero (as it is when we're calling this on a URL we
+ plan to print, but not when calling it to canonicalize a URL for
+ use within the program), password will be hidden. Unsafe
+ characters in the URL will be quoted. */
+
+char *
+url_string (const struct url *url, int hide_password)
+{
+ int size;
+ char *result, *p;
+ char *quoted_user = NULL, *quoted_passwd = NULL;
+
+ int scheme_port = supported_schemes[url->scheme].default_port;
+ char *scheme_str = supported_schemes[url->scheme].leading_string;
+ int fplen = full_path_length (url);
+
+ int brackets_around_host = 0;
+
+ assert (scheme_str != NULL);
- if (!already_wrote_backup_file)
+ /* Make sure the user name and password are quoted. */
+ if (url->user)
+ {
+ quoted_user = url_escape_allow_passthrough (url->user);
+ if (url->passwd)
{
- /* Rename <file> to <file>.orig before former gets written over. */
- if (rename(file, filename_plus_orig_suffix) != 0)
- logprintf (LOG_NOTQUIET, _("Cannot back up %s as %s: %s\n"),
- file, filename_plus_orig_suffix, strerror (errno));
-
- /* Remember that we've already written a .orig backup for this file.
- Note that we never free this memory since we need it till the
- convert_all_links() call, which is one of the last things the
- program does before terminating. BTW, I'm not sure if it would be
- safe to just set 'converted_file_ptr->string' to 'file' below,
- rather than making a copy of the string... Another note is that I
- thought I could just add a field to the urlpos structure saying
- that we'd written a .orig file for this URL, but that didn't work,
- so I had to make this separate list. */
- converted_file_ptr = xmalloc(sizeof(*converted_file_ptr));
- converted_file_ptr->string = xstrdup(file); /* die on out-of-mem. */
- converted_file_ptr->next = converted_files;
- converted_files = converted_file_ptr;
+ if (hide_password)
+ quoted_passwd = HIDDEN_PASSWORD;
+ else
+ quoted_passwd = url_escape_allow_passthrough (url->passwd);
}
-
- free(filename_plus_orig_suffix);
}
- /* Now open the file for writing. */
- fp = fopen (file, "wb");
- if (!fp)
+
+ if (strchr (url->host, ':'))
+ brackets_around_host = 1;
+
+ size = (strlen (scheme_str)
+ + strlen (url->host)
+ + (brackets_around_host ? 2 : 0)
+ + fplen
+ + 1);
+ if (url->port != scheme_port)
+ size += 1 + numdigit (url->port);
+ if (quoted_user)
{
- logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
- file, strerror (errno));
- free (buf);
- return;
+ size += 1 + strlen (quoted_user);
+ if (quoted_passwd)
+ size += 1 + strlen (quoted_passwd);
}
- /* Presumably we have to loop through multiple URLs here (even though we're
- only talking about a single local file) because of the -O option. */
- for (p = buf; l; l = l->next)
+
+ p = result = xmalloc (size);
+
+ APPEND (p, scheme_str);
+ if (quoted_user)
{
- if (l->pos >= size)
- {
- DEBUGP (("Something strange is going on. Please investigate."));
- break;
- }
- /* If the URL already is relative or it is not to be converted
- for some other reason (e.g. because of not having been
- downloaded in the first place), skip it. */
- if ((l->flags & URELATIVE) || !(l->flags & UABS2REL))
- {
- DEBUGP (("Skipping %s at position %d (flags %d).\n", l->url,
- l->pos, l->flags));
- continue;
- }
- /* Else, reach the position of the offending URL, echoing
- everything up to it to the outfile. */
- for (p2 = buf + l->pos; p < p2; p++)
- putc (*p, fp);
- if (l->flags & UABS2REL)
- /* Convert absolute URL to relative. */
+ APPEND (p, quoted_user);
+ if (quoted_passwd)
{
- char *newname = construct_relative (file, l->local_name);
- fprintf (fp, "%s", newname);
- DEBUGP (("ABS2REL: %s to %s at position %d in %s.\n",
- l->url, newname, l->pos, file));
- free (newname);
+ *p++ = ':';
+ APPEND (p, quoted_passwd);
}
- p += l->size;
+ *p++ = '@';
}
- /* Output the rest of the file. */
- if (p - buf < size)
+
+ if (brackets_around_host)
+ *p++ = '[';
+ APPEND (p, url->host);
+ if (brackets_around_host)
+ *p++ = ']';
+ if (url->port != scheme_port)
{
- for (p2 = buf + size; p < p2; p++)
- putc (*p, fp);
+ *p++ = ':';
+ p = number_to_string (p, url->port);
}
- fclose (fp);
- free (buf);
- logputs (LOG_VERBOSE, _("done.\n"));
-}
-/* Construct and return a malloced copy of the relative link from two
- pieces of information: local name S1 of the referring file and
- local name S2 of the referred file.
+ full_path_write (url, p);
+ p += fplen;
+ *p++ = '\0';
- So, if S1 is "jagor.srce.hr/index.html" and S2 is
- "jagor.srce.hr/images/news.gif", the function will return
- "images/news.gif".
+ assert (p - result == size);
- Alternately, if S1 is "fly.cc.fer.hr/ioccc/index.html", and S2 is
- "fly.cc.fer.hr/images/fly.gif", the function will return
- "../images/fly.gif".
+ if (quoted_user && quoted_user != url->user)
+ xfree (quoted_user);
+ if (quoted_passwd && !hide_password
+ && quoted_passwd != url->passwd)
+ xfree (quoted_passwd);
- Caveats: S1 should not begin with `/', unless S2 also begins with
- '/'. S1 should not contain things like ".." and such --
- construct_relative ("fly/ioccc/../index.html",
- "fly/images/fly.gif") will fail. (A workaround is to call
- something like path_simplify() on S1). */
-static char *
-construct_relative (const char *s1, const char *s2)
-{
- int i, cnt, sepdirs1;
- char *res;
-
- if (*s2 == '/')
- return xstrdup (s2);
- /* S1 should *not* be absolute, if S2 wasn't. */
- assert (*s1 != '/');
- i = cnt = 0;
- /* Skip the directories common to both strings. */
- while (1)
- {
- while (s1[i] && s2[i]
- && (s1[i] == s2[i])
- && (s1[i] != '/')
- && (s2[i] != '/'))
- ++i;
- if (s1[i] == '/' && s2[i] == '/')
- cnt = ++i;
- else
- break;
- }
- for (sepdirs1 = 0; s1[i]; i++)
- if (s1[i] == '/')
- ++sepdirs1;
- /* Now, construct the file as of:
- - ../ repeated sepdirs1 time
- - all the non-mutual directories of S2. */
- res = (char *)xmalloc (3 * sepdirs1 + strlen (s2 + cnt) + 1);
- for (i = 0; i < sepdirs1; i++)
- memcpy (res + 3 * i, "../", 3);
- strcpy (res + 3 * i, s2 + cnt);
- return res;
+ return result;
}
\f
-/* Add URL to the head of the list L. */
-urlpos *
-add_url (urlpos *l, const char *url, const char *file)
+/* Return non-zero if scheme a is similar to scheme b.
+
+ Schemes are similar if they are equal. If SSL is supported, schemes
+ are also similar if one is http (SCHEME_HTTP) and the other is https
+ (SCHEME_HTTPS). */
+int
+schemes_are_similar_p (enum url_scheme a, enum url_scheme b)
{
- urlpos *t;
-
- t = (urlpos *)xmalloc (sizeof (urlpos));
- memset (t, 0, sizeof (*t));
- t->url = xstrdup (url);
- t->local_name = xstrdup (file);
- t->next = l;
- return t;
+ if (a == b)
+ return 1;
+#ifdef HAVE_SSL
+ if ((a == SCHEME_HTTP && b == SCHEME_HTTPS)
+ || (a == SCHEME_HTTPS && b == SCHEME_HTTP))
+ return 1;
+#endif
+ return 0;
}
+\f
+#if 0
+/* Debugging and testing support for path_simplify. */
+/* Debug: run path_simplify on PATH and return the result in a new
+ string. Useful for calling from the debugger. */
+static char *
+ps (char *path)
+{
+ char *copy = xstrdup (path);
+ path_simplify (copy);
+ return copy;
+}
-/* Remembers which files have been downloaded. In the standard case, should be
- called with mode == FILE_DOWNLOADED_NORMALLY for each file we actually
- download successfully (i.e. not for ones we have failures on or that we skip
- due to -N).
+static void
+run_test (char *test, char *expected_result, int expected_change)
+{
+  char *test_copy = xstrdup (test);
+  int modified = path_simplify (test_copy);
-   When we've downloaded a file and tacked on a ".html" extension due to -E,
-   call this function with FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED rather than
-   FILE_DOWNLOADED_NORMALLY.
+  if (0 != strcmp (test_copy, expected_result))
+    {
+      printf ("Failed path_simplify(\"%s\"): expected \"%s\", got \"%s\".\n",
+	      test, expected_result, test_copy);
+    }
+  if (modified != expected_change)
+    {
+      if (expected_change == 1)
+	printf ("Expected modification with path_simplify(\"%s\").\n",
+		test);
+      else
+	printf ("Expected no modification with path_simplify(\"%s\").\n",
+		test);
+    }
+  xfree (test_copy);
+}
- If you just want to check if a file has been previously added without adding
- it, call with mode == CHECK_FOR_FILE. Please be sure to call this function
- with local filenames, not remote URLs. */
-downloaded_file_t
-downloaded_file (downloaded_file_t mode, const char* file)
+static void
+test_path_simplify (void)
{
- typedef struct _downloaded_file_list
- {
- char* file;
- downloaded_file_t download_type;
- struct _downloaded_file_list* next;
- } downloaded_file_list;
-
- boolean found_file = FALSE;
- static downloaded_file_list* downloaded_files = NULL;
- downloaded_file_list* rover = downloaded_files;
+ static struct {
+ char *test, *result;
+ int should_modify;
+ } tests[] = {
+ { "", "", 0 },
+ { ".", "", 1 },
+ { "..", "", 1 },
+ { "foo", "foo", 0 },
+ { "foo/bar", "foo/bar", 0 },
+ { "foo///bar", "foo/bar", 1 },
+ { "foo/.", "foo/", 1 },
+ { "foo/./", "foo/", 1 },
+ { "foo./", "foo./", 0 },
+ { "foo/../bar", "bar", 1 },
+ { "foo/../bar/", "bar/", 1 },
+ { "foo/bar/..", "foo/", 1 },
+ { "foo/bar/../x", "foo/x", 1 },
+ { "foo/bar/../x/", "foo/x/", 1 },
+ { "foo/..", "", 1 },
+ { "foo/../..", "", 1 },
+ { "a/b/../../c", "c", 1 },
+ { "./a/../b", "b", 1 }
+ };
+ int i;
- while (rover != NULL)
- if (strcmp(rover->file, file) == 0)
- {
- found_file = TRUE;
- break;
- }
- else
- rover = rover->next;
+ for (i = 0; i < countof (tests); i++)
+ {
+ char *test = tests[i].test;
+ char *expected_result = tests[i].result;
+ int expected_change = tests[i].should_modify;
+ run_test (test, expected_result, expected_change);
+ }
- if (found_file)
- return rover->download_type; /* file had already been downloaded */
- else
+ /* Now run all the tests with a leading slash before the test case,
+ to prove that the slash is being preserved. */
+ for (i = 0; i < countof (tests); i++)
{
- if (mode != CHECK_FOR_FILE)
- {
- rover = xmalloc(sizeof(*rover));
- rover->file = xstrdup(file); /* use xstrdup() so die on out-of-mem. */
- rover->download_type = mode;
- rover->next = downloaded_files;
- downloaded_files = rover;
- }
+ char *test, *expected_result;
+ int expected_change = tests[i].should_modify;
- return FILE_NOT_ALREADY_DOWNLOADED;
+ test = xmalloc (1 + strlen (tests[i].test) + 1);
+ sprintf (test, "/%s", tests[i].test);
+
+ expected_result = xmalloc (1 + strlen (tests[i].result) + 1);
+ sprintf (expected_result, "/%s", tests[i].result);
+
+ run_test (test, expected_result, expected_change);
+
+ xfree (test);
+ xfree (expected_result);
}
}
+#endif