2 Copyright (C) 2005 Free Software Foundation, Inc.
4 This file is part of GNU Wget.
6 GNU Wget is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or (at
9 your option) any later version.
11 GNU Wget is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with Wget; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 In addition, as a special exception, the Free Software Foundation
21 gives permission to link the code of its release of Wget with the
22 OpenSSL project's "OpenSSL" library (or with modified versions of it
23 that use the same license as the "OpenSSL" library), and distribute
24 the linked executables. You must obey the GNU General Public License
25 in all respects for all of the code used other than "OpenSSL". If you
26 modify this file, you may extend this exception to your version of the
27 file, but you are not obligated to do so. If you do not wish to do
28 so, delete this exception statement from your version. */
39 #include <sys/types.h>
49 #include "host.h" /* for is_valid_ipv6_address */
  /* NOTE(review): fragment of struct scheme_data -- the struct's
     opening and its other members are elided from this chunk.
     LEADING_STRING is the scheme prefix, e.g. "http://".  */
  const char *leading_string;

/* Supported schemes: */
/* Entry order must match enum url_scheme: url_scheme() below casts
   the array index straight to the enum.  Fields (from the visible
   struct fragment): name, leading string, default port, enabled.  */
static struct scheme_data supported_schemes[] =
  { "http",  "http://",  DEFAULT_HTTP_PORT,  1 },
  { "https", "https://", DEFAULT_HTTPS_PORT, 1 },
  { "ftp",   "ftp://",   DEFAULT_FTP_PORT,   1 },
76 /* Forward declarations: */
78 static int path_simplify PARAMS ((char *));
80 /* Support for escaping and unescaping of URL strings. */
82 /* Table of "reserved" and "unsafe" characters. Those terms are
83 rfc1738-speak, as such largely obsoleted by rfc2396 and later
84 specs, but the general idea remains.
86 A reserved character is the one that you can't decode without
87 changing the meaning of the URL. For example, you can't decode
88 "/foo/%2f/bar" into "/foo///bar" because the number and contents of
89 path components is different. Non-reserved characters can be
90 changed, so "/foo/%78/bar" is safe to change to "/foo/x/bar". The
91 unsafe characters are loosely based on rfc1738, plus "$" and ",",
92 as recommended by rfc2396, and minus "~", which is very frequently
93 used (and sometimes unrecognized as %7E by broken servers).
95 An unsafe character is the one that should be encoded when URLs are
96 placed in foreign environments. E.g. space and newline are unsafe
97 in HTTP contexts because HTTP uses them as separator and line
98 terminator, so they must be encoded to %20 and %0A respectively.
99 "*" is unsafe in shell context, etc.
101 We determine whether a character is unsafe through static table
102 lookup. This code assumes ASCII character set and 8-bit chars. */
/* rfc1738 reserved chars + "$" and ",".  */
/* NOTE(review): the bit definitions for urlchr_reserved and
   urlchr_unsafe are elided from this chunk; the macros below refer
   to them.  */

/* rfc1738 unsafe chars, plus non-printables.  */

/* Look up the flag byte for character C and test it against MASK.
   The (unsigned char) cast keeps the table index non-negative even
   where plain char is signed.  */
#define urlchr_test(c, mask) (urlchr_table[(unsigned char)(c)] & (mask))
#define URL_RESERVED_CHAR(c) urlchr_test(c, urlchr_reserved)
#define URL_UNSAFE_CHAR(c) urlchr_test(c, urlchr_unsafe)

/* Shorthands for the table: */
#define R  urlchr_reserved
#define U  urlchr_unsafe
/* NOTE(review): the table also uses RU (reserved|unsafe); its #define
   is elided from this chunk.  */

/* One flag byte per 8-bit character; assumes ASCII (see comment
   above).  All control characters, space, and all bytes >= 128 are
   unsafe; reserved characters follow rfc1738 plus "$" and ",".  */
static const unsigned char urlchr_table[256] =
  U,  U,  U,  U,   U,  U,  U,  U,   /* NUL SOH STX ETX  EOT ENQ ACK BEL */
  U,  U,  U,  U,   U,  U,  U,  U,   /* BS  HT  LF  VT   FF  CR  SO  SI  */
  U,  U,  U,  U,   U,  U,  U,  U,   /* DLE DC1 DC2 DC3  DC4 NAK SYN ETB */
  U,  U,  U,  U,   U,  U,  U,  U,   /* CAN EM  SUB ESC  FS  GS  RS  US  */
  U,  0,  U, RU,   R,  U,  R,  0,   /* SP  !   "   #    $   %   &   '   */
  0,  0,  0,  R,   R,  0,  0,  R,   /* (   )   *   +    ,   -   .   /   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* 0   1   2   3    4   5   6   7   */
  0,  0, RU,  R,   U,  R,  U,  R,   /* 8   9   :   ;    <   =   >   ?   */
 RU,  0,  0,  0,   0,  0,  0,  0,   /* @   A   B   C    D   E   F   G   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* H   I   J   K    L   M   N   O   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* P   Q   R   S    T   U   V   W   */
  0,  0,  0, RU,   U, RU,   U,  0,  /* X   Y   Z   [    \   ]   ^   _   */
  U,  0,  0,  0,   0,  0,  0,  0,   /* `   a   b   c    d   e   f   g   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* h   i   j   k    l   m   n   o   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* p   q   r   s    t   u   v   w   */
  0,  0,  0,  U,   U,  U,  0,  U,   /* x   y   z   {    |   }   ~   DEL */

  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,

  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
/* URL-unescape the string S.

   This is done by transforming the sequences "%HH" to the character
   represented by the hexadecimal digits HH.  If % is not followed by
   two hexadecimal digits, it is inserted literally.

   The transformation is done in place.  If you need the original
   string intact, make a copy before calling this function.  */

/* NOTE(review): elided fragment -- return type, braces, and the main
   scan/copy loop are missing from this chunk.  */
url_unescape (char *s)
  /* Two cursors over one buffer: H scans ahead while T writes the
     (never longer) decoded result back in place.  */
  char *t = s;			/* t - tortoise */
  char *h = s;			/* h - hare */
	  /* Do nothing if '%' is not followed by two hex digits. */
	  if (!h[1] || !h[2] || !(ISXDIGIT (h[1]) && ISXDIGIT (h[2])))
	  c = X2DIGITS_TO_NUM (h[1], h[2]);
	  /* Don't unescape %00 because there is no way to insert it
	     into a C string without effectively truncating it. */
/* The core of url_escape_* functions.  Escapes the characters that
   match the provided mask in urlchr_table.

   If ALLOW_PASSTHROUGH is non-zero, a string with no unsafe chars
   will be returned unchanged.  If ALLOW_PASSTHROUGH is zero, a
   freshly allocated string will be returned in all cases.  */

/* NOTE(review): elided fragment -- local declarations, the
   passthrough test, and the head of the copy loop are missing from
   this chunk.  */
url_escape_1 (const char *s, unsigned char mask, int allow_passthrough)
  /* First pass: count characters needing quoting to size the result
     exactly.  Each quoted char expands from 1 to 3 bytes ("%XX").  */
  for (p1 = s; *p1; p1++)
    if (urlchr_test (*p1, mask))
      addition += 2;		/* Two more characters (hex digits) */

  /* Nothing to quote: either hand back S itself (caller must not free
     it) or a fresh copy, depending on ALLOW_PASSTHROUGH.  */
  return allow_passthrough ? (char *)s : xstrdup (s);

  newlen = (p1 - s) + addition;
  newstr = (char *)xmalloc (newlen + 1);

  /* Quote the characters that match the test mask. */
  if (urlchr_test (*p1, mask))
      unsigned char c = *p1++;
      *p2++ = XNUM_TO_DIGIT (c >> 4);
      *p2++ = XNUM_TO_DIGIT (c & 0xf);

  /* Sanity check: the first-pass size prediction must match what the
     second pass actually wrote.  */
  assert (p2 - newstr == newlen);
240 /* URL-escape the unsafe characters (see urlchr_table) in a given
241 string, returning a freshly allocated string. */
244 url_escape (const char *s)
246 return url_escape_1 (s, urlchr_unsafe, 0);
249 /* URL-escape the unsafe characters (see urlchr_table) in a given
250 string. If no characters are unsafe, S is returned. */
253 url_escape_allow_passthrough (const char *s)
255 return url_escape_1 (s, urlchr_unsafe, 1);
/* Decide whether the char at position P needs to be encoded.  (It is
   not enough to pass a single char *P because the function may need
   to inspect the surrounding context.)

   Return 1 if the char should be escaped as %XX, 0 otherwise.  */

static int
char_needs_escaping (const char *p)
{
  if (*p == '%')
    {
      /* A '%' introducing a well-formed %XX escape is passed through
	 untouched; a garbled one must itself be encoded.  Reading
	 p[1]/p[2] is safe: ISXDIGIT short-circuits at the NUL.  */
      if (ISXDIGIT (*(p + 1)) && ISXDIGIT (*(p + 2)))
	return 0;
      /* Garbled %.. sequence: encode `%'. */
      return 1;
    }

  /* Unsafe characters are encoded -- unless they are also reserved,
     in which case decoding/encoding them would change URL meaning.  */
  if (URL_UNSAFE_CHAR (*p) && !URL_RESERVED_CHAR (*p))
    return 1;

  return 0;
}
281 /* Translate a %-escaped (but possibly non-conformant) input string S
282 into a %-escaped (and conformant) output string. If no characters
283 are encoded or decoded, return the same string S; otherwise, return
284 a freshly allocated string with the new contents.
286 After a URL has been run through this function, the protocols that
287 use `%' as the quote character can use the resulting string as-is,
288 while those that don't can use url_unescape to get to the intended
289 data. This function is stable: once the input is transformed,
290 further transformations of the result yield the same output.
292 Let's discuss why this function is needed.
294 Imagine Wget is asked to retrieve `http://abc.xyz/abc def'. Since
295 a raw space character would mess up the HTTP request, it needs to
296 be quoted, like this:
298 GET /abc%20def HTTP/1.0
300 It would appear that the unsafe chars need to be quoted, for
301 example with url_escape. But what if we're requested to download
302 `abc%20def'? url_escape transforms "%" to "%25", which would leave
303 us with `abc%2520def'. This is incorrect -- since %-escapes are
304 part of URL syntax, "%20" is the correct way to denote a literal
305 space on the Wget command line. This leads to the conclusion that
306 in that case Wget should not call url_escape, but leave the `%20'
307 as is. This is clearly contradictory, but it only gets worse.
309 What if the requested URI is `abc%20 def'? If we call url_escape,
310 we end up with `/abc%2520%20def', which is almost certainly not
311 intended. If we don't call url_escape, we are left with the
312 embedded space and cannot complete the request. What the user
313 meant was for Wget to request `/abc%20%20def', and this is where
314 reencode_escapes kicks in.
316 Wget used to solve this by first decoding %-quotes, and then
317 encoding all the "unsafe" characters found in the resulting string.
318 This was wrong because it didn't preserve certain URL special
319 (reserved) characters. For instance, URI containing "a%2B+b" (0x2b
320 == '+') would get translated to "a%2B%2Bb" or "a++b" depending on
321 whether we considered `+' reserved (it is). One of these results
322 is inevitable because by the second step we would lose information
323 on whether the `+' was originally encoded or not. Both results
324 were wrong because in CGI parameters + means space, while %2B means
325 literal plus. reencode_escapes correctly translates the above to
326 "a%2B+b", i.e. returns the original string.
328 This function uses a modified version of the algorithm originally
329 proposed by Anon Sricharoenchai:
331 * Encode all "unsafe" characters, except those that are also
332 "reserved", to %XX. See urlchr_table for which characters are
335 * Encode the "%" characters not followed by two hex digits to
338 * Pass through all other characters and %XX escapes as-is. (Up to
339 Wget 1.10 this decoded %XX escapes corresponding to "safe"
340 characters, but that was obtrusive and broke some servers.)
344 "http://abc.xyz/%20%3F%%36%31%25aa% a?a=%61+a%2Ba&b=b%26c%3Dc"
346 "http://abc.xyz/%20%3F%25%36%31%25aa%25%20a?a=%61+a%2Ba&b=b%26c%3Dc"
350 "foo bar" -> "foo%20bar"
351 "foo%20bar" -> "foo%20bar"
352 "foo %20bar" -> "foo%20%20bar"
353 "foo%%20bar" -> "foo%25%20bar" (0x25 == '%')
354 "foo%25%20bar" -> "foo%25%20bar"
355 "foo%2%20bar" -> "foo%252%20bar"
356 "foo+bar" -> "foo+bar" (plus is reserved!)
357 "foo%2b+bar" -> "foo%2b+bar" */
/* NOTE(review): elided fragment -- return type, local declarations
   (p1/p2/oldlen/newstr), and parts of both loops are missing from
   this chunk.  See the long comment above for the algorithm.  */
reencode_escapes (const char *s)
  int encode_count = 0;

  /* First pass: inspect the string to see if there's anything to do,
     and to calculate the new length.  */
  for (p1 = s; *p1; p1++)
    if (char_needs_escaping (p1))

    /* The string is good as it is. */
    return (char *) s;		/* C const model sucks. */

  /* Each encoding adds two characters (hex digits).  */
  newlen = oldlen + 2 * encode_count;
  newstr = xmalloc (newlen + 1);

  /* Second pass: copy the string to the destination address, encoding
     chars when needed.  */
    if (char_needs_escaping (p1))
	unsigned char c = *p1++;
	*p2++ = XNUM_TO_DIGIT (c >> 4);
	*p2++ = XNUM_TO_DIGIT (c & 0xf);

  /* The two passes must agree on the output size.  */
  assert (p2 - newstr == newlen);
404 /* Returns the scheme type if the scheme is supported, or
405 SCHEME_INVALID if not. */
408 url_scheme (const char *url)
412 for (i = 0; supported_schemes[i].leading_string; i++)
413 if (0 == strncasecmp (url, supported_schemes[i].leading_string,
414 strlen (supported_schemes[i].leading_string)))
416 if (supported_schemes[i].enabled)
417 return (enum url_scheme) i;
419 return SCHEME_INVALID;
422 return SCHEME_INVALID;
#define SCHEME_CHAR(ch) (ISALNUM (ch) || (ch) == '-' || (ch) == '+')

/* Return 1 if the URL begins with any "scheme", 0 otherwise.  As
   currently implemented, it returns true if URL begins with
   [-+a-zA-Z0-9]+: .  */

int
url_has_scheme (const char *url)
{
  const char *p = url;

  /* The first char must be a scheme char. */
  if (!*p || !SCHEME_CHAR (*p))
    return 0;
  ++p;

  /* Followed by 0 or more scheme chars. */
  while (*p && SCHEME_CHAR (*p))
    ++p;

  /* Terminated by ':'. */
  return *p == ':';
}
448 scheme_default_port (enum url_scheme scheme)
450 return supported_schemes[scheme].default_port;
454 scheme_disable (enum url_scheme scheme)
456 supported_schemes[scheme].enabled = 0;
/* Skip the username and password, if present in the URL.  The
   function should *not* be called with the complete URL, but with the
   portion after the scheme.

   If no username and password are found, return URL.  */

static const char *
url_skip_credentials (const char *url)
{
  /* Look for '@' that comes before terminators, such as '/', '?',
     '#' and ';'.  If the first such character is not '@', there is no
     userinfo component to skip.  */
  const char *sep = strpbrk (url, "@/?#;");

  if (sep == NULL || *sep != '@')
    return url;
  return sep + 1;
}
/* Parse credentials contained in [BEG, END).  The region is expected
   to have come from a URL and is unescaped.  */

/* NOTE(review): elided fragment -- local declarations, the
   no-password branch and the final return are missing from this
   chunk.  Returns 0 on an empty user name (visible below);
   presumably non-zero on success -- confirm against callers.  */
parse_credentials (const char *beg, const char *end, char **user, char **passwd)
    return 0;			/* empty user name */

  /* A ':' inside [BEG, END) separates user name from password.  */
  colon = memchr (beg, ':', end - beg);
    return 0;			/* again empty user name */

  *passwd = strdupdelim (colon + 1, end);
  /* Credentials arrive %-escaped as part of the URL; store them
     decoded.  */
  url_unescape (*passwd);
  *user = strdupdelim (beg, userend);
  url_unescape (*user);
/* Used by main.c: detect URLs written using the "shorthand" URL forms
   popularized by Netscape and NcFTP.  HTTP shorthands look like this:

   www.foo.com[:port]/dir/file   -> http://www.foo.com[:port]/dir/file
   www.foo.com[:port]            -> http://www.foo.com[:port]

   FTP shorthands look like this:

   foo.bar.com:dir/file          -> ftp://foo.bar.com/dir/file
   foo.bar.com:/absdir/file      -> ftp://foo.bar.com//absdir/file

   If the URL needs not or cannot be rewritten, return NULL.  */

/* NOTE(review): elided fragment -- return type, declarations, and
   several branches/returns are missing from this chunk.  */
rewrite_shorthand_url (const char *url)
  /* A URL that already carries a recognized scheme needs no
     rewriting.  */
  if (url_scheme (url) != SCHEME_INVALID)

  /* Look for a ':' or '/'.  The former signifies NcFTP syntax, the
     latter an HTTP-style shorthand.  */
  for (p = url; *p && *p != ':' && *p != '/'; p++)

      /* If the characters after the colon and before the next slash
	 or end of string are all digits, it's HTTP.  */
      for (pp = p + 1; ISDIGIT (*pp); pp++)
      if (digits > 0 && (*pp == '/' || *pp == '\0'))

      /* Prepend "ftp://" to the entire URL... */
      res = xmalloc (6 + strlen (url) + 1);
      sprintf (res, "ftp://%s", url);
      /* ...and replace ':' with '/'.  (6 == strlen ("ftp://"))  */
      res[6 + (p - url)] = '/';

  /* Just prepend "http://" to what we have. */
  res = xmalloc (7 + strlen (url) + 1);
  sprintf (res, "http://%s", url);
static void split_path PARAMS ((const char *, char **, char **));

/* Like strpbrk, with the exception that it returns the pointer to the
   terminating zero (end-of-string aka "eos") if no matching character
   is found.

   Although I normally balk at Gcc-specific optimizations, it probably
   makes sense here: glibc has optimizations that detect strpbrk being
   called with literal string as ACCEPT and inline the search.  That
   optimization is defeated if strpbrk is hidden within the call to
   another function.  (And no, making strpbrk_or_eos inline doesn't
   help because the check for literal accept happens earlier.)  */

/* NOTE(review): elided fragment -- the #ifdef __GNUC__ guard, the
   NULL check inside the statement expression, and the macro's closing
   lines are missing from this chunk.  */
#define strpbrk_or_eos(s, accept) ({		\
  char *SOE_p = strpbrk (s, accept);		\
    SOE_p = strchr (s, '\0');			\

#else  /* not __GNUC__ */

/* Plain-function fallback with the same contract as the macro
   above.  (Return-type line and NULL check elided in this chunk.)  */
strpbrk_or_eos (const char *s, const char *accept)
  char *p = strpbrk (s, accept);
    p = strchr (s, '\0');
#endif /* not __GNUC__ */
/* Turn STR into lowercase; return non-zero if a character was
   actually changed.  */

/* NOTE(review): elided fragment -- loop header and the "changed"
   accounting are missing from this chunk.  */
lowercase_str (char *str)
      *str = TOLOWER (*str);
/* Error messages for url_parse, indexed by the PE_* codes interleaved
   below.  N_() marks them for translation; url_error() runs the
   selected message through _() at lookup time.  */
static const char *parse_errors[] = {
#define PE_NO_ERROR			0
#define PE_UNSUPPORTED_SCHEME		1
  N_("Unsupported scheme"),
#define PE_EMPTY_HOST			2
#define PE_BAD_PORT_NUMBER		3
  N_("Bad port number"),
#define PE_INVALID_USER_NAME		4
  N_("Invalid user name"),
#define PE_UNTERMINATED_IPV6_ADDRESS	5
  N_("Unterminated IPv6 numeric address"),
#define PE_IPV6_NOT_SUPPORTED		6
  N_("IPv6 addresses not supported"),
#define PE_INVALID_IPV6_ADDRESS		7
  N_("Invalid IPv6 numeric address")
/* Parse a URL.

   Return a new struct url if successful, NULL on error.  In case of
   error, and if ERROR is not NULL, also set *ERROR to the appropriate
   PE_* error code.  */

/* NOTE(review): heavily elided fragment -- many declarations, the
   host/port/path boundary assignments, the error label and the final
   returns are missing from this chunk.  Comments below annotate only
   what is visible.  */
url_parse (const char *url, int *error)
  int path_modified, host_modified;

  enum url_scheme scheme;

  /* Begin/end pointer pairs delimiting each component inside the
     (re-encoded) URL string; components are copied out only at the
     end, once all of them are known.  */
  const char *uname_b,     *uname_e;
  const char *host_b,      *host_e;
  const char *path_b,      *path_e;
  const char *params_b,    *params_e;
  const char *query_b,     *query_e;
  const char *fragment_b,  *fragment_e;

  char *user = NULL, *passwd = NULL;

  char *url_encoded = NULL;

  scheme = url_scheme (url);
  if (scheme == SCHEME_INVALID)
      error_code = PE_UNSUPPORTED_SCHEME;

  /* Normalize %-escapes first; may return URL itself when nothing
     needed re-encoding (checked when freeing below).  */
  url_encoded = reencode_escapes (url);

  p += strlen (supported_schemes[scheme].leading_string);
  p = url_skip_credentials (p);

  /* scheme://user:pass@host[:port]... */

  /* We attempt to break down the URL into the components path,
     params, query, and fragment.  They are ordered like this:

       scheme://host[:port][/path][;params][?query][#fragment]  */

  params_b   = params_e   = NULL;
  query_b    = query_e    = NULL;
  fragment_b = fragment_e = NULL;

      /* Handle IPv6 address inside square brackets.  Ideally we'd
	 just look for the terminating ']', but rfc2732 mandates
	 rejecting invalid IPv6 addresses.  */

      /* The address begins after '['. */
      host_e = strchr (host_b, ']');

	  error_code = PE_UNTERMINATED_IPV6_ADDRESS;

      /* Check if the IPv6 address is valid. */
      if (!is_valid_ipv6_address(host_b, host_e))
	  error_code = PE_INVALID_IPV6_ADDRESS;

      /* Continue parsing after the closing ']'. */

      /* Built without IPv6 support: bracketed literals are refused. */
      error_code = PE_IPV6_NOT_SUPPORTED;

      p = strpbrk_or_eos (p, ":/;?#");

  if (host_b == host_e)
      error_code = PE_EMPTY_HOST;

  port = scheme_default_port (scheme);
      const char *port_b, *port_e, *pp;

      /* scheme://host:port/tralala */

      p = strpbrk_or_eos (p, "/;?#");

      /* Allow empty port, as per rfc2396. */
      if (port_b != port_e)
	for (port = 0, pp = port_b; pp < port_e; pp++)
		/* http://host:12randomgarbage/blah */
		error_code = PE_BAD_PORT_NUMBER;
	    port = 10 * port + (*pp - '0');
	    /* Check for too large port numbers here, before we have
	       a chance to overflow on bogus port values.  */
		error_code = PE_BAD_PORT_NUMBER;

      p = strpbrk_or_eos (p, ";?#");

      /* Path is not allowed not to exist. */

      p = strpbrk_or_eos (p, "?#");

      p = strpbrk_or_eos (p, "#");

  /* Hack that allows users to use '?' (a wildcard character) in
     FTP URLs without it being interpreted as a query string
     delimiter.  */
  if (scheme == SCHEME_FTP)
      query_b = query_e = NULL;

  if (uname_b != uname_e)
      /* http://user:pass@host */
      /*      uname_b     uname_e */
      /* uname_e - 1 excludes the '@' terminator from the region.  */
      if (!parse_credentials (uname_b, uname_e - 1, &user, &passwd))
	  error_code = PE_INVALID_USER_NAME;

  /* All components validated; build the result object.  */
  u = xnew0 (struct url);

  u->host = strdupdelim (host_b, host_e);

  u->path = strdupdelim (path_b, path_e);
  path_modified = path_simplify (u->path);
  split_path (u->path, &u->dir, &u->file);

  host_modified = lowercase_str (u->host);

  /* Decode %HH sequences in host name.  This is important not so much
     to support %HH sequences in host names (which other browsers
     don't), but to support binary characters (which will have been
     converted to %HH by reencode_escapes).  */
  if (strchr (u->host, '%'))
      url_unescape (u->host);

    u->params = strdupdelim (params_b, params_e);

    u->query = strdupdelim (query_b, query_e);

    u->fragment = strdupdelim (fragment_b, fragment_e);

  if (path_modified || u->fragment || host_modified || path_b == path_e)
      /* If we suspect that a transformation has rendered what
	 url_string might return different from URL_ENCODED, rebuild
	 u->url using url_string.  */
      u->url = url_string (u, 0);

	/* reencode_escapes made a copy; it is no longer needed.  */
	if (url_encoded != url)
	  xfree ((char *) url_encoded);

      /* Otherwise adopt URL_ENCODED itself (or a copy of URL when no
	 re-encoding happened) as u->url.  */
      if (url_encoded == url)
	u->url = xstrdup (url);
	u->url = url_encoded;

 /* Cleanup in case of error: */
  if (url_encoded && url_encoded != url)

  /* Transmit the error code to the caller, if the caller wants to
     know.  */
892 /* Return the error message string from ERROR_CODE, which should have
893 been retrieved from url_parse. The error message is translated. */
896 url_error (int error_code)
898 assert (error_code >= 0 && error_code < countof (parse_errors));
899 return _(parse_errors[error_code]);
/* Split PATH into DIR and FILE.  PATH comes from the URL and is
   expected to be URL-escaped.

   The path is split into directory (the part up to the last slash)
   and file (the part after the last slash), which are subsequently
   unescaped.  Examples:

   PATH                 DIR        FILE
   "foo/bar/baz"        "foo/bar"  "baz"
   "foo/bar/"           "foo/bar"  ""
   "foo"                ""         "foo"
   "foo/bar/baz%2fqux"  "foo/bar"  "baz/qux"  (!)

   DIR and FILE are freshly allocated; the caller owns and frees
   both.  */

static void
split_path (const char *path, char **dir, char **file)
{
  char *last_slash = strrchr (path, '/');

  if (last_slash == NULL)
    {
      /* No slash at all: everything is the file part.  */
      *dir = xstrdup ("");
      *file = xstrdup (path);
    }
  else
    {
      *dir = strdupdelim (path, last_slash);
      *file = xstrdup (last_slash + 1);
    }

  /* Decode %-escapes after splitting, so that an escaped slash
     (%2f) inside FILE does not affect where the split happens.  */
  url_unescape (*dir);
  url_unescape (*file);
}
/* Note: URL's "full path" is the path with the query string and
   params appended.  The "fragment" (#foo) is intentionally ignored,
   but that might be changed.  For example, if the original URL was
   "http://host:port/foo/bar/baz;bullshit?querystring#uselessfragment",
   the full path will be "/foo/bar/baz;bullshit?querystring".  */

/* Return the length of the full path, without the terminating
   zero.  */

/* NOTE(review): elided fragments -- return types, the FROB
   invocations for each component, and the #undef lines are missing
   from this chunk.  */
full_path_length (const struct url *url)
  /* Each present component contributes its length plus one separator
     byte (e.g. ';' for params, '?' for query).  */
#define FROB(el) if (url->el) len += 1 + strlen (url->el)

/* Write out the full path. */
full_path_write (const struct url *url, char *where)
  /* Emit separator CHR followed by the component, if present; must
     produce exactly the size computed by full_path_length above.  */
#define FROB(el, chr) do {			\
  char *f_el = url->el;				\
      int l = strlen (f_el);			\
      memcpy (where, f_el, l);			\
/* Public function for getting the "full path".  E.g. if u->path is
   "foo/bar" and u->query is "param=value", full_path will be
   "/foo/bar?param=value".  The result is freshly allocated; the
   caller frees it.  */

char *
url_full_path (const struct url *url)
{
  /* Size the buffer exactly, then let full_path_write fill it.  */
  int len = full_path_length (url);
  char *result = (char *) xmalloc (len + 1);

  full_path_write (url, result);
  result[len] = '\0';

  return result;
}
/* Unescape CHR in an otherwise escaped STR.  Used to selectively
   unescape certain characters, such as "/" and ":".  Returns a
   count of unescaped chars.  */

/* NOTE(review): elided fragment -- the loop body that collapses a
   matched "%XX" and the final return are missing from this chunk.  */
unescape_single_char (char *str, char chr)
  /* Precompute CHR's two escape hex digits so the loop can recognize
     "%XX" with three plain comparisons.  */
  const char c1 = XNUM_TO_DIGIT (chr >> 4);
  const char c2 = XNUM_TO_DIGIT (chr & 0xf);
  /* In-place compaction: H scans ahead, T writes the result.  */
  char *h = str;		/* hare */
  char *t = str;		/* tortoise */
  for (; *h; h++, t++)
      if (h[0] == '%' && h[1] == c1 && h[2] == c2)
/* Escape unsafe and reserved characters, except for the slash
   characters.  */

/* NOTE(review): elided fragment -- return type and the passthrough
   early return are missing from this chunk.  */
url_escape_dir (const char *dir)
  char *newdir = url_escape_1 (dir, urlchr_unsafe | urlchr_reserved, 1);

  /* '/' is reserved and was just escaped; restore the directory
     separators.  */
  unescape_single_char (newdir, '/');
/* Sync u->path and u->url with u->dir and u->file.  Called after
   u->file or u->dir have been changed, typically by the FTP code.  */

/* NOTE(review): elided fragment -- return type, the empty-dir branch,
   separator insertion, assignment to u->path, and the frees of
   edir/u->url are missing from this chunk.  */
sync_path (struct url *u)
  char *newpath, *efile, *edir;

  /* u->dir and u->file are not escaped.  URL-escape them before
     reassembling them into u->path.  That way, if they contain
     separators like '?' or even if u->file contains slashes, the
     path will be correctly assembled.  (u->file can contain slashes
     if the URL specifies it with %2f, or if an FTP server returns
     it.)  */
  edir = url_escape_dir (u->dir);
  efile = url_escape_1 (u->file, urlchr_unsafe | urlchr_reserved, 1);

  newpath = xstrdup (efile);

      int dirlen = strlen (edir);
      int filelen = strlen (efile);

      /* Copy "DIR/FILE" to newpath. */
      char *p = newpath = xmalloc (dirlen + 1 + filelen + 1);
      memcpy (p, edir, dirlen);
      memcpy (p, efile, filelen);

  /* url_escape_1 with passthrough may return its argument unchanged;
     only free when a fresh copy was actually made.  */
  if (efile != u->file)

  /* Regenerate u->url as well.  */
  u->url = url_string (u, 0);
/* Mutators.  Code in ftp.c insists on changing u->dir and u->file.
   This way we can sync u->path and u->url when they get changed.  */

/* NOTE(review): elided fragments -- return types, the frees of the
   old values, and the sync_path() calls are missing from this
   chunk.  */
url_set_dir (struct url *url, const char *newdir)
  url->dir = xstrdup (newdir);

url_set_file (struct url *url, const char *newfile)
  url->file = xstrdup (newfile);
/* NOTE(review): elided fragment -- return type and the unconditional
   frees (host, path, url, dir, file, and the struct itself) are
   missing from this chunk.  */
url_free (struct url *url)
  /* These components are optional and may be NULL; xfree_null
     tolerates that.  */
  xfree_null (url->params);
  xfree_null (url->query);
  xfree_null (url->fragment);
  xfree_null (url->user);
  xfree_null (url->passwd);
/* Create all the necessary directories for PATH (a file).  Calls
   make_directory() internally.  */

/* NOTE(review): elided fragment -- return type, declarations, several
   returns, the unlink call, and the final return of RES are missing
   from this chunk.  */
mkalldirs (const char *path)
  /* Scan backward from the end to the last '/', i.e. find the
     directory portion of PATH.  */
  p = path + strlen (path);
  for (; *p != '/' && p != path; p--)

  /* Don't create if it's just a file. */
  if ((p == path) && (*p != '/'))

  t = strdupdelim (path, p);

  /* Check whether the directory exists. */
  if ((stat (t, &st) == 0))
      if (S_ISDIR (st.st_mode))
	  /* If the dir exists as a file name, remove it first.  This
	     is *only* for Wget to work with buggy old CERN http
	     servers.  Here is the scenario: When Wget tries to
	     retrieve a directory without a slash, e.g.
	     http://foo/bar (bar being a directory), CERN server will
	     not redirect it to http://foo/bar/ -- it will generate a
	     directory listing containing links to bar/file1,
	     bar/file2, etc.  Wget will lose because it saves this
	     HTML listing to a file `bar', so it cannot create the
	     directory.  To work around this, if the file of the same
	     name exists, we just remove it and create the directory
	     anew.  */
	  DEBUGP (("Removing %s because of directory danger!\n", t));

  res = make_directory (t);
    logprintf (LOG_NOTQUIET, "%s: %s", t, strerror (errno));
/* Functions for constructing the file name out of URL components.  */

/* A growable string structure, used by url_file_name and friends.
   This should perhaps be moved to utils.c.

   The idea is to have a convenient and efficient way to construct a
   string by having various functions append data to it.  Instead of
   passing the obligatory BASEVAR, SIZEVAR and TAILPOS to all the
   functions in questions, we pass the pointer to this struct.  */

/* NOTE(review): the struct growable definition itself is elided from
   this chunk; the macros below use its BASE, SIZE and TAIL fields.  */

/* Ensure that the string can accept APPEND_COUNT more characters past
   the current TAIL position.  If necessary, this will grow the string
   and update its allocated size.  If the string is already large
   enough to take TAIL+APPEND_COUNT characters, this does nothing.  */
#define GROW(g, append_size) do {					\
  struct growable *G_ = g;						\
  DO_REALLOC (G_->base, G_->size, G_->tail + append_size, char);	\

/* Return the tail position of the string. */
#define TAIL(r) ((r)->base + (r)->tail)

/* Move the tail position by APPEND_COUNT characters. */
#define TAIL_INCR(r, append_count) ((r)->tail += append_count)

/* Append the string STR to DEST.  NOTICE: the string in DEST is not
   terminated.  */

/* NOTE(review): elided fragments below -- return types, braces, and
   the GROW calls preceding the copies are missing from this chunk. */
append_string (const char *str, struct growable *dest)
  int l = strlen (str);
  memcpy (TAIL (dest), str, l);
  TAIL_INCR (dest, l);

/* Append CH to DEST.  For example, append_char (0, DEST)
   zero-terminates DEST.  */
append_char (char ch, struct growable *dest)
  TAIL_INCR (dest, 1);
  /* Bit flags describing why a character is unusable in a file
     name on a given platform; OR-ed into the table below.  */
  filechr_not_unix    = 1,	/* unusable on Unix, / and \0 */
  filechr_not_windows = 2,	/* unusable on Windows, one of \|/<>?:*" */
  filechr_control     = 4	/* a control character, e.g. 0-31 */

#define FILE_CHAR_TEST(c, mask) (filechr_table[(unsigned char)(c)] & (mask))

/* Shorthands for the table: */
#define U filechr_not_unix
#define W filechr_not_windows
#define C filechr_control

/* NOTE(review): the UW/UWC combination #defines used by the table are
   elided from this chunk.  */

/* Table of characters unsafe under various conditions (see above).

   Arguably we could also claim `%' to be unsafe, since we use it as
   the escape character.  If we ever want to be able to reliably
   translate file name back to URL, this would become crucial.
   Right now, it's better to be minimal in escaping.  */

static const unsigned char filechr_table[256] =
UWC,  C,  C,  C,   C,  C,  C,  C,   /* NUL SOH STX ETX  EOT ENQ ACK BEL */
  C,  C,  C,  C,   C,  C,  C,  C,   /* BS  HT  LF  VT   FF  CR  SO  SI  */
  C,  C,  C,  C,   C,  C,  C,  C,   /* DLE DC1 DC2 DC3  DC4 NAK SYN ETB */
  C,  C,  C,  C,   C,  C,  C,  C,   /* CAN EM  SUB ESC  FS  GS  RS  US  */
  0,  0,  W,  0,   0,  0,  0,  0,   /* SP  !   "   #    $   %   &   '   */
  0,  0,  W,  0,   0,  0,  0, UW,   /* (   )   *   +    ,   -   .   /   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* 0   1   2   3    4   5   6   7   */
  0,  0,  W,  0,   W,  0,  W,  W,   /* 8   9   :   ;    <   =   >   ?   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* @   A   B   C    D   E   F   G   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* H   I   J   K    L   M   N   O   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* P   Q   R   S    T   U   V   W   */
  0,  0,  0,  0,   W,  0,  0,  0,   /* X   Y   Z   [    \   ]   ^   _   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* `   a   b   c    d   e   f   g   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* h   i   j   k    l   m   n   o   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* p   q   r   s    t   u   v   w   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* x   y   z   {    |   }   ~   DEL */

  C, C, C, C,  C, C, C, C,  C, C, C, C,  C, C, C, C, /* 128-143 */
  C, C, C, C,  C, C, C, C,  C, C, C, C,  C, C, C, C, /* 144-159 */
  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,
  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,

  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,
  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,
  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,
  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,

/* FN_PORT_SEP is the separator between host and port in file names
   for non-standard port numbers.  On Unix this is normally ':', as in
   "www.xemacs.org:4001/index.html".  Under Windows, we set it to +
   because Windows can't handle ':' in file names.  */
#define FN_PORT_SEP  (opt.restrict_files_os != restrict_windows ? ':' : '+')

/* FN_QUERY_SEP is the separator between the file name and the URL
   query, normally '?'.  Since Windows cannot handle '?' as part of
   file name, we use '@' instead there.  */
#define FN_QUERY_SEP (opt.restrict_files_os != restrict_windows ? '?' : '@')
/* Quote path element, characters in [b, e), as file name, and append
   the quoted string to DEST.  Each character is quoted as per
   file_unsafe_char and the corresponding table.

   If ESCAPED_P is non-zero, the path element is considered to be
   URL-escaped and will be unescaped prior to inspection.  */

/* NOTE(review): elided fragment -- return type, declarations
   (mask/p/quoted/outlen), the ESCAPED_P guard around the unescape,
   the ".." replacement body, and parts of the copy branches are
   missing from this chunk.  */
append_uri_pathel (const char *b, const char *e, int escaped_p,
		   struct growable *dest)
  /* Pick the per-OS set of characters that must be %-quoted in file
     names; optionally also quote control characters.  */
  if (opt.restrict_files_os == restrict_unix)
    mask = filechr_not_unix;
    mask = filechr_not_windows;
  if (opt.restrict_files_ctrl)
    mask |= filechr_control;

  /* Copy [b, e) to PATHEL and URL-unescape it. */
      BOUNDED_TO_ALLOCA (b, e, unescaped);
      url_unescape (unescaped);
      e = unescaped + strlen (unescaped);

  /* Defang ".." when found as component of path.  Remember that path
     comes from the URL and might contain malicious input.  */
  if (e - b == 2 && b[0] == '.' && b[1] == '.')

  /* Walk the PATHEL string and check how many characters we'll need
     to quote.  */
  for (p = b; p < e; p++)
    if (FILE_CHAR_TEST (*p, mask))

  /* Calculate the length of the output string.  e-b is the input
     string length.  Each quoted char introduces two additional
     characters in the string, hence 2*quoted.  */
  outlen = (e - b) + (2 * quoted);
  GROW (dest, outlen);

      /* If there's nothing to quote, we can simply append the string
	 without processing it again.  */
      memcpy (TAIL (dest), b, outlen);

      char *q = TAIL (dest);
      for (p = b; p < e; p++)
	  if (!FILE_CHAR_TEST (*p, mask))
	      unsigned char ch = *p;
	      *q++ = XNUM_TO_DIGIT (ch >> 4);
	      *q++ = XNUM_TO_DIGIT (ch & 0xf);
      /* Both branches must write exactly OUTLEN bytes.  */
      assert (q - TAIL (dest) == outlen);

  TAIL_INCR (dest, outlen);
/* Append to DEST the directory structure that corresponds to the
   directory part of URL's path.  For example, if the URL is
   http://server/dir1/dir2/file, this appends "/dir1/dir2".

   Each path element ("dir1" and "dir2" in the above example) is
   examined, url-unescaped, and re-escaped as file name element.

   Additionally, it cuts as many directories from the path as
   specified by opt.cut_dirs.  For example, if opt.cut_dirs is 1, it
   will produce "dir2" for the above example.  For 2 or more, it will
   produce "".

   Each component of the path is quoted for use as file name.  */

/* NOTE(review): elided fragment -- return type, initialization of
   PATHEL from u->path, the cut_dirs decrement, and the empty-element
   check are missing from this chunk.  */
append_dir_structure (const struct url *u, struct growable *dest)
  char *pathel, *next;
  int cut = opt.cut_dirs;

  /* Go through the path components, de-URL-quote them, and quote them
     (if necessary) as file names.  */

  for (; (next = strchr (pathel, '/')) != NULL; pathel = next + 1)

      /* Ignore empty pathels.  */

      append_char ('/', dest);
      /* ESCAPED_P is 1: components of u->path are still
	 %-escaped.  */
      append_uri_pathel (pathel, next, 1, dest);
1414 /* Return a unique file name that matches the given URL as well as
1415 possible. Does not create directories on the file system. */
1418 url_file_name (const struct url *u)
1420 struct growable fnres; /* stands for "file name result" */
1422 const char *u_file, *u_query;
1423 char *fname, *unique;
1429 /* Start with the directory prefix, if specified. */
1431 append_string (opt.dir_prefix, &fnres);
1433 /* If "dirstruct" is turned on (typically the case with -r), add
1434 the host and port (unless those have been turned off) and
1435 directory structure. */
/* Optionally prepend the scheme name as a directory
   (--protocol-directories). */
1438 if (opt.protocol_directories)
1441 append_char ('/', &fnres);
1442 append_string (supported_schemes[u->scheme].name, &fnres);
1444 if (opt.add_hostdir)
1447 append_char ('/', &fnres);
1448 if (0 != strcmp (u->host, ".."))
1449 append_string (u->host, &fnres);
1451 /* Host name can come from the network; malicious DNS may
1452 allow ".." to be resolved, causing us to write to
1453 "../<file>". Defang such host names. */
1454 append_string ("%2E%2E", &fnres);
/* Append the port only when it differs from the scheme default. */
1455 if (u->port != scheme_default_port (u->scheme))
1458 number_to_string (portstr, u->port);
1459 append_char (FN_PORT_SEP, &fnres);
1460 append_string (portstr, &fnres);
1464 append_dir_structure (u, &fnres);
1467 /* Add the file name. */
1469 append_char ('/', &fnres);
/* An empty file component (URL ending in '/') becomes "index.html". */
1470 u_file = *u->file ? u->file : "index.html";
1471 append_uri_pathel (u_file, u_file + strlen (u_file), 0, &fnres);
1473 /* Append "?query" to the file name. */
1474 u_query = u->query && *u->query ? u->query : NULL;
1477 append_char (FN_QUERY_SEP, &fnres);
1478 append_uri_pathel (u_query, u_query + strlen (u_query), 1, &fnres);
1481 /* Zero-terminate the file name. */
1482 append_char ('\0', &fnres);
1486 /* Check the cases in which the unique extensions are not used:
1487 1) Clobbering is turned off (-nc).
1488 2) Retrieval with regetting.
1489 3) Timestamping is used.
1490 4) Hierarchy is built.
1492 The exception is the case when file does exist and is a
1493 directory (see `mkalldirs' for explanation). */
1495 if ((opt.noclobber || opt.always_rest || opt.timestamping || opt.dirstruct)
1496 && !(file_exists_p (fname) && !file_non_directory_p (fname)))
/* Otherwise pick a name that doesn't collide with an existing file. */
1499 unique = unique_name (fname, 1);
/* NOTE(review): when unique_name returns a new string, FNAME is
   presumably freed by code elided here -- confirm before relying on it. */
1500 if (unique != fname)
1505 /* Resolve "." and ".." elements of PATH by destructively modifying
1506 PATH and return non-zero if PATH has been modified, zero otherwise.
1508 The algorithm is in spirit similar to the one described in rfc1808,
1509 although implemented differently, in one pass. To recap, path
1510 elements containing only "." are removed, and ".." is taken to mean
1511 "back up one element". Single leading and trailing slashes are
1514 For example, "a/b/c/./../d/.." will yield "a/b/". More exhaustive
1515 test examples are provided below. If you change anything in this
1516 function, run test_path_simplify to make sure you haven't broken a
1520 path_simplify (char *path)
1522 char *h = path; /* hare */
1523 char *t = path; /* tortoise */
1524 char *beg = path; /* boundary for backing the tortoise */
1525 char *end = path + strlen (path);
1529 /* Hare should be at the beginning of a path element. */
/* "." as a complete path element: drop it. */
1531 if (h[0] == '.' && (h[1] == '/' || h[1] == '\0'))
1536 else if (h[0] == '.' && h[1] == '.' && (h[2] == '/' || h[2] == '\0'))
1538 /* Handle "../" by retreating the tortoise by one path
1539 element -- but not past beginning. */
1542 /* Move backwards until T hits the beginning of the
1543 previous path element or the beginning of path. */
1544 for (--t; t > beg && t[-1] != '/'; t--)
1549 /* If we're at the beginning, copy the "../" literally
1550 move the beginning so a later ".." doesn't remove
1560 /* A regular path element. If H hasn't advanced past T,
1561 simply skip to the next path element. Otherwise, copy
1562 the path element until the next slash. */
1565 /* Skip the path element, including the slash. */
1566 while (h < end && *h != '/')
1573 /* Copy the path element, including the final slash. */
1574 while (h < end && *h != '/')
1588 /* Return the length of URL's path. Path is considered to be
1589 terminated by one of '?', ';', '#', or by the end of the
1593 path_length (const char *url)
/* Q points at the first of '?', ';', '#', or at the terminating NUL;
   the path length is the distance from URL to Q. */
1595 const char *q = strpbrk_or_eos (url, "?;#");
1599 /* Find the last occurrence of character C in the range [b, e), or
1600 NULL, if none are present. We might want to use memrchr (a GNU
1601 extension) under GNU libc. */
1604 find_last_char (const char *b, const char *e, char c)
/* NOTE(review): the loop body is not visible in this excerpt; per the
   contract above it scans [b, e) backwards and returns the match or
   NULL. */
1612 /* Merge BASE with LINK and return the resulting URI.
1614 Either of the URIs may be absolute or relative, complete with the
1615 host name, or path only. This tries to reasonably handle all
1616 foreseeable cases. It only employs minimal URL parsing, without
1617 knowledge of the specifics of schemes.
1619 I briefly considered making this function call path_simplify after
1620 the merging process, as rfc1738 seems to suggest. This is a bad
1621 idea for several reasons: 1) it complexifies the code, and 2)
1622 url_parse has to simplify path anyway, so it's wasteful to boot. */
1625 uri_merge (const char *base, const char *link)
/* An absolute LINK (one with its own scheme) stands on its own. */
1631 if (url_has_scheme (link))
1632 return xstrdup (link);
1634 /* We may not examine BASE past END. */
1635 end = base + path_length (base);
1636 linklength = strlen (link);
1640 /* Empty LINK points back to BASE, query string and all. */
1641 return xstrdup (base);
1643 else if (*link == '?')
1645 /* LINK points to the same location, but changes the query
1646 string. Examples: */
1647 /* uri_merge("path", "?new") -> "path?new" */
1648 /* uri_merge("path?foo", "?new") -> "path?new" */
1649 /* uri_merge("path?foo#bar", "?new") -> "path?new" */
1650 /* uri_merge("path", "?new") -> "path?new" */
1651 int baselength = end - base;
1652 merge = xmalloc (baselength + linklength + 1);
1653 memcpy (merge, base, baselength);
1654 memcpy (merge + baselength, link, linklength);
1655 merge[baselength + linklength] = '\0';
1657 else if (*link == '#')
1659 /* uri_merge("path", "#new") -> "path#new" */
1660 /* uri_merge("path#foo", "#new") -> "path#new" */
1661 /* uri_merge("path?foo", "#new") -> "path?foo#new" */
1662 /* uri_merge("path?foo#bar", "#new") -> "path?foo#new" */
1664 const char *end1 = strchr (base, '#');
1666 end1 = base + strlen (base);
1667 baselength = end1 - base;
1668 merge = xmalloc (baselength + linklength + 1);
1669 memcpy (merge, base, baselength);
1670 memcpy (merge + baselength, link, linklength);
1671 merge[baselength + linklength] = '\0';
1673 else if (*link == '/' && *(link + 1) == '/')
1675 /* LINK begins with "//" and so is a net path: we need to
1676 replace everything after (and including) the double slash
1679 /* uri_merge("foo", "//new/bar") -> "//new/bar" */
1680 /* uri_merge("//old/foo", "//new/bar") -> "//new/bar" */
1681 /* uri_merge("http://old/foo", "//new/bar") -> "http://new/bar" */
1685 const char *start_insert;
1687 /* Look for first slash. */
1688 slash = memchr (base, '/', end - base);
1689 /* If found slash and it is a double slash, then replace
1690 from this point, else default to replacing from the
1692 if (slash && *(slash + 1) == '/')
1693 start_insert = slash;
1695 start_insert = base;
1697 span = start_insert - base;
1698 merge = (char *)xmalloc (span + linklength + 1);
1700 memcpy (merge, base, span);
1701 memcpy (merge + span, link, linklength);
1702 merge[span + linklength] = '\0';
1704 else if (*link == '/')
1706 /* LINK is an absolute path: we need to replace everything
1707 after (and including) the FIRST slash with LINK.
1709 So, if BASE is "http://host/whatever/foo/bar", and LINK is
1710 "/qux/xyzzy", our result should be
1711 "http://host/qux/xyzzy". */
1714 const char *start_insert = NULL; /* for gcc to shut up. */
1715 const char *pos = base;
1716 int seen_slash_slash = 0;
1717 /* We're looking for the first slash, but want to ignore
1720 slash = memchr (pos, '/', end - pos);
1721 if (slash && !seen_slash_slash)
1722 if (*(slash + 1) == '/')
1725 seen_slash_slash = 1;
1729 /* At this point, SLASH is the location of the first / after
1730 "//", or the first slash altogether. START_INSERT is the
1731 pointer to the location where LINK will be inserted. When
1732 examining the last two examples, keep in mind that LINK
1735 if (!slash && !seen_slash_slash)
1736 /* example: "foo" */
1738 start_insert = base;
1739 else if (!slash && seen_slash_slash)
1740 /* example: "http://foo" */
1743 else if (slash && !seen_slash_slash)
1744 /* example: "foo/bar" */
1746 start_insert = base;
1747 else if (slash && seen_slash_slash)
1748 /* example: "http://something/" */
1750 start_insert = slash;
1752 span = start_insert - base;
1753 merge = (char *)xmalloc (span + linklength + 1);
1755 memcpy (merge, base, span);
1756 memcpy (merge + span, link, linklength);
1757 merge[span + linklength] = '\0';
1761 /* LINK is a relative URL: we need to replace everything
1762 after last slash (possibly empty) with LINK.
1764 So, if BASE is "whatever/foo/bar", and LINK is "qux/xyzzy",
1765 our result should be "whatever/foo/qux/xyzzy". */
1766 int need_explicit_slash = 0;
1768 const char *start_insert;
1769 const char *last_slash = find_last_char (base, end, '/');
1772 /* No slash found at all. Replace what we have with LINK. */
1773 start_insert = base;
1775 else if (last_slash && last_slash >= base + 2
1776 && last_slash[-2] == ':' && last_slash[-1] == '/')
1778 /* example: "http://host" */
/* Insert one past END; the extra byte at SPAN-1 is overwritten with
   the '/' separator below (need_explicit_slash). */
1780 start_insert = end + 1;
1781 need_explicit_slash = 1;
1785 /* example: "whatever/foo/bar" */
1787 start_insert = last_slash + 1;
1790 span = start_insert - base;
1791 merge = (char *)xmalloc (span + linklength + 1);
1793 memcpy (merge, base, span);
1794 if (need_explicit_slash)
1795 merge[span - 1] = '/';
1796 memcpy (merge + span, link, linklength);
1797 merge[span + linklength] = '\0';
/* Copy string S (without its terminating NUL) to P.  NOTE(review):
   the continuation lines that advance P are elided in this excerpt. */
1803 #define APPEND(p, s) do { \
1804 int len = strlen (s); \
1805 memcpy (p, s, len); \
1809 /* Use this instead of password when the actual password is supposed
1810 to be hidden. We intentionally use a generic string without giving
1811 away the number of characters in the password, like previous
1813 #define HIDDEN_PASSWORD "*password*"
1815 /* Recreate the URL string from the data in URL.
1817 If HIDE is non-zero (as it is when we're calling this on a URL we
1818 plan to print, but not when calling it to canonicalize a URL for
1819 use within the program), password will be hidden. Unsafe
1820 characters in the URL will be quoted. */
1823 url_string (const struct url *url, int hide_password)
1827 char *quoted_host, *quoted_user = NULL, *quoted_passwd = NULL;
1829 int scheme_port = supported_schemes[url->scheme].default_port;
1830 const char *scheme_str = supported_schemes[url->scheme].leading_string;
1831 int fplen = full_path_length (url);
1833 int brackets_around_host;
1835 assert (scheme_str != NULL);
1837 /* Make sure the user name and password are quoted. */
1840 quoted_user = url_escape_allow_passthrough (url->user);
/* When hiding, substitute a fixed literal so the output never
   reveals the password or even its length. */
1844 quoted_passwd = HIDDEN_PASSWORD;
1846 quoted_passwd = url_escape_allow_passthrough (url->passwd);
1850 /* In the unlikely event that the host name contains non-printable
1851 characters, quote it for displaying to the user. */
1852 quoted_host = url_escape_allow_passthrough (url->host);
1854 /* Undo the quoting of colons that URL escaping performs. IPv6
1855 addresses may legally contain colons, and in that case must be
1856 placed in square brackets. */
1857 if (quoted_host != url->host)
1858 unescape_single_char (quoted_host, ':');
/* A host containing ':' (an IPv6 literal) must be written as
   "[host]" in the URL. */
1859 brackets_around_host = strchr (quoted_host, ':') != NULL;
/* Compute the exact output size up front so a single allocation
   suffices; the assert below checks this accounting. */
1861 size = (strlen (scheme_str)
1862 + strlen (quoted_host)
1863 + (brackets_around_host ? 2 : 0)
1866 if (url->port != scheme_port)
1867 size += 1 + numdigit (url->port);
1870 size += 1 + strlen (quoted_user);
1872 size += 1 + strlen (quoted_passwd);
1875 p = result = xmalloc (size);
1877 APPEND (p, scheme_str);
1880 APPEND (p, quoted_user);
1884 APPEND (p, quoted_passwd);
1889 if (brackets_around_host)
1891 APPEND (p, quoted_host);
1892 if (brackets_around_host)
1894 if (url->port != scheme_port)
1897 p = number_to_string (p, url->port);
1900 full_path_write (url, p);
1904 assert (p - result == size);
/* Free the quoted copies only if url_escape actually allocated them.
   The !hide_password test matters: when hiding, quoted_passwd is the
   static HIDDEN_PASSWORD literal and must not be freed. */
1906 if (quoted_user && quoted_user != url->user)
1907 xfree (quoted_user);
1908 if (quoted_passwd && !hide_password && quoted_passwd != url->passwd)
1909 xfree (quoted_passwd);
1910 if (quoted_host != url->host)
1911 xfree (quoted_host);
1916 /* Return non-zero if scheme a is similar to scheme b.
1918 Schemes are similar if they are equal. If SSL is supported, schemes
1919 are also similar if one is http (SCHEME_HTTP) and the other is https
1922 schemes_are_similar_p (enum url_scheme a, enum url_scheme b)
/* http and https count as the same scheme for comparison purposes. */
1927 if ((a == SCHEME_HTTP && b == SCHEME_HTTPS)
1928 || (a == SCHEME_HTTPS && b == SCHEME_HTTP))
1935 /* Debugging and testing support for path_simplify. */
1937 /* Debug: run path_simplify on PATH and return the result in a new
1938 string. Useful for calling from the debugger. */
/* Work on a heap copy so the caller's PATH is left untouched. */
1942 char *copy = xstrdup (path);
1943 path_simplify (copy);
/* Run path_simplify on TEST and report (via printf) any mismatch with
   EXPECTED_RESULT or with the expected modified/unmodified status. */
1948 run_test (char *test, char *expected_result, int expected_change)
1950 char *test_copy = xstrdup (test);
1951 int modified = path_simplify (test_copy);
/* Check the simplified string against the expected result. */
1953 if (0 != strcmp (test_copy, expected_result))
1955 printf ("Failed path_simplify(\"%s\"): expected \"%s\", got \"%s\".\n",
1956 test, expected_result, test_copy);
/* Also check that the "was modified" return value is as expected. */
1958 if (modified != expected_change)
1960 if (expected_change == 1)
1961 printf ("Expected modification with path_simplify(\"%s\").\n",
1964 printf ("Expected no modification with path_simplify(\"%s\").\n",
/* Self-test for path_simplify: run a table of inputs through run_test,
   which prints a diagnostic for every failing case. */
1971 test_path_simplify (void)
1974 char *test, *result;
/* Each entry: input path, expected simplified path, and whether
   path_simplify is expected to report a modification. */
1981 { "../", "../", 0 },
1982 { "foo", "foo", 0 },
1983 { "foo/bar", "foo/bar", 0 },
1984 { "foo///bar", "foo///bar", 0 },
1985 { "foo/.", "foo/", 1 },
1986 { "foo/./", "foo/", 1 },
1987 { "foo./", "foo./", 0 },
1988 { "foo/../bar", "bar", 1 },
1989 { "foo/../bar/", "bar/", 1 },
1990 { "foo/bar/..", "foo/", 1 },
1991 { "foo/bar/../x", "foo/x", 1 },
1992 { "foo/bar/../x/", "foo/x/", 1 },
1993 { "foo/..", "", 1 },
1994 { "foo/../..", "..", 1 },
1995 { "foo/../../..", "../..", 1 },
1996 { "foo/../../bar/../../baz", "../../baz", 1 },
1997 { "a/b/../../c", "c", 1 },
1998 { "./a/../b", "b", 1 }
/* Exercise every table entry. */
2002 for (i = 0; i < countof (tests); i++)
2004 char *test = tests[i].test;
2005 char *expected_result = tests[i].result;
2006 int expected_change = tests[i].should_modify;
2007 run_test (test, expected_result, expected_change);