/* URL handling.
- Copyright (C) 1995, 1996, 1997, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.
This file is part of Wget.
#else
# include <strings.h>
#endif
-#include <ctype.h>
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
extern int errno;
#endif
-/* Default port definitions */
-#define DEFAULT_HTTP_PORT 80
-#define DEFAULT_FTP_PORT 21
-
/* Table of Unsafe chars. This is intialized in
init_unsafe_char_table. */
#define UNSAFE_CHAR(c) (unsafe_char_table[(unsigned char)(c)])
-/* If S contains unsafe characters, free it and replace it with a
- version that doesn't. */
-#define URL_CLEANSE(s) do \
-{ \
- if (contains_unsafe (s)) \
- { \
- char *uc_tmp = encode_string (s); \
- free (s); \
- (s) = uc_tmp; \
- } \
-} while (0)
+/* rfc1738 reserved chars. This is too short to warrant a table. We
+ don't use this yet; preservation of reserved chars will be
+ implemented when I integrate the new `reencode_string'
+ function. */
+#define RESERVED_CHAR(c) ( (c) == ';' || (c) == '/' || (c) == '?' \
+ || (c) == '@' || (c) == '=' || (c) == '&' \
+ || (c) == '+')
-/* Is a directory "."? */
+/* Is X "."? */
#define DOTP(x) ((*(x) == '.') && (!*(x + 1)))
-/* Is a directory ".."? */
+/* Is X ".."? */
#define DDOTP(x) ((*(x) == '.') && (*(x + 1) == '.') && (!*(x + 2)))
-#if 0
-static void path_simplify_with_kludge PARAMS ((char *));
-#endif
static int urlpath_length PARAMS ((const char *));
-/* NULL-terminated list of strings to be recognized as prototypes (URL
- schemes). Note that recognized doesn't mean supported -- only HTTP
- and FTP are currently supported.
+/* A NULL-terminated list of strings to be recognized as protocol
+ types (URL schemes). Note that recognized doesn't mean supported
+ -- only HTTP, HTTPS and FTP are currently supported.
However, a string that does not match anything in the list will be
considered a relative URL. Thus it's important that this list has
anything anyone could think of being legal.
- There are wild things here. :-) Take a look at
- <URL:http://www.w3.org/pub/WWW/Addressing/schemes.html> for more
- fun. */
+ #### This is probably broken. Wget should use other means to
+ distinguish between absolute and relative URIs in HTML links.
+
+ Take a look at <http://www.w3.org/pub/WWW/Addressing/schemes.html>
+ for more. */
static char *protostrings[] =
{
"cid:",
static struct proto sup_protos[] =
{
{ "http://", URLHTTP, DEFAULT_HTTP_PORT },
- { "ftp://", URLFTP, DEFAULT_FTP_PORT },
- /*{ "file://", URLFILE, DEFAULT_FTP_PORT },*/
+#ifdef HAVE_SSL
+ { "https://",URLHTTPS, DEFAULT_HTTPS_PORT},
+#endif
+ { "ftp://", URLFTP, DEFAULT_FTP_PORT }
};
static void parse_dir PARAMS ((const char *, char **, char **));
static uerr_t parse_uname PARAMS ((const char *, char **, char **));
-static char *construct PARAMS ((const char *, const char *, int , int));
static char *construct_relative PARAMS ((const char *, const char *));
static char process_ftp_type PARAMS ((char *));
\f
-/* Returns the number of characters to be skipped if the first thing
- in a URL is URL: (which is 0 or 4+). The optional spaces after
- URL: are also skipped. */
-int
-skip_url (const char *url)
-{
- int i;
-
- if (TOUPPER (url[0]) == 'U'
- && TOUPPER (url[1]) == 'R'
- && TOUPPER (url[2]) == 'L'
- && url[3] == ':')
- {
- /* Skip blanks. */
- for (i = 4; url[i] && ISSPACE (url[i]); i++);
- return i;
- }
- else
- return 0;
-}
-
/* Unsafe chars:
- anything <= 32;
- stuff from rfc1738 ("<>\"#%{}|\\^~[]`");
int i;
for (i = 0; i < 256; i++)
if (i < 32 || i >= 127
+ || i == ' '
|| i == '<'
|| i == '>'
|| i == '\"'
unsafe_char_table[i] = 1;
}
-/* Returns 1 if the string contains unsafe characters, 0 otherwise. */
-int
-contains_unsafe (const char *s)
-{
- for (; *s; s++)
- if (UNSAFE_CHAR (*s))
- return 1;
- return 0;
-}
-
/* Decodes the forms %xy in a URL to the character the hexadecimal
code of which is xy. xy are hexadecimal digits from
[0123456789ABCDEF] (case-insensitive). If x or y are not
*p = *s;
continue;
}
- *p = (ASC2HEXD (*(s + 1)) << 4) + ASC2HEXD (*(s + 2));
+ *p = (XCHAR_TO_XDIGIT (*(s + 1)) << 4) + XCHAR_TO_XDIGIT (*(s + 2));
s += 2;
}
}
*p = '\0';
}
-/* Encode the unsafe characters (as determined by URL_UNSAFE) in a
+/* Like encode_string, but return S if there are no unsafe chars. */
+
+static char *
+encode_string_maybe (const char *s)
+{
+ const char *p1;
+ char *p2, *newstr;
+ int newlen;
+ int addition = 0;
+
+ for (p1 = s; *p1; p1++)
+ if (UNSAFE_CHAR (*p1))
+ addition += 2; /* Two more characters (hex digits) */
+
+ if (!addition)
+ return (char *)s;
+
+ newlen = (p1 - s) + addition;
+ newstr = (char *)xmalloc (newlen + 1);
+
+ p1 = s;
+ p2 = newstr;
+ while (*p1)
+ {
+ if (UNSAFE_CHAR (*p1))
+ {
+ const unsigned char c = *p1++;
+ *p2++ = '%';
+ *p2++ = XDIGIT_TO_XCHAR (c >> 4);
+ *p2++ = XDIGIT_TO_XCHAR (c & 0xf);
+ }
+ else
+ *p2++ = *p1++;
+ }
+ *p2 = '\0';
+ assert (p2 - newstr == newlen);
+
+ return newstr;
+}
+
+/* Encode the unsafe characters (as determined by UNSAFE_CHAR) in a
given string, returning a malloc-ed %XX encoded string. */
+
char *
encode_string (const char *s)
{
- const char *b;
- char *p, *res;
- int i;
-
- b = s;
- for (i = 0; *s; s++, i++)
- if (UNSAFE_CHAR (*s))
- i += 2; /* Two more characters (hex digits) */
- res = (char *)xmalloc (i + 1);
- s = b;
- for (p = res; *s; s++)
- if (UNSAFE_CHAR (*s))
- {
- const unsigned char c = *s;
- *p++ = '%';
- *p++ = HEXD2ASC (c >> 4);
- *p++ = HEXD2ASC (c & 0xf);
- }
- else
- *p++ = *s;
- *p = '\0';
- return res;
+ char *encoded = encode_string_maybe (s);
+ if (encoded != s)
+ return encoded;
+ else
+ return xstrdup (s);
}
+
+/* Encode unsafe characters in PTR to %xx. If such encoding is done,
+ the old value of PTR is freed and PTR is made to point to the newly
+ allocated storage. */
+
+#define ENCODE(ptr) do { \
+ char *e_new = encode_string_maybe (ptr); \
+ if (e_new != ptr) \
+ { \
+ xfree (ptr); \
+ ptr = e_new; \
+ } \
+} while (0)
\f
-/* Returns the proto-type if URL's protocol is supported, or
+/* Returns the protocol type if URL's protocol is supported, or
URLUNKNOWN if not. */
uerr_t
urlproto (const char *url)
{
int i;
- url += skip_url (url);
for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
if (!strncasecmp (url, sup_protos[i].name, strlen (sup_protos[i].name)))
return sup_protos[i].ind;
{
char **s;
- url += skip_url (url);
for (s = protostrings; *s; s++)
if (strncasecmp (url, *s, strlen (*s)) == 0)
return 1;
skip_uname (const char *url)
{
const char *p;
- for (p = url; *p && *p != '/'; p++)
- if (*p == '@')
- break;
+ const char *q = NULL;
+ for (p = url ; *p && *p != '/'; p++)
+ if (*p == '@') q = p;
/* If a `@' was found before the first occurrence of `/', skip
it. */
- if (*p == '@')
- return p - url + 1;
+ if (q != NULL)
+ return q - url + 1;
else
return 0;
}
if (u->proxy)
freeurl (u->proxy, 1);
if (complete)
- free (u);
+ xfree (u);
return;
}
\f
uerr_t type;
DEBUGP (("parseurl (\"%s\") -> ", url));
- url += skip_url (url);
recognizable = has_proto (url);
if (strict && !recognizable)
return URLUNKNOWN;
u->proto = type = URLHTTP;
if (!u->port)
{
- int i;
- for (i = 0; i < ARRAY_SIZE (sup_protos); i++)
- if (sup_protos[i].ind == type)
+ int ind;
+ for (ind = 0; ind < ARRAY_SIZE (sup_protos); ind++)
+ if (sup_protos[ind].ind == type)
break;
- if (i == ARRAY_SIZE (sup_protos))
+ if (ind == ARRAY_SIZE (sup_protos))
return URLUNKNOWN;
- u->port = sup_protos[i].port;
+ u->port = sup_protos[ind].port;
}
/* Some delimiter troubles... */
if (url[i] == '/' && url[i - 1] != ':')
/* #### We don't handle type `d' correctly yet. */
if (!u->ftp_type || TOUPPER (u->ftp_type) == 'D')
u->ftp_type = 'I';
+ DEBUGP (("ftp_type %c -> ", u->ftp_type));
}
DEBUGP (("opath %s -> ", u->path));
/* Parse the username and password (if existing). */
DEBUGP (("ndir %s\n", u->dir));
/* Strip trailing `/'. */
l = strlen (u->dir);
- if (l && u->dir[l - 1] == '/')
+ if (l > 1 && u->dir[l - 1] == '/')
u->dir[l - 1] = '\0';
/* Re-create the path: */
abs_ftp = (u->proto == URLFTP && *u->dir == '/');
strcat (u->path, abs_ftp ? (u->dir + 1) : u->dir);
strcat (u->path, *u->dir ? "/" : "");
strcat (u->path, u->file);
- URL_CLEANSE (u->path);
+ ENCODE (u->path);
DEBUGP (("newpath: %s\n", u->path));
/* Create the clean URL. */
u->url = str_url (u, 0);
return URLOK;
}
\f
-/* Special versions of DOTP and DDOTP for parse_dir(). */
+/* Special versions of DOTP and DDOTP for parse_dir(). They work like
+ DOTP and DDOTP, but they also recognize `?' as end-of-string
+ delimiter. This is needed for correct handling of query
+ strings. */
#define PD_DOTP(x) ((*(x) == '.') && (!*((x) + 1) || *((x) + 1) == '?'))
#define PD_DDOTP(x) ((*(x) == '.') && (*(x) == '.') \
parse_uname (const char *url, char **user, char **passwd)
{
int l;
- const char *p, *col;
+ const char *p, *q, *col;
char **where;
*user = NULL;
*passwd = NULL;
- url += skip_url (url);
- /* Look for end of protocol string. */
+
+ /* Look for the end of the protocol string. */
l = skip_proto (url);
if (!l)
return URLUNKNOWN;
if (*p != '@')
return URLOK;
/* Else find the username and password. */
- for (p = col = url; *p != '@'; p++)
+ for (p = q = col = url; *p && *p != '/'; p++)
{
if (*p == ':' && !*user)
{
(*user)[p - url] = '\0';
col = p + 1;
}
+ if (*p == '@') q = p;
}
/* Decide whether you have only the username or both. */
where = *user ? passwd : user;
- *where = (char *)xmalloc (p - col + 1);
- memcpy (*where, col, p - col);
- (*where)[p - col] = '\0';
+ *where = (char *)xmalloc (q - col + 1);
+ memcpy (*where, col, q - col);
+ (*where)[q - col] = '\0';
return URLOK;
}
return '\0';
}
\f
-/* Return the URL as fine-formed string, with a proper protocol,
- optional port number, directory and optional user/password. If
- HIDE is non-zero, password will be hidden. The forbidden
- characters in the URL will be cleansed. */
+/* Return the URL as fine-formed string, with a proper protocol, optional port
+ number, directory and optional user/password. If `hide' is non-zero (as it
+ is when we're calling this on a URL we plan to print, but not when calling it
+ to canonicalize a URL for use within the program), password will be hidden.
+ The forbidden characters in the URL will be cleansed. */
char *
str_url (const struct urlinfo *u, int hide)
{
return NULL;
proto_name = sup_protos[i].name;
proto_default_port = sup_protos[i].port;
- host = CLEANDUP (u->host);
- dir = CLEANDUP (u->dir);
- file = CLEANDUP (u->file);
+ host = encode_string (u->host);
+ dir = encode_string (u->dir);
+ file = encode_string (u->file);
user = passwd = NULL;
if (u->user)
- user = CLEANDUP (u->user);
+ user = encode_string (u->user);
if (u->passwd)
{
- int i;
- passwd = CLEANDUP (u->passwd);
if (hide)
- for (i = 0; passwd[i]; i++)
- passwd[i] = 'x';
+ /* Don't output the password, or someone might see it over the user's
+ shoulder (or in saved wget output). Don't give away the number of
+ characters in the password, either, as we did in past versions of
+ this code, when we replaced the password characters with 'x's. */
+ passwd = xstrdup("<password>");
+ else
+ passwd = encode_string (u->passwd);
}
if (u->proto == URLFTP && *dir == '/')
{
tmp[1] = '2';
tmp[2] = 'F';
strcpy (tmp + 3, dir + 1);
- free (dir);
+ xfree (dir);
dir = tmp;
}
if (*dir)
res[l++] = '/';
strcpy (res + l, file);
- free (host);
- free (dir);
- free (file);
+ xfree (host);
+ xfree (dir);
+ xfree (file);
FREE_MAYBE (user);
FREE_MAYBE (passwd);
return res;
while (l)
{
urlpos *next = l->next;
- free (l->url);
+ xfree (l->url);
FREE_MAYBE (l->local_name);
- free (l);
+ xfree (l);
l = next;
}
}
{
if (S_ISDIR (st.st_mode))
{
- free (t);
+ xfree (t);
return 0;
}
else
res = make_directory (t);
if (res != 0)
logprintf (LOG_NOTQUIET, "%s: %s", t, strerror (errno));
- free (t);
+ xfree (t);
return res;
}
if (opt.add_hostdir && !opt.simple_check)
{
char *nhost = realhost (host);
- free (host);
+ xfree (host);
host = nhost;
}
/* Add dir_prefix and hostname (if required) to the beginning of
else
dirpref = "";
}
- free (host);
+ xfree (host);
/* If there is a prefix, prepend it. */
if (*dirpref)
sprintf (newdir, "%s%s%s", dirpref, *dir == '/' ? "" : "/", dir);
dir = newdir;
}
- dir = xstrdup (dir);
- URL_CLEANSE (dir);
+ dir = encode_string (dir);
l = strlen (dir);
if (l && dir[l - 1] == '/')
dir[l - 1] = '\0';
/* Finally, construct the full name. */
res = (char *)xmalloc (strlen (dir) + 1 + strlen (file) + 1);
sprintf (res, "%s%s%s", dir, *dir ? "/" : "", file);
- free (dir);
+ xfree (dir);
return res;
}
char *nfile = (char *)xmalloc (strlen (opt.dir_prefix)
+ 1 + strlen (file) + 1);
sprintf (nfile, "%s/%s", opt.dir_prefix, file);
- free (file);
+ xfree (file);
file = nfile;
}
}
/* Find a unique name. */
name = unique_name (file);
- free (file);
+ xfree (file);
return name;
}
return NULL;
}
-/* Construct a URL by concatenating an absolute URL and a path, which
- may or may not be absolute. This tries to behave "reasonably" in
- all foreseeable cases. It employs little specific knowledge about
- protocols or URL-specific stuff -- it just works on strings. */
+/* Resolve the result of "linking" a base URI (BASE) to a
+ link-specified URI (LINK).
+
+ Either of the URIs may be absolute or relative, complete with the
+ host name, or path only. This tries to behave "reasonably" in all
+ foreseeable cases. It employs little specific knowledge about
+ protocols or URL-specific stuff -- it just works on strings.
+
+ The parameters LINKLENGTH is useful if LINK is not zero-terminated.
+ See uri_merge for a gentler interface to this functionality.
+
+ #### This function should handle `./' and `../' so that the evil
+ path_simplify can go. */
static char *
-construct (const char *url, const char *sub, int subsize, int no_proto)
+uri_merge_1 (const char *base, const char *link, int linklength, int no_proto)
{
char *constr;
if (no_proto)
{
- const char *end = url + urlpath_length (url);
+ const char *end = base + urlpath_length (base);
- if (*sub != '/')
+ if (*link != '/')
{
- /* SUB is a relative URL: we need to replace everything
- after last slash (possibly empty) with SUB.
+ /* LINK is a relative URL: we need to replace everything
+ after last slash (possibly empty) with LINK.
- So, if URL is "whatever/foo/bar", and SUB is "qux/xyzzy",
+ So, if BASE is "whatever/foo/bar", and LINK is "qux/xyzzy",
our result should be "whatever/foo/qux/xyzzy". */
int need_explicit_slash = 0;
int span;
const char *start_insert;
- const char *last_slash = find_last_char (url, end, '/'); /* the last slash. */
+ const char *last_slash = find_last_char (base, end, '/');
if (!last_slash)
{
- /* No slash found at all. Append SUB to what we have,
+ /* No slash found at all. Append LINK to what we have,
but we'll need a slash as a separator.
- Example: if url == "foo" and sub == "qux/xyzzy", then
- we cannot just append sub to url, because we'd get
+ Example: if base == "foo" and link == "qux/xyzzy", then
+ we cannot just append link to base, because we'd get
"fooqux/xyzzy", whereas what we want is
"foo/qux/xyzzy".
start_insert = end + 1;
need_explicit_slash = 1;
}
- else if (last_slash && last_slash != url && *(last_slash - 1) == '/')
+ else if (last_slash && last_slash != base && *(last_slash - 1) == '/')
{
/* example: http://host" */
/* ^ */
start_insert = last_slash + 1;
}
- span = start_insert - url;
- constr = (char *)xmalloc (span + subsize + 1);
+ span = start_insert - base;
+ constr = (char *)xmalloc (span + linklength + 1);
if (span)
- memcpy (constr, url, span);
+ memcpy (constr, base, span);
if (need_explicit_slash)
constr[span - 1] = '/';
- if (subsize)
- memcpy (constr + span, sub, subsize);
- constr[span + subsize] = '\0';
+ if (linklength)
+ memcpy (constr + span, link, linklength);
+ constr[span + linklength] = '\0';
}
- else /* *sub == `/' */
+ else /* *link == `/' */
{
- /* SUB is an absolute path: we need to replace everything
- after (and including) the FIRST slash with SUB.
+ /* LINK is an absolute path: we need to replace everything
+ after (and including) the FIRST slash with LINK.
- So, if URL is "http://host/whatever/foo/bar", and SUB is
+ So, if BASE is "http://host/whatever/foo/bar", and LINK is
"/qux/xyzzy", our result should be
"http://host/qux/xyzzy". */
int span;
const char *slash;
const char *start_insert = NULL; /* for gcc to shut up. */
- const char *pos = url;
+ const char *pos = base;
int seen_slash_slash = 0;
/* We're looking for the first slash, but want to ignore
double slash. */
/* At this point, SLASH is the location of the first / after
"//", or the first slash altogether. START_INSERT is the
- pointer to the location where SUB will be inserted. When
- examining the last two examples, keep in mind that SUB
+ pointer to the location where LINK will be inserted. When
+ examining the last two examples, keep in mind that LINK
begins with '/'. */
if (!slash && !seen_slash_slash)
/* example: "foo" */
/* ^ */
- start_insert = url;
+ start_insert = base;
else if (!slash && seen_slash_slash)
/* example: "http://foo" */
/* ^ */
else if (slash && !seen_slash_slash)
/* example: "foo/bar" */
/* ^ */
- start_insert = url;
+ start_insert = base;
else if (slash && seen_slash_slash)
/* example: "http://something/" */
/* ^ */
start_insert = slash;
- span = start_insert - url;
- constr = (char *)xmalloc (span + subsize + 1);
+ span = start_insert - base;
+ constr = (char *)xmalloc (span + linklength + 1);
if (span)
- memcpy (constr, url, span);
- if (subsize)
- memcpy (constr + span, sub, subsize);
- constr[span + subsize] = '\0';
+ memcpy (constr, base, span);
+ if (linklength)
+ memcpy (constr + span, link, linklength);
+ constr[span + linklength] = '\0';
}
}
else /* !no_proto */
{
- constr = strdupdelim (sub, sub + subsize);
+ constr = strdupdelim (link, link + linklength);
}
return constr;
}
-/* Like the function above, but with a saner caller interface. */
+/* Merge BASE with LINK and return the resulting URI. This is an
+ interface to uri_merge_1 that assumes that LINK is a
+ zero-terminated string. */
char *
-url_concat (const char *base_url, const char *new_url)
+uri_merge (const char *base, const char *link)
{
- return construct (base_url, new_url, strlen (new_url), !has_proto (new_url));
+ return uri_merge_1 (base, link, strlen (link), !has_proto (link));
}
\f
/* Optimize URL by host, destructively replacing u->host with realhost
{
/* Find the "true" host. */
char *host = realhost (u->host);
- free (u->host);
+ xfree (u->host);
u->host = host;
assert (u->dir != NULL); /* the URL must have been parsed */
/* Refresh the printed representation. */
- free (u->url);
+ xfree (u->url);
u->url = str_url (u, 0);
}
-
-/* This beautiful kludge is fortunately not needed, as I've made
- parse_dir do the (almost) right thing, so that a query can never
- become a part of directory. */
-#if 0
-/* Call path_simplify, but make sure that the part after the
- question-mark, if any, is not destroyed by path_simplify's
- "optimizations". */
-void
-path_simplify_with_kludge (char *path)
-{
- char *query = strchr (path, '?');
- if (query)
- /* path_simplify also works destructively, so we also have the
- license to write. */
- *query = '\0';
- path_simplify (path);
- if (query)
- {
- char *newend = path + strlen (path);
- *query = '?';
- if (newend != query)
- memmove (newend, query, strlen (query) + 1);
- }
-}
-#endif
\f
/* Returns proxy host address, in accordance with PROTO. */
char *
return opt.http_proxy ? opt.http_proxy : getenv ("http_proxy");
else if (proto == URLFTP)
return opt.ftp_proxy ? opt.ftp_proxy : getenv ("ftp_proxy");
+#ifdef HAVE_SSL
+ else if (proto == URLHTTPS)
+ return opt.https_proxy ? opt.https_proxy : getenv ("https_proxy");
+#endif /* HAVE_SSL */
else
return NULL;
}
}
\f
static void write_backup_file PARAMS ((const char *, downloaded_file_t));
+static void replace_attr PARAMS ((const char **, int, FILE *, const char *));
/* Change the links in an HTML document. Accepts a structure that
defines the positions of all the links. */
{
struct file_memory *fm;
FILE *fp;
- char *p;
+ const char *p;
downloaded_file_t downloaded_file_return;
+ logprintf (LOG_VERBOSE, _("Converting %s... "), file);
+
{
/* First we do a "dry run": go through the list L and see whether
any URL needs to be converted in the first place. If not, just
int count = 0;
urlpos *dry = l;
for (dry = l; dry; dry = dry->next)
- if (dry->flags & (UABS2REL | UREL2ABS))
+ if (dry->convert != CO_NOCONVERT)
++count;
if (!count)
{
- logprintf (LOG_VERBOSE, _("Nothing to do while converting %s.\n"),
- file);
+ logputs (LOG_VERBOSE, _("nothing to do.\n"));
return;
}
}
- logprintf (LOG_VERBOSE, _("Converting %s... "), file);
-
fm = read_file (file);
if (!fm)
{
for (; l; l = l->next)
{
char *url_start = fm->content + l->pos;
+
if (l->pos >= fm->length)
{
DEBUGP (("Something strange is going on. Please investigate."));
break;
}
/* If the URL is not to be converted, skip it. */
- if (!(l->flags & (UABS2REL | UREL2ABS)))
+ if (l->convert == CO_NOCONVERT)
{
- DEBUGP (("Skipping %s at position %d (flags %d).\n", l->url,
- l->pos, l->flags));
+ DEBUGP (("Skipping %s at position %d.\n", l->url, l->pos));
continue;
}
quote, to the outfile. */
fwrite (p, 1, url_start - p, fp);
p = url_start;
- if (l->flags & UABS2REL)
+ if (l->convert == CO_CONVERT_TO_RELATIVE)
{
/* Convert absolute URL to relative. */
char *newname = construct_relative (file, l->local_name);
- putc (*p, fp); /* quoting char */
- fputs (newname, fp);
- p += l->size - 1;
- putc (*p, fp); /* close quote */
- ++p;
- DEBUGP (("ABS2REL: %s to %s at position %d in %s.\n",
+ char *quoted_newname = html_quote_string (newname);
+ replace_attr (&p, l->size, fp, quoted_newname);
+ DEBUGP (("TO_RELATIVE: %s to %s at position %d in %s.\n",
l->url, newname, l->pos, file));
- free (newname);
+ xfree (newname);
+ xfree (quoted_newname);
}
- else if (l->flags & UREL2ABS)
+ else if (l->convert == CO_CONVERT_TO_COMPLETE)
{
/* Convert the link to absolute URL. */
char *newlink = l->url;
- putc (*p, fp); /* quoting char */
- fputs (newlink, fp);
- p += l->size - 1;
- putc (*p, fp); /* close quote */
- ++p;
- DEBUGP (("REL2ABS: <something> to %s at position %d in %s.\n",
+ char *quoted_newlink = html_quote_string (newlink);
+ replace_attr (&p, l->size, fp, quoted_newlink);
+ DEBUGP (("TO_COMPLETE: <something> to %s at position %d in %s.\n",
newlink, l->pos, file));
+ xfree (quoted_newlink);
}
}
/* Output the rest of the file. */
rather than making a copy of the string... Another note is that I
thought I could just add a field to the urlpos structure saying
that we'd written a .orig file for this URL, but that didn't work,
- so I had to make this separate list. */
+ so I had to make this separate list.
+ -- Dan Harkless <wget@harkless.org>
+
+ This [adding a field to the urlpos structure] didn't work
+ because convert_file() is called twice: once after all its
+ sublinks have been retrieved in recursive_retrieve(), and
+ once at the end of the day in convert_all_links(). The
+ original linked list collected in recursive_retrieve() is
+ lost after the first invocation of convert_links(), and
+ convert_all_links() makes a new one (it calls get_urls_html()
+ for each file it covers.) That's why your first approach didn't
+ work. The way to make it work is perhaps to make this flag a
+ field in the `urls_html' list.
+ -- Hrvoje Niksic <hniksic@arsdigita.com>
+ */
converted_file_ptr = xmalloc(sizeof(*converted_file_ptr));
converted_file_ptr->string = xstrdup(file); /* die on out-of-mem. */
converted_file_ptr->next = converted_files;
}
}
+static int find_fragment PARAMS ((const char *, int, const char **,
+ const char **));
+
+static void
+replace_attr (const char **pp, int raw_size, FILE *fp, const char *new_str)
+{
+ const char *p = *pp;
+ int quote_flag = 0;
+ int size = raw_size;
+ char quote_char = '\"';
+ const char *frag_beg, *frag_end;
+
+ /* Structure of our string is:
+ "...old-contents..."
+ <--- l->size ---> (with quotes)
+ OR:
+ ...old-contents...
+ <--- l->size --> (no quotes) */
+
+ if (*p == '\"' || *p == '\'')
+ {
+ quote_char = *p;
+ quote_flag = 1;
+ ++p;
+ size -= 2; /* disregard opening and closing quote */
+ }
+ putc (quote_char, fp);
+ fputs (new_str, fp);
+
+ /* Look for fragment identifier, if any. */
+ if (find_fragment (p, size, &frag_beg, &frag_end))
+ fwrite (frag_beg, 1, frag_end - frag_beg, fp);
+ p += size;
+ if (quote_flag)
+ ++p;
+ putc (quote_char, fp);
+ *pp = p;
+}
+
+/* Find the first occurrence of '#' in [BEG, BEG+SIZE) that is not
+ preceded by '&'. If the character is not found, return zero. If
+ the character is found, return 1 and set BP and EP to point to the
+ beginning and end of the region.
+
+   This is used for finding the fragment identifiers in URLs. */
+
+static int
+find_fragment (const char *beg, int size, const char **bp, const char **ep)
+{
+ const char *end = beg + size;
+ int saw_amp = 0;
+ for (; beg < end; beg++)
+ {
+ switch (*beg)
+ {
+ case '&':
+ saw_amp = 1;
+ break;
+ case '#':
+ if (!saw_amp)
+ {
+ *bp = beg;
+ *ep = end;
+ return 1;
+ }
+ /* fallthrough */
+ default:
+ saw_amp = 0;
+ }
+ }
+ return 0;
+}
+
+typedef struct _downloaded_file_list {
+ char* file;
+ downloaded_file_t download_type;
+ struct _downloaded_file_list* next;
+} downloaded_file_list;
+
+static downloaded_file_list *downloaded_files;
+
/* Remembers which files have been downloaded. In the standard case, should be
called with mode == FILE_DOWNLOADED_NORMALLY for each file we actually
download successfully (i.e. not for ones we have failures on or that we skip
downloaded_file_t
downloaded_file (downloaded_file_t mode, const char* file)
{
- typedef struct _downloaded_file_list
- {
- char* file;
- downloaded_file_t download_type;
- struct _downloaded_file_list* next;
- } downloaded_file_list;
-
boolean found_file = FALSE;
- static downloaded_file_list* downloaded_files = NULL;
downloaded_file_list* rover = downloaded_files;
while (rover != NULL)
return FILE_NOT_ALREADY_DOWNLOADED;
}
}
+
+void
+downloaded_files_free (void)
+{
+ downloaded_file_list* rover = downloaded_files;
+ while (rover)
+ {
+ downloaded_file_list *next = rover->next;
+ xfree (rover->file);
+ xfree (rover);
+ rover = next;
+ }
+}
\f
/* Initialization of static stuff. */
void