static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;
-/* List of HTML files downloaded in this Wget run. Used for link
- conversion after Wget is done. This list should only be traversed
- in order. If you need to check whether a file has been downloaded,
- use a hash table, e.g. dl_file_url_map. */
-static slist *downloaded_html_files;
+/* List of HTML files downloaded in this Wget run, used for link
+ conversion after Wget is done. The list and the set contain the
+ same information, except the list maintains the order. Perhaps I
+   should get rid of the list; it's there for historical reasons. */
+static slist *downloaded_html_list;
+static struct hash_table *downloaded_html_set;
+
+static void register_delete_file PARAMS ((const char *));
\f
/* Functions for maintaining the URL queue. */
return 1;
}
\f
-static int descend_url_p PARAMS ((const struct urlpos *, struct url *, int,
- struct url *, struct hash_table *));
+static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
+ struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
struct url *, struct hash_table *));
uerr_t status = RETROK;
/* The queue of URLs we need to load. */
- struct url_queue *queue = url_queue_new ();
+ struct url_queue *queue;
/* The URLs we do not wish to enqueue, because they are already in
the queue, but haven't been downloaded yet. */
- struct hash_table *blacklist = make_string_hash_table (0);
+ struct hash_table *blacklist;
- /* We'll need various components of this, so better get it over with
- now. */
- struct url *start_url_parsed = url_parse (start_url, NULL);
+ int up_error_code;
+ struct url *start_url_parsed = url_parse (start_url, &up_error_code);
- url_enqueue (queue, xstrdup (start_url), NULL, 0);
- string_set_add (blacklist, start_url);
+ if (!start_url_parsed)
+ {
+ logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
+ url_error (up_error_code));
+ return URLERROR;
+ }
+
+ queue = url_queue_new ();
+ blacklist = make_string_hash_table (0);
+
+ /* Enqueue the starting URL. Use start_url_parsed->url rather than
+ just URL so we enqueue the canonical form of the URL. */
+ url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
+ string_set_add (blacklist, start_url_parsed->url);
while (1)
{
if (downloaded_exceeds_quota ())
break;
-
if (status == FWRITEERR)
break;
- /* Get the next URL from the queue. */
+ /* Get the next URL from the queue... */
if (!url_dequeue (queue,
(const char **)&url, (const char **)&referer,
&depth))
break;
- /* And download it. */
+ /* ...and download it. Note that this download is in most cases
+ unconditional, as download_child_p already makes sure a file
+ doesn't get enqueued twice -- and yet this check is here, and
+     not in download_child_p.  This is so that if you run `wget -r
+     URL1 URL2', and a random URL is encountered once under URL1
+     and again under URL2, but at a different (possibly smaller)
+     depth, the URL's children are taken into account the second
+     time around. */
+ if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
+ {
+ file = xstrdup (hash_table_get (dl_url_file_map, url));
- {
- int dt = 0;
- char *redirected = NULL;
- int oldrec = opt.recursive;
+ DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
+ url, file));
- opt.recursive = 0;
- status = retrieve_url (url, &file, &redirected, NULL, &dt);
- opt.recursive = oldrec;
+          if (downloaded_html_set
+              && string_set_contains (downloaded_html_set, file))
+ descend = 1;
+ }
+ else
+ {
+ int dt = 0;
+ char *redirected = NULL;
+ int oldrec = opt.recursive;
- if (file && status == RETROK
- && (dt & RETROKF) && (dt & TEXTHTML))
- descend = 1;
+ opt.recursive = 0;
+ status = retrieve_url (url, &file, &redirected, referer, &dt);
+ opt.recursive = oldrec;
- if (redirected)
- {
- /* We have been redirected, possibly to another host, or
- different path, or wherever. Check whether we really
- want to follow it. */
- if (descend)
- {
- if (!descend_redirect_p (redirected, url, depth,
- start_url_parsed, blacklist))
- descend = 0;
- else
- /* Make sure that the old pre-redirect form gets
- blacklisted. */
- string_set_add (blacklist, url);
- }
-
- xfree (url);
- url = redirected;
- }
- }
+ if (file && status == RETROK
+ && (dt & RETROKF) && (dt & TEXTHTML))
+ descend = 1;
+
+ if (redirected)
+ {
+ /* We have been redirected, possibly to another host, or
+ different path, or wherever. Check whether we really
+ want to follow it. */
+ if (descend)
+ {
+ if (!descend_redirect_p (redirected, url, depth,
+ start_url_parsed, blacklist))
+ descend = 0;
+ else
+ /* Make sure that the old pre-redirect form gets
+ blacklisted. */
+ string_set_add (blacklist, url);
+ }
+
+ xfree (url);
+ url = redirected;
+ }
+ }
if (descend
&& depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
{
- if (opt.page_requisites && depth == opt.reclevel)
- /* When -p is specified, we can do one more partial
- recursion from the "leaf nodes" on the HTML document
- tree. The recursion is partial in that we won't
- traverse any <A> or <AREA> tags, nor any <LINK> tags
- except for <LINK REL="stylesheet">. */
- dash_p_leaf_HTML = TRUE;
+ if (opt.page_requisites
+ && (depth == opt.reclevel || depth == opt.reclevel + 1))
+ {
+ /* When -p is specified, we are allowed to exceed the
+ maximum depth, but only for the "inline" links,
+ i.e. those that are needed to display the page.
+ Originally this could exceed the depth at most by
+ one, but we allow one more level so that the leaf
+ pages that contain frames can be loaded
+ correctly. */
+ dash_p_leaf_HTML = TRUE;
+ }
else
{
/* Either -p wasn't specified or it was and we've
- already gone the one extra (pseudo-)level that it
+ already spent the two extra (pseudo-)levels that it
affords us, so we need to bail out. */
DEBUGP (("Not descending further; at depth %d, max. %d.\n",
depth, opt.reclevel));
if (descend)
{
int meta_disallow_follow = 0;
- struct urlpos *children = get_urls_html (file, url, dash_p_leaf_HTML,
- &meta_disallow_follow);
+ struct urlpos *children
+ = get_urls_html (file, url, &meta_disallow_follow);
if (opt.use_robots && meta_disallow_follow)
{
{
if (child->ignore_when_downloading)
continue;
- if (descend_url_p (child, url_parsed, depth, start_url_parsed,
- blacklist))
+ if (dash_p_leaf_HTML && !child->link_inline_p)
+ continue;
+ if (download_child_p (child, url_parsed, depth, start_url_parsed,
+ blacklist))
{
url_enqueue (queue, xstrdup (child->url->url),
xstrdup (url), depth + 1);
file);
if (unlink (file))
logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
+ register_delete_file (file);
}
xfree (url);
will help if those URLs are encountered many times. */
static int
-descend_url_p (const struct urlpos *upos, struct url *parent, int depth,
- struct url *start_url_parsed, struct hash_table *blacklist)
+download_child_p (const struct urlpos *upos, struct url *parent, int depth,
+ struct url *start_url_parsed, struct hash_table *blacklist)
{
struct url *u = upos->url;
const char *url = u->url;
/* 1. Schemes other than HTTP are normally not recursed into. */
if (u->scheme != SCHEME_HTTP
+#ifdef HAVE_SSL
+ && u->scheme != SCHEME_HTTPS
+#endif
&& !(u->scheme == SCHEME_FTP && opt.follow_ftp))
{
DEBUGP (("Not following non-HTTP schemes.\n"));
/* 2. If it is an absolute link and they are not followed, throw it
out. */
- if (u->scheme == SCHEME_HTTP)
+ if (u->scheme == SCHEME_HTTP
+#ifdef HAVE_SSL
+ || u->scheme == SCHEME_HTTPS
+#endif
+ )
if (opt.relative_only && !upos->link_relative_p)
{
DEBUGP (("It doesn't really look like a relative link.\n"));
/* 4. Check for parent directory.
If we descended to a different host or changed the scheme, ignore
- opt.no_parent. Also ignore it for -p leaf retrievals. */
+ opt.no_parent. Also ignore it for documents needed to display
+ the parent page when in -p mode. */
if (opt.no_parent
- && u->scheme == parent->scheme
- && 0 == strcasecmp (u->host, parent->host)
- && u->port == parent->port)
+ && u->scheme == start_url_parsed->scheme
+ && 0 == strcasecmp (u->host, start_url_parsed->host)
+ && u->port == start_url_parsed->port
+ && !(opt.page_requisites && upos->link_inline_p))
{
- if (!frontcmp (parent->dir, u->dir))
+ if (!frontcmp (start_url_parsed->dir, u->dir))
{
- DEBUGP (("Trying to escape the root directory with no_parent in effect.\n"));
+ DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
+ u->dir, start_url_parsed->dir));
goto out;
}
}
/* 6. */
{
- char *suf;
/* Check for acceptance/rejection rules. We ignore these rules
for HTML documents because they might lead to other files which
need to be downloaded. Of course, we don't know which
* u->file is not "" (i.e. it is not a directory)
and either:
+ there is no file suffix,
-        + or there is a suffix, but is not "html" or "htm",
+        + or there is a suffix, but is not "html" or "htm" or similar,
         + both:
           - recursion is not infinite,
           - and we are at its very end. */
if (u->file[0] != '\0'
- && ((suf = suffix (url)) == NULL
- || (0 != strcmp (suf, "html") && 0 != strcmp (suf, "htm"))
- || (opt.reclevel == INFINITE_RECURSION && depth >= opt.reclevel)))
+ && (!has_html_suffix_p (url)
+ || (opt.reclevel != INFINITE_RECURSION && depth >= opt.reclevel)))
{
if (!acceptable (u->file))
{
}
/* 8. */
- if (opt.use_robots && u->scheme == SCHEME_HTTP)
+ if (opt.use_robots && (u->scheme == SCHEME_HTTP
+#ifdef HAVE_SSL
+ || u->scheme == SCHEME_HTTPS
+#endif
+ )
+ )
{
struct robot_specs *specs = res_get_specs (u->host, u->port);
if (!specs)
return 0;
}
-/* This function determines whether we should descend the children of
- the URL whose download resulted in a redirection, possibly to
- another host, etc. It is needed very rarely, and thus it is merely
- a simple-minded wrapper around descend_url_p. */
+/* This function determines whether we will consider downloading the
+ children of a URL whose download resulted in a redirection,
+ possibly to another host, etc. It is needed very rarely, and thus
+ it is merely a simple-minded wrapper around download_child_p. */
static int
descend_redirect_p (const char *redirected, const char *original, int depth,
memset (upos, 0, sizeof (*upos));
upos->url = new_parsed;
- success = descend_url_p (upos, orig_parsed, depth,
- start_url_parsed, blacklist);
+ success = download_child_p (upos, orig_parsed, depth,
+ start_url_parsed, blacklist);
url_free (orig_parsed);
url_free (new_parsed);
}
\f
-/* Register that URL has been successfully downloaded to FILE. */
+#define ENSURE_TABLES_EXIST do { \
+ if (!dl_file_url_map) \
+ dl_file_url_map = make_string_hash_table (0); \
+ if (!dl_url_file_map) \
+ dl_url_file_map = make_string_hash_table (0); \
+} while (0)
+
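+/* A minimal usage sketch (illustration only; `some_condition' and
+   `do_something_else' are made-up names).  The do { ... } while (0)
+   wrapper makes the macro behave as a single statement, so that
+
+     if (some_condition)
+       ENSURE_TABLES_EXIST;
+     else
+       do_something_else ();
+
+   parses correctly -- with a bare { ... } block the semicolon after
+   ENSURE_TABLES_EXIST would terminate the `if' and orphan the `else'.
+   The callers below simply invoke it unconditionally. */
+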
+/* Return 1 if S1 and S2 are the same, except for "/index.html".  The
+   five cases in which it returns one are (substitute any substring
+   for "foo"):
+
+   m("foo/index.html", "foo/")  ==> 1
+   m("foo/", "foo/index.html")  ==> 1
+   m("foo", "foo/index.html")   ==> 1
+   m("foo", "foo/")             ==> 1
+   m("foo", "foo")              ==> 1  */
+
+static int
+match_except_index (const char *s1, const char *s2)
+{
+ int i;
+ const char *lng;
+
+ /* Skip common substring. */
+ for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)
+ ;
+ if (i == 0)
+ /* Strings differ at the very beginning -- bail out. We need to
+ check this explicitly to avoid `lng - 1' reading outside the
+ array. */
+ return 0;
+
+ if (!*s1 && !*s2)
+ /* Both strings hit EOF -- strings are equal. */
+ return 1;
+ else if (*s1 && *s2)
+ /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux". */
+ return 0;
+ else if (*s1)
+ /* S1 is the longer one. */
+ lng = s1;
+ else
+ /* S2 is the longer one. */
+ lng = s2;
+
+  /* foo            */            /* foo/           */
+  /* foo/index.html */  /* or */  /* foo/index.html */
+  /*    ^           */            /*     ^          */
+
+ if (*lng != '/')
+ /* The right-hand case. */
+ --lng;
+
+ if (*lng == '/' && *(lng + 1) == '\0')
+ /* foo */
+ /* foo/ */
+ return 1;
+
+ return 0 == strcmp (lng, "/index.html");
+}
+
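+/* Usage sketch with made-up URLs (the results follow from the cases
+   listed above the function):
+
+     match_except_index ("http://host/dir/", "http://host/dir/index.html")
+       ==> 1
+     match_except_index ("http://host/dir/index.html", "http://host/dir")
+       ==> 1
+     match_except_index ("http://host/a.html", "http://host/b.html")
+       ==> 0  (the strings diverge before either one ends)
+
+   register_download below uses this to treat "foo/" and
+   "foo/index.html" as naming the same download. */
+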
+static int
+dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
+{
+ char *mapping_url = (char *)key;
+ char *mapping_file = (char *)value;
+ char *file = (char *)arg;
+
+ if (0 == strcmp (mapping_file, file))
+ {
+ hash_table_remove (dl_url_file_map, mapping_url);
+ xfree (mapping_url);
+ xfree (mapping_file);
+ }
+
+ /* Continue mapping. */
+ return 0;
+}
+
+/* Remove from dl_url_file_map all entries that map some URL to FILE. */
+
+static void
+dissociate_urls_from_file (const char *file)
+{
+ hash_table_map (dl_url_file_map, dissociate_urls_from_file_mapper,
+ (char *)file);
+}
+
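+/* For illustration, the same mapper protocol can be used for read-only
+   passes over the table; a hypothetical helper that counts how many
+   URLs currently map to a given file might look like this (assuming,
+   as the `return 0' above indicates, that a zero return tells
+   hash_table_map to keep iterating):
+
+     struct file_count_ctx { const char *file; int count; };
+
+     static int
+     count_urls_mapper (void *key, void *value, void *arg)
+     {
+       struct file_count_ctx *ctx = (struct file_count_ctx *) arg;
+       if (0 == strcmp ((char *) value, ctx->file))
+         ++ctx->count;
+       return 0;
+     }
+*/
+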
+/* Register that URL has been successfully downloaded to FILE.  This
+   is used by the link conversion code to convert references to URLs
+   into references to local files.  It is also used to check whether
+   a URL has already been downloaded.  */
void
register_download (const char *url, const char *file)
{
- if (!opt.convert_links)
- return;
- if (!dl_file_url_map)
- dl_file_url_map = make_string_hash_table (0);
- if (!dl_url_file_map)
- dl_url_file_map = make_string_hash_table (0);
-
- if (!hash_table_contains (dl_file_url_map, file))
- hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
- if (!hash_table_contains (dl_url_file_map, url))
- hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
+ char *old_file, *old_url;
+
+ ENSURE_TABLES_EXIST;
+
+  /* With some forms of retrieval, it is possible, although not likely
+     or particularly desirable, for two different URLs to resolve to
+     the same file name.  If both are downloaded, the second download
+     overwrites the first.  When that happens, dissociate the file
+     from its old URL before recording the new mapping.  */
+
+ if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
+ {
+ if (0 == strcmp (url, old_url))
+ /* We have somehow managed to download the same URL twice.
+ Nothing to do. */
+ return;
+
+ if (match_except_index (url, old_url)
+ && !hash_table_contains (dl_url_file_map, url))
+ /* The two URLs differ only in the "index.html" ending. For
+ example, one is "http://www.server.com/", and the other is
+ "http://www.server.com/index.html". Don't remove the old
+ one, just add the new one as a non-canonical entry. */
+ goto url_only;
+
+ hash_table_remove (dl_file_url_map, file);
+ xfree (old_file);
+ xfree (old_url);
+
+ /* Remove all the URLs that point to this file. Yes, there can
+ be more than one such URL, because we store redirections as
+ multiple entries in dl_url_file_map. For example, if URL1
+ redirects to URL2 which gets downloaded to FILE, we map both
+ URL1 and URL2 to FILE in dl_url_file_map. (dl_file_url_map
+ only points to URL2.) When another URL gets loaded to FILE,
+ we want both URL1 and URL2 dissociated from it.
+
+ This is a relatively expensive operation because it performs
+ a linear search of the whole hash table, but it should be
+ called very rarely, only when two URLs resolve to the same
+ file name, *and* the "<file>.1" extensions are turned off.
+ In other words, almost never. */
+ dissociate_urls_from_file (file);
+ }
+
+ hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
+
+ url_only:
+ /* A URL->FILE mapping is not possible without a FILE->URL mapping.
+ If the latter were present, it should have been removed by the
+ above `if'. So we could write:
+
+ assert (!hash_table_contains (dl_url_file_map, url));
+
+ The above is correct when running in recursive mode where the
+ same URL always resolves to the same file. But if you do
+ something like:
+
+ wget URL URL
+
+ then the first URL will resolve to "FILE", and the other to
+ "FILE.1". In that case, FILE.1 will not be found in
+ dl_file_url_map, but URL will still point to FILE in
+ dl_url_file_map. */
+ if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
+ {
+ hash_table_remove (dl_url_file_map, url);
+ xfree (old_url);
+ xfree (old_file);
+ }
+
+ hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
}
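+
+/* Worked example with made-up URLs.  Suppose the same page is saved
+   twice under one local name, once as "http://host/dir/index.html"
+   and once as "http://host/dir/":
+
+     register_download ("http://host/dir/index.html", "host/dir/index.html");
+     register_download ("http://host/dir/", "host/dir/index.html");
+
+   The second call takes the match_except_index shortcut above:
+   dl_url_file_map ends up mapping both URLs to "host/dir/index.html",
+   while dl_file_url_map keeps its single entry pointing back to
+   "http://host/dir/index.html". */
+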
/* Register that FROM has been redirected to TO. This assumes that TO
{
char *file;
- if (!opt.convert_links)
- return;
+ ENSURE_TABLES_EXIST;
file = hash_table_get (dl_url_file_map, to);
assert (file != NULL);
hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
}
-/* Register that URL corresponds to the HTML file FILE. */
+/* Register that FILE has been deleted. */
+
+static void
+register_delete_file (const char *file)
+{
+ char *old_url, *old_file;
+
+ ENSURE_TABLES_EXIST;
+
+ if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
+ return;
+
+ hash_table_remove (dl_file_url_map, file);
+ xfree (old_file);
+ xfree (old_url);
+ dissociate_urls_from_file (file);
+}
+
+/* Register that FILE is an HTML file that has been downloaded. */
void
register_html (const char *url, const char *file)
{
- if (!opt.convert_links)
+ if (!downloaded_html_set)
+ downloaded_html_set = make_string_hash_table (0);
+ else if (hash_table_contains (downloaded_html_set, file))
return;
- downloaded_html_files = slist_prepend (downloaded_html_files, file);
+
+ /* The set and the list should use the same copy of FILE, but the
+ slist interface insists on strduping the string it gets. Oh
+ well. */
+ string_set_add (downloaded_html_set, file);
+ downloaded_html_list = slist_prepend (downloaded_html_list, file);
}
/* This function is called when the retrieval is done to convert the
convert_all_links (void)
{
slist *html;
- struct wget_timer *timer;
long msecs;
int file_count = 0;
- timer = wtimer_new ();
+ struct wget_timer *timer = wtimer_new ();
-  /* Destructively reverse downloaded_html_files to get it in the right order.
-     recursive_retrieve() used slist_prepend() consistently. */
-  downloaded_html_files = slist_nreverse (downloaded_html_files);
+  /* Destructively reverse downloaded_html_list to get it in the right
+     order.  register_html() built it with slist_prepend(). */
+  downloaded_html_list = slist_nreverse (downloaded_html_list);
- for (html = downloaded_html_files; html; html = html->next)
+ for (html = downloaded_html_list; html; html = html->next)
{
struct urlpos *urls, *cur_url;
char *url;
-
- DEBUGP (("Rescanning %s\n", html->string));
+ char *file = html->string;
/* Determine the URL of the HTML file. get_urls_html will need
it. */
- url = hash_table_get (dl_file_url_map, html->string);
- if (url)
- DEBUGP (("It should correspond to %s.\n", url));
- else
- DEBUGP (("I cannot find the corresponding URL.\n"));
+ url = hash_table_get (dl_file_url_map, file);
+ if (!url)
+ {
+ DEBUGP (("Apparently %s has been removed.\n", file));
+ continue;
+ }
+
+ DEBUGP (("Scanning %s (from %s)\n", file, url));
/* Parse the HTML file... */
- urls = get_urls_html (html->string, url, FALSE, NULL);
+ urls = get_urls_html (file, url, NULL);
/* We don't respect meta_disallow_follow here because, even if
the file is not followed, we might still want to convert the
a URL was downloaded. Downloaded URLs will be converted
ABS2REL, whereas non-downloaded will be converted REL2ABS. */
local_name = hash_table_get (dl_url_file_map, u->url);
- if (local_name)
- DEBUGP (("%s marked for conversion, local %s\n",
- u->url, local_name));
/* Decide on the conversion type. */
if (local_name)
`--cut-dirs', etc.) */
cur_url->convert = CO_CONVERT_TO_RELATIVE;
cur_url->local_name = xstrdup (local_name);
+ DEBUGP (("will convert url %s to local %s\n", u->url, local_name));
}
else
{
if (!cur_url->link_complete_p)
cur_url->convert = CO_CONVERT_TO_COMPLETE;
cur_url->local_name = NULL;
+ DEBUGP (("will convert url %s to complete\n", u->url));
}
}
/* Convert the links in the file. */
- convert_links (html->string, urls);
+ convert_links (file, urls);
++file_count;
/* Free the data. */
hash_table_destroy (dl_url_file_map);
dl_url_file_map = NULL;
}
- slist_free (downloaded_html_files);
- downloaded_html_files = NULL;
+  if (downloaded_html_set)
+    string_set_free (downloaded_html_set);
+  downloaded_html_set = NULL;
+ slist_free (downloaded_html_list);
+ downloaded_html_list = NULL;
}
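+
+/* Overall flow, sketched for orientation (the call sites named here are
+   assumptions about the rest of Wget, not something this file enforces):
+   during retrieval every saved file is recorded and HTML files are
+   additionally remembered,
+
+     register_download (url, file);      (every successful download)
+     register_html (url, file);          (only for files detected as HTML)
+
+   and after the run convert_all_links () rescans each file on
+   downloaded_html_list: links whose URL appears in dl_url_file_map are
+   rewritten as relative references to the local copy
+   (CO_CONVERT_TO_RELATIVE); all other links are completed into absolute
+   URLs (CO_CONVERT_TO_COMPLETE). */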