static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;
-/* List of HTML files downloaded in this Wget run. Used for link
- conversion after Wget is done. This list should only be traversed
- in order. If you need to check whether a file has been downloaded,
- use a hash table, e.g. dl_file_url_map. */
-static slist *downloaded_html_files;
+/* List of HTML files downloaded in this Wget run, used for link
+ conversion after Wget is done. The list and the set contain the
+ same information, except the list maintains the order. Perhaps I
+ should get rid of the list; it's there for historical reasons. */
+static slist *downloaded_html_list;
+static struct hash_table *downloaded_html_set;
static void register_delete_file PARAMS ((const char *));
\f
uerr_t status = RETROK;
/* The queue of URLs we need to load. */
- struct url_queue *queue = url_queue_new ();
+ struct url_queue *queue;
/* The URLs we do not wish to enqueue, because they are already in
the queue, but haven't been downloaded yet. */
- struct hash_table *blacklist = make_string_hash_table (0);
+ struct hash_table *blacklist;
- /* We'll need various components of this, so better get it over with
- now. */
- struct url *start_url_parsed = url_parse (start_url, NULL);
+ int up_error_code;
+ struct url *start_url_parsed = url_parse (start_url, &up_error_code);
- url_enqueue (queue, xstrdup (start_url), NULL, 0);
- string_set_add (blacklist, start_url);
+ if (!start_url_parsed)
+ {
+ logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
+ url_error (up_error_code));
+ return URLERROR;
+ }
+
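+ /* The queue and blacklist are allocated only after the start URL
+ has parsed successfully, so the early return above leaks nothing. */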
+ queue = url_queue_new ();
+ blacklist = make_string_hash_table (0);
+
+ /* Enqueue the starting URL. Use start_url_parsed->url rather than
+ just URL so we enqueue the canonical form of the URL. */
+ url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
+ string_set_add (blacklist, start_url_parsed->url);
while (1)
{
the second time. */
if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
{
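+ /* Take a private heap copy of the stored file name, so this branch
+ hands back a string the caller can treat exactly like one returned
+ by retrieve_url below. */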
+ file = xstrdup (hash_table_get (dl_url_file_map, url));
+
DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
- url, (char *)hash_table_get (dl_url_file_map, url)));
+ url, file));
+
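+ /* If the reused file is a registered HTML file, mark it for
+ descending so its links are extracted on this pass as well. */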
+ if (downloaded_html_set
+ && string_set_contains (downloaded_html_set, file))
+ descend = 1;
}
else
{
int oldrec = opt.recursive;
opt.recursive = 0;
- status = retrieve_url (url, &file, &redirected, NULL, &dt);
+ status = retrieve_url (url, &file, &redirected, referer, &dt);
opt.recursive = oldrec;
if (file && status == RETROK
/* 1. Schemes other than HTTP are normally not recursed into. */
if (u->scheme != SCHEME_HTTP
+#ifdef HAVE_SSL
+ && u->scheme != SCHEME_HTTPS
+#endif
&& !(u->scheme == SCHEME_FTP && opt.follow_ftp))
{
DEBUGP (("Not following non-HTTP schemes.\n"));
/* 2. If it is an absolute link and they are not followed, throw it
out. */
- if (u->scheme == SCHEME_HTTP)
+ if (u->scheme == SCHEME_HTTP
+#ifdef HAVE_SSL
+ || u->scheme == SCHEME_HTTPS
+#endif
+ )
if (opt.relative_only && !upos->link_relative_p)
{
DEBUGP (("It doesn't really look like a relative link.\n"));
}
}
- /* 6. */
- {
- char *suf;
- /* Check for acceptance/rejection rules. We ignore these rules
- for HTML documents because they might lead to other files which
- need to be downloaded. Of course, we don't know which
- documents are HTML before downloading them, so we guess.
-
- A file is subject to acceptance/rejection rules if:
-
- * u->file is not "" (i.e. it is not a directory)
- and either:
- + there is no file suffix,
- + or there is a suffix, but is not "html" or "htm",
- + both:
- - recursion is not infinite,
- - and we are at its very end. */
-
- if (u->file[0] != '\0'
- && ((suf = suffix (url)) == NULL
- || (0 != strcmp (suf, "html") && 0 != strcmp (suf, "htm"))
- || (opt.reclevel != INFINITE_RECURSION && depth >= opt.reclevel)))
- {
- if (!acceptable (u->file))
- {
- DEBUGP (("%s (%s) does not match acc/rej rules.\n",
- url, u->file));
- goto out;
- }
- }
- }
+ /* 6. Check for acceptance/rejection rules. We ignore these rules
+ for directories (no file name to match) and for HTML documents,
+ which might lead to other files that do need to be downloaded.
+ That is, unless we've exhausted the recursion depth anyway. */
+ if (u->file[0] != '\0'
+ && !(has_html_suffix_p (u->file)
+ && (opt.reclevel == INFINITE_RECURSION
+ || depth < opt.reclevel - 1)))
+ {
+ if (!acceptable (u->file))
+ {
+ DEBUGP (("%s (%s) does not match acc/rej rules.\n",
+ url, u->file));
+ goto out;
+ }
+ }
/* 7. */
if (u->scheme == parent->scheme)
}
/* 8. */
- if (opt.use_robots && u->scheme == SCHEME_HTTP)
+ if (opt.use_robots && (u->scheme == SCHEME_HTTP
+#ifdef HAVE_SSL
+ || u->scheme == SCHEME_HTTPS
+#endif
+ ))
{
struct robot_specs *specs = res_get_specs (u->host, u->port);
if (!specs)
dl_url_file_map = make_string_hash_table (0); \
} while (0)
+/* Return 1 if S1 and S2 are the same, except for "/index.html". The
+ five cases in which it returns 1 are (substitute any substring
+ for "foo"):
+
+ m("foo/index.html", "foo/") ==> 1
+ m("foo/", "foo/index.html") ==> 1
+ m("foo", "foo/index.html") ==> 1
+ m("foo", "foo/") ==> 1
+ m("foo", "foo") ==> 1 */
+
+static int
+match_except_index (const char *s1, const char *s2)
+{
+ int i;
+ const char *lng;
+
+ /* Skip common substring. */
+ for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)
+ ;
+ if (i == 0)
+ /* Strings differ at the very beginning -- bail out. We need to
+ check this explicitly to avoid `lng - 1' reading outside the
+ array. */
+ return 0;
+
+ if (!*s1 && !*s2)
+ /* Both strings hit EOF -- strings are equal. */
+ return 1;
+ else if (*s1 && *s2)
+ /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux". */
+ return 0;
+ else if (*s1)
+ /* S1 is the longer one. */
+ lng = s1;
+ else
+ /* S2 is the longer one. */
+ lng = s2;
+
+ /* LNG now points just past the common prefix. Left-hand case:
+ m("foo", "foo/index.html"), where LNG points at the '/'.
+ Right-hand case: m("foo/", "foo/index.html"), where LNG points
+ one past the '/'. */
+
+ if (*lng != '/')
+ /* The right-hand case. */
+ --lng;
+
+ if (*lng == '/' && *(lng + 1) == '\0')
+ /* foo */
+ /* foo/ */
+ return 1;
+
+ return 0 == strcmp (lng, "/index.html");
+}
+
static int
dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
{
ENSURE_TABLES_EXIST;
- /* With some forms of retrieval, it is possible, although not
- likely, for different URLs to resolve to the same file name. For
- example, "http://www.server.com/" and
- "http://www.server.com/index.html" will both resolve to the same
- file, "index.html". If both are downloaded, the second download
- will override the first one.
-
- If that happens, dissociate the old file name from the URL. */
+ /* With some forms of retrieval, it is possible, although not likely
+ or particularly desirable, for different URLs to resolve to the
+ same file name. If both are downloaded, the second download will
+ override the first one. When that happens, dissociate the old
+ file name from the URL. */
if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
{
Nothing to do. */
return;
+ if (match_except_index (url, old_url)
+ && !hash_table_contains (dl_url_file_map, url))
+ /* The two URLs differ only in the "index.html" ending. For
+ example, one is "http://www.server.com/", and the other is
+ "http://www.server.com/index.html". Don't remove the old
+ one, just add the new one as a non-canonical entry. */
+ goto url_only;
+
hash_table_remove (dl_file_url_map, file);
xfree (old_file);
xfree (old_url);
dissociate_urls_from_file (file);
}
+ hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
+
+ url_only:
/* A URL->FILE mapping is not possible without a FILE->URL mapping.
If the latter were present, it should have been removed by the
- above `if'. */
- assert (!hash_table_contains (dl_url_file_map, url));
+ above `if'. So we could write:
+
+ assert (!hash_table_contains (dl_url_file_map, url));
+
+ The above is correct when running in recursive mode where the
+ same URL always resolves to the same file. But if you do
+ something like:
+
+ wget URL URL
+
+ then the first URL will resolve to "FILE", and the other to
+ "FILE.1". In that case, FILE.1 will not be found in
+ dl_file_url_map, but URL will still point to FILE in
+ dl_url_file_map. */
+ if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
+ {
+ hash_table_remove (dl_url_file_map, url);
+ xfree (old_url);
+ xfree (old_file);
+ }
- hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
}
void
register_html (const char *url, const char *file)
{
- if (!opt.convert_links)
+ if (!downloaded_html_set)
+ downloaded_html_set = make_string_hash_table (0);
+ else if (hash_table_contains (downloaded_html_set, file))
return;
- downloaded_html_files = slist_prepend (downloaded_html_files, file);
+
+ /* The set and the list should use the same copy of FILE, but the
+ slist interface insists on strduping the string it gets. Oh
+ well. */
+ string_set_add (downloaded_html_set, file);
+ downloaded_html_list = slist_prepend (downloaded_html_list, file);
}
/* This function is called when the retrieval is done to convert the
convert_all_links (void)
{
slist *html;
- struct wget_timer *timer;
long msecs;
int file_count = 0;
- timer = wtimer_new ();
+ struct wget_timer *timer = wtimer_new ();
- /* Destructively reverse downloaded_html_files to get it in the right order.
- recursive_retrieve() used slist_prepend() consistently. */
+ /* Destructively reverse downloaded_html_list to get it in the right order.
+ register_html() used slist_prepend() consistently. */
- downloaded_html_files = slist_nreverse (downloaded_html_files);
+ downloaded_html_list = slist_nreverse (downloaded_html_list);
- for (html = downloaded_html_files; html; html = html->next)
+ for (html = downloaded_html_list; html; html = html->next)
{
struct urlpos *urls, *cur_url;
char *url;
+ char *file = html->string;
/* Determine the URL of the HTML file. get_urls_html will need
it. */
- url = hash_table_get (dl_file_url_map, html->string);
+ url = hash_table_get (dl_file_url_map, file);
if (!url)
{
- DEBUGP (("Apparently %s has been removed.\n", html->string));
+ DEBUGP (("Apparently %s has been removed.\n", file));
continue;
}
- DEBUGP (("Rescanning %s (from %s)\n", html->string, url));
+ DEBUGP (("Scanning %s (from %s)\n", file, url));
/* Parse the HTML file... */
- urls = get_urls_html (html->string, url, NULL);
+ urls = get_urls_html (file, url, NULL);
/* We don't respect meta_disallow_follow here because, even if
the file is not followed, we might still want to convert the
a URL was downloaded. Downloaded URLs will be converted
ABS2REL, whereas non-downloaded will be converted REL2ABS. */
local_name = hash_table_get (dl_url_file_map, u->url);
- if (local_name)
- DEBUGP (("%s marked for conversion, local %s\n",
- u->url, local_name));
/* Decide on the conversion type. */
if (local_name)
`--cut-dirs', etc.) */
cur_url->convert = CO_CONVERT_TO_RELATIVE;
cur_url->local_name = xstrdup (local_name);
+ DEBUGP (("will convert url %s to local %s\n", u->url, local_name));
}
else
{
if (!cur_url->link_complete_p)
cur_url->convert = CO_CONVERT_TO_COMPLETE;
cur_url->local_name = NULL;
+ DEBUGP (("will convert url %s to complete\n", u->url));
}
}
/* Convert the links in the file. */
- convert_links (html->string, urls);
+ convert_links (file, urls);
++file_count;
/* Free the data. */
hash_table_destroy (dl_url_file_map);
dl_url_file_map = NULL;
}
- slist_free (downloaded_html_files);
- downloaded_html_files = NULL;
+ if (downloaded_html_set)
+ string_set_free (downloaded_html_set);
+ downloaded_html_set = NULL;
+ slist_free (downloaded_html_list);
+ downloaded_html_list = NULL;
}