static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;
-/* List of HTML files downloaded in this Wget run. Used for link
- conversion after Wget is done. This list should only be traversed
- in order. If you need to check whether a file has been downloaded,
- use a hash table, e.g. dl_file_url_map. */
-static slist *downloaded_html_files;
+/* List of HTML files downloaded in this Wget run, used for link
+ conversion after Wget is done. The list and the set contain the
+ same information, except the list maintains the order. Perhaps I
+   should get rid of the list; it's there for historical reasons. */
+static slist *downloaded_html_list;
+static struct hash_table *downloaded_html_set;
static void register_delete_file PARAMS ((const char *));
\f
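A note on the pairing the new comment describes: the set and the list hold the
same strings, with the set giving constant-time membership tests and the list
alone preserving download order for the later conversion pass. A minimal
standalone sketch of the pattern (toy types and names, not wget's
hash_table/slist API; a linear scan stands in for the real hash table, and
error checks are omitted):

    #include <stdlib.h>
    #include <string.h>

    struct node { char *s; struct node *next; };

    static struct node *order_list;  /* preserves first-seen order (prepended) */
    static struct node *member_set;  /* membership test; a hash table in wget */

    static int
    set_contains (const struct node *l, const char *s)
    {
      for (; l; l = l->next)
        if (strcmp (l->s, s) == 0)
          return 1;
      return 0;
    }

    static void
    record_once (const char *s)
    {
      struct node *a, *b;
      if (set_contains (member_set, s))
        return;                      /* duplicate: leave both unchanged */
      a = malloc (sizeof *a);
      b = malloc (sizeof *b);
      a->s = strdup (s);
      b->s = strdup (s);             /* two copies, as in wget (the slist
                                        interface strdups internally) */
      a->next = member_set;  member_set = a;
      b->next = order_list;  order_list = b;
    }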
uerr_t status = RETROK;
/* The queue of URLs we need to load. */
- struct url_queue *queue = url_queue_new ();
+ struct url_queue *queue;
/* The URLs we do not wish to enqueue, because they are already in
the queue, but haven't been downloaded yet. */
- struct hash_table *blacklist = make_string_hash_table (0);
+ struct hash_table *blacklist;
- /* We'll need various components of this, so better get it over with
- now. */
- struct url *start_url_parsed = url_parse (start_url, NULL);
+ int up_error_code;
+ struct url *start_url_parsed = url_parse (start_url, &up_error_code);
- url_enqueue (queue, xstrdup (start_url), NULL, 0);
- string_set_add (blacklist, start_url);
+ if (!start_url_parsed)
+ {
+ logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
+ url_error (up_error_code));
+ return URLERROR;
+ }
+
+ queue = url_queue_new ();
+ blacklist = make_string_hash_table (0);
+
+ /* Enqueue the starting URL. Use start_url_parsed->url rather than
+     just START_URL so we enqueue the canonical form of the URL. */
+ url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
+ string_set_add (blacklist, start_url_parsed->url);
while (1)
{
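Why enqueue the canonical form: the blacklist is keyed on exact strings, and
later sightings of the start URL arrive normalized by url_parse, so the raw
command-line spelling would never match them. A toy illustration; the
normalizer below only lowercases the scheme and host, while wget's url_parse
presumably also removes default ports, simplifies paths, and so on:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Lowercase everything before the path, i.e. scheme and host. */
    static void
    lower_scheme_and_host (char *u)
    {
      char *p = strstr (u, "://");
      char *path = p ? strchr (p + 3, '/') : NULL;
      char *end = path ? path : u + strlen (u);
      char *q;
      for (q = u; q < end; q++)
        *q = tolower ((unsigned char) *q);
    }

    int
    main (void)
    {
      char raw[] = "HTTP://Example.COM/index.html";
      const char *canonical = "http://example.com/index.html";

      printf ("raw matches:        %s\n", strcmp (raw, canonical) ? "no" : "yes");
      lower_scheme_and_host (raw);
      printf ("normalized matches: %s\n", strcmp (raw, canonical) ? "no" : "yes");
      return 0;
    }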
the second time. */
if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
{
+ file = xstrdup (hash_table_get (dl_url_file_map, url));
+
DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
- url, (char *)hash_table_get (dl_url_file_map, url)));
+ url, file));
+
+      if (downloaded_html_set
+          && string_set_contains (downloaded_html_set, file))
+ descend = 1;
}
else
{
int oldrec = opt.recursive;
opt.recursive = 0;
- status = retrieve_url (url, &file, &redirected, NULL, &dt);
+ status = retrieve_url (url, &file, &redirected, referer, &dt);
opt.recursive = oldrec;
if (file && status == RETROK
/* 1. Schemes other than HTTP are normally not recursed into. */
if (u->scheme != SCHEME_HTTP
+#ifdef HAVE_SSL
+ && u->scheme != SCHEME_HTTPS
+#endif
&& !(u->scheme == SCHEME_FTP && opt.follow_ftp))
{
DEBUGP (("Not following non-HTTP schemes.\n"));
/* 2. If it is an absolute link and they are not followed, throw it
out. */
- if (u->scheme == SCHEME_HTTP)
+ if (u->scheme == SCHEME_HTTP
+#ifdef HAVE_SSL
+ || u->scheme == SCHEME_HTTPS
+#endif
+ )
if (opt.relative_only && !upos->link_relative_p)
{
DEBUGP (("It doesn't really look like a relative link.\n"));
/* 6. */
{
- char *suf;
/* Check for acceptance/rejection rules. We ignore these rules
for HTML documents because they might lead to other files which
need to be downloaded. Of course, we don't know which
* u->file is not "" (i.e. it is not a directory)
and either:
+ there is no file suffix,
- + or there is a suffix, but is not "html" or "htm",
+      + or there is a suffix, but it is not "html" or "htm" or similar,
+ both:
- recursion is not infinite,
- and we are at its very end. */
if (u->file[0] != '\0'
- && ((suf = suffix (url)) == NULL
- || (0 != strcmp (suf, "html") && 0 != strcmp (suf, "htm"))
+ && (!has_html_suffix_p (url)
|| (opt.reclevel != INFINITE_RECURSION && depth >= opt.reclevel)))
{
if (!acceptable (u->file))
}
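The condition above folds the old suffix()/strcmp pair into a single
has_html_suffix_p call. A stand-in with the behavior the comment describes,
i.e. case-insensitive matching on the last path component, might look like the
following; wget's real helper lives elsewhere and may accept more variants:

    #include <string.h>
    #include <strings.h>             /* strcasecmp (POSIX) */

    /* True if NAME's last path component ends in an HTML-ish suffix. */
    static int
    html_suffix_p (const char *name)
    {
      const char *base = strrchr (name, '/');
      const char *dot;
      base = base ? base + 1 : name; /* examine the last component only */
      dot = strrchr (base, '.');
      if (dot == NULL)
        return 0;                    /* no suffix at all */
      return strcasecmp (dot + 1, "html") == 0
             || strcasecmp (dot + 1, "htm") == 0;
    }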
/* 8. */
- if (opt.use_robots && u->scheme == SCHEME_HTTP)
+  if (opt.use_robots && (u->scheme == SCHEME_HTTP
+#ifdef HAVE_SSL
+                         || u->scheme == SCHEME_HTTPS
+#endif
+                         ))
{
struct robot_specs *specs = res_get_specs (u->host, u->port);
if (!specs)
void
register_html (const char *url, const char *file)
{
- if (!opt.convert_links)
+ if (!downloaded_html_set)
+ downloaded_html_set = make_string_hash_table (0);
+  else if (string_set_contains (downloaded_html_set, file))
return;
- downloaded_html_files = slist_prepend (downloaded_html_files, file);
+
+ /* The set and the list should use the same copy of FILE, but the
+ slist interface insists on strduping the string it gets. Oh
+ well. */
+ string_set_add (downloaded_html_set, file);
+ downloaded_html_list = slist_prepend (downloaded_html_list, file);
}
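Behavioral note on register_html: the set is created lazily on the first call
(when it is empty no duplicate is possible, so skipping the containment check
there is safe), and re-registering the same local file returns early.
Hypothetical usage, URLs invented for illustration:

    register_html ("http://example.com/", "example.com/index.html");
    register_html ("http://example.com/index.html", "example.com/index.html");
    /* The second call is a no-op: downloaded_html_list keeps a single
       entry per file, which is what lets convert_all_links below drop
       its own `seen' table. */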
/* This function is called when the retrieval is done to convert the
int file_count = 0;
struct wget_timer *timer = wtimer_new ();
- struct hash_table *seen = make_string_hash_table (0);
-/* Destructively reverse downloaded_html_files to get it in the right order.
-   recursive_retrieve() used slist_prepend() consistently. */
+/* Destructively reverse downloaded_html_list to get it in the right order.
+   register_html() uses slist_prepend() consistently. */
- downloaded_html_files = slist_nreverse (downloaded_html_files);
+ downloaded_html_list = slist_nreverse (downloaded_html_list);
- for (html = downloaded_html_files; html; html = html->next)
+ for (html = downloaded_html_list; html; html = html->next)
{
struct urlpos *urls, *cur_url;
char *url;
char *file = html->string;
- /* Guard against duplicates. */
- if (string_set_contains (seen, file))
- continue;
- string_set_add (seen, file);
-
/* Determine the URL of the HTML file. get_urls_html will need
it. */
url = hash_table_get (dl_file_url_map, file);
wtimer_delete (timer);
logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"),
file_count, (double)msecs / 1000);
-
- string_set_free (seen);
}
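The conversion pass relies on slist_nreverse to restore chronological order
after the consistent slist_prepend calls. That is the classic destructive
single-pass list reversal; a sketch over a minimal node type:

    struct slist { char *string; struct slist *next; };

    /* Reverse in place: repoint each node at its predecessor and
       return the new head (the old tail).  O(n), no allocation. */
    static struct slist *
    nreverse (struct slist *l)
    {
      struct slist *prev = NULL;
      while (l != NULL)
        {
          struct slist *next = l->next;
          l->next = prev;
          prev = l;
          l = next;
        }
      return prev;
    }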
/* Cleanup the data structures associated with recursive retrieving
hash_table_destroy (dl_url_file_map);
dl_url_file_map = NULL;
}
- slist_free (downloaded_html_files);
- downloaded_html_files = NULL;
+  if (downloaded_html_set)
+    {
+      string_set_free (downloaded_html_set);
+      downloaded_html_set = NULL;
+    }
+  slist_free (downloaded_html_list);
+  downloaded_html_list = NULL;
}
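Finally, the teardown guards the set because it is created lazily and may
still be NULL, and string_set_free is assumed not to tolerate NULL the way
free(3) does; resetting the pointers afterwards keeps the cleanup idempotent.
The same idiom in isolation:

    /* Guarded free-and-reset: safe to run any number of times, and a
       later lazy re-creation starts from a clean slate.  destroy_fn is
       a placeholder for a destructor that rejects NULL. */
    #define DESTROY_AND_NULL(destroy_fn, p)  \
      do {                                   \
        if (p)                               \
          {                                  \
            destroy_fn (p);                  \
            (p) = NULL;                      \
          }                                  \
      } while (0)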