GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
+ (at your option) any later version.
GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+In addition, as a special exception, the Free Software Foundation
+gives permission to link the code of its release of Wget with the
+OpenSSL project's "OpenSSL" library (or with modified versions of it
+that use the same license as the "OpenSSL" library), and distribute
+the linked executables. You must obey the GNU General Public License
+in all respects for all of the code used other than "OpenSSL". If you
+modify this file, you may extend this exception to your version of the
+file, but you are not obligated to do so. If you do not wish to do
+so, delete this exception statement from your version. */
#include <config.h>
#include "utils.h"
#include "retr.h"
#include "ftp.h"
-#include "fnmatch.h"
#include "host.h"
#include "hash.h"
#include "res.h"
+#include "convert.h"
#ifndef errno
extern int errno;
#endif
extern char *version_string;
+extern LARGE_INT total_downloaded_bytes;
-static struct hash_table *dl_file_url_map;
-static struct hash_table *dl_url_file_map;
-
-/* List of HTML files downloaded in this Wget run. Used for link
- conversion after Wget is done. This list should only be traversed
- in order. If you need to check whether a file has been downloaded,
- use a hash table, e.g. dl_file_url_map. */
-static slist *downloaded_html_files;
-
-static void register_delete_file PARAMS ((const char *));
+extern struct hash_table *dl_url_file_map;
+extern struct hash_table *downloaded_html_set;
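/* Note: dl_url_file_map and downloaded_html_set (together with the
   register_download / convert_all_links machinery removed at the end
   of this patch) are now defined outside this file, presumably in the
   new convert module included above; recur.c only consults them.  */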
\f
/* Functions for maintaining the URL queue. */
struct queue_element {
- const char *url;
- const char *referer;
- int depth;
- struct queue_element *next;
+ const char *url; /* the URL to download */
+ const char *referer; /* the referring document */
+ int depth; /* the depth */
+ unsigned int html_allowed :1; /* whether the document is allowed to
+ be treated as HTML. */
+
+ struct queue_element *next; /* next element in queue */
};
struct url_queue {
static struct url_queue *
url_queue_new (void)
{
- struct url_queue *queue = xmalloc (sizeof (*queue));
- memset (queue, '\0', sizeof (*queue));
+ struct url_queue *queue = xnew0 (struct url_queue);
return queue;
}
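/* Note, not part of this patch: xnew and xnew0, used here and below,
   are assumed to be thin allocation helpers roughly equivalent to

     #define xnew(type)  ((type *) xmalloc (sizeof (type)))
     #define xnew0(type) ((type *) memset (xnew (type), '\0', sizeof (type)))

   i.e. xnew0 folds the explicit xmalloc + memset pattern removed above
   into a single expression.  The exact definitions live in Wget's
   allocation headers.  */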
static void
url_enqueue (struct url_queue *queue,
- const char *url, const char *referer, int depth)
+ const char *url, const char *referer, int depth, int html_allowed)
{
- struct queue_element *qel = xmalloc (sizeof (*qel));
+ struct queue_element *qel = xnew (struct queue_element);
qel->url = url;
qel->referer = referer;
qel->depth = depth;
+ qel->html_allowed = html_allowed;
qel->next = NULL;
++queue->count;
static int
url_dequeue (struct url_queue *queue,
- const char **url, const char **referer, int *depth)
+ const char **url, const char **referer, int *depth,
+ int *html_allowed)
{
struct queue_element *qel = queue->head;
*url = qel->url;
*referer = qel->referer;
*depth = qel->depth;
+ *html_allowed = qel->html_allowed;
--queue->count;
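/* Illustrative usage sketch, not part of this patch: how the queue
   primitives above fit together once the html_allowed flag is added.
   The URL literal is a made-up example; string ownership mirrors the
   real callers below (the queue stores the pointers, the dequeuer
   frees them).  */
static void
queue_usage_example (void)
{
  struct url_queue *queue = url_queue_new ();
  char *url, *referer;
  int depth, html_allowed;

  /* Seed the queue with one URL at depth 0, allowed to be HTML.  */
  url_enqueue (queue, xstrdup ("http://example.com/"), NULL, 0, 1);

  /* Drain the queue; url_dequeue returns false once it is empty.  */
  while (url_dequeue (queue, (const char **)&url, (const char **)&referer,
                      &depth, &html_allowed))
    {
      /* ...retrieve URL here, enqueueing its children at depth + 1...  */
      xfree (url);
      FREE_MAYBE (referer);
    }
}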
uerr_t status = RETROK;
/* The queue of URLs we need to load. */
- struct url_queue *queue = url_queue_new ();
+ struct url_queue *queue;
/* The URLs we do not wish to enqueue, because they are already in
the queue, but haven't been downloaded yet. */
- struct hash_table *blacklist = make_string_hash_table (0);
+ struct hash_table *blacklist;
+
+ int up_error_code;
+ struct url *start_url_parsed = url_parse (start_url, &up_error_code);
+
+ if (!start_url_parsed)
+ {
+ logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
+ url_error (up_error_code));
+ return URLERROR;
+ }
- /* We'll need various components of this, so better get it over with
- now. */
- struct url *start_url_parsed = url_parse (start_url, NULL);
+ queue = url_queue_new ();
+ blacklist = make_string_hash_table (0);
- url_enqueue (queue, xstrdup (start_url), NULL, 0);
- string_set_add (blacklist, start_url);
+ /* Enqueue the starting URL. Use start_url_parsed->url rather than
+ just URL so we enqueue the canonical form of the URL. */
+ url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, 1);
+ string_set_add (blacklist, start_url_parsed->url);
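  /* Illustrative example (hypothetical value): url_parse () yields a
     canonical spelling, so e.g. "HTTP://Example.COM/index.html" is
     enqueued and blacklisted as "http://example.com/index.html", the
     same form in which child links (child->url->url) are later
     compared and enqueued.  */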
while (1)
{
int descend = 0;
char *url, *referer, *file = NULL;
- int depth;
+ int depth, html_allowed;
boolean dash_p_leaf_HTML = FALSE;
- if (downloaded_exceeds_quota ())
+ if (opt.quota && total_downloaded_bytes > opt.quota)
break;
if (status == FWRITEERR)
break;
if (!url_dequeue (queue,
(const char **)&url, (const char **)&referer,
- &depth))
+ &depth, &html_allowed))
break;
/* ...and download it. Note that this download is in most cases
the second time. */
if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
{
+ file = xstrdup (hash_table_get (dl_url_file_map, url));
+
DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
- url, (char *)hash_table_get (dl_url_file_map, url)));
+ url, file));
+
+ if (html_allowed
+ && downloaded_html_set
+ && string_set_contains (downloaded_html_set, file))
+ descend = 1;
}
else
{
int oldrec = opt.recursive;
opt.recursive = 0;
- status = retrieve_url (url, &file, &redirected, NULL, &dt);
+ status = retrieve_url (url, &file, &redirected, referer, &dt);
opt.recursive = oldrec;
- if (file && status == RETROK
+ if (html_allowed && file && status == RETROK
&& (dt & RETROKF) && (dt & TEXTHTML))
descend = 1;
blacklist))
{
url_enqueue (queue, xstrdup (child->url->url),
- xstrdup (url), depth + 1);
+ xstrdup (url), depth + 1,
+ child->link_expect_html);
/* We blacklist the URL we have enqueued, because we
don't want to enqueue (and hence download) the
same URL twice. */
now. */
{
char *d1, *d2;
- int d3;
- while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))
+ int d3, d4;
+ while (url_dequeue (queue,
+ (const char **)&d1, (const char **)&d2, &d3, &d4))
{
xfree (d1);
FREE_MAYBE (d2);
url_free (start_url_parsed);
string_set_free (blacklist);
- if (downloaded_exceeds_quota ())
+ if (opt.quota && total_downloaded_bytes > opt.quota)
return QUOTEXC;
else if (status == FWRITEERR)
return FWRITEERR;
{
struct url *u = upos->url;
const char *url = u->url;
+ int u_scheme_like_http;
DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));
More time- and memory-consuming tests should be put later on
the list. */
+ /* Determine whether the URL under consideration has an HTTP-like scheme. */
+ u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);
+
/* 1. Schemes other than HTTP are normally not recursed into. */
- if (u->scheme != SCHEME_HTTP
- && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
+ if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
{
DEBUGP (("Not following non-HTTP schemes.\n"));
goto out;
/* 2. If it is an absolute link and they are not followed, throw it
out. */
- if (u->scheme == SCHEME_HTTP)
+ if (u_scheme_like_http)
if (opt.relative_only && !upos->link_relative_p)
{
DEBUGP (("It doesn't really look like a relative link.\n"));
opt.no_parent. Also ignore it for documents needed to display
the parent page when in -p mode. */
if (opt.no_parent
- && u->scheme == start_url_parsed->scheme
+ && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
&& 0 == strcasecmp (u->host, start_url_parsed->host)
&& u->port == start_url_parsed->port
&& !(opt.page_requisites && upos->link_inline_p))
}
}
- /* 6. */
- {
- char *suf;
- /* Check for acceptance/rejection rules. We ignore these rules
- for HTML documents because they might lead to other files which
- need to be downloaded. Of course, we don't know which
- documents are HTML before downloading them, so we guess.
-
- A file is subject to acceptance/rejection rules if:
-
- * u->file is not "" (i.e. it is not a directory)
- and either:
- + there is no file suffix,
- + or there is a suffix, but is not "html" or "htm",
- + both:
- - recursion is not infinite,
- - and we are at its very end. */
-
- if (u->file[0] != '\0'
- && ((suf = suffix (url)) == NULL
- || (0 != strcmp (suf, "html") && 0 != strcmp (suf, "htm"))
- || (opt.reclevel != INFINITE_RECURSION && depth >= opt.reclevel)))
- {
- if (!acceptable (u->file))
- {
- DEBUGP (("%s (%s) does not match acc/rej rules.\n",
- url, u->file));
- goto out;
- }
- }
- }
+ /* 6. Check for acceptance/rejection rules. We ignore these rules
+ for directories (no file name to match) and for HTML documents,
+ which might lead to other files that do need to be downloaded.
+ That is, unless we've exhausted the recursion depth anyway. */
+ if (u->file[0] != '\0'
+ && !(has_html_suffix_p (u->file)
+ && depth != INFINITE_RECURSION
+ && depth < opt.reclevel - 1))
+ {
+ if (!acceptable (u->file))
+ {
+ DEBUGP (("%s (%s) does not match acc/rej rules.\n",
+ url, u->file));
+ goto out;
+ }
+ }
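  /* Worked example for rule 6 (assuming opt.reclevel is 3, i.e. a
     finite -l value): "photo.jpg" is matched against the acc/rej rules
     at every depth because it has no HTML suffix; "page.html" found at
     depth 1 is exempt, since it may still yield children to download,
     while the same file at depth 2, the last level, is matched like
     any other file.  */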
/* 7. */
- if (u->scheme == parent->scheme)
+ if (schemes_are_similar_p (u->scheme, parent->scheme))
if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
{
DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
}
/* 8. */
- if (opt.use_robots && u->scheme == SCHEME_HTTP)
+ if (opt.use_robots && u_scheme_like_http)
{
struct robot_specs *specs = res_get_specs (u->host, u->port);
if (!specs)
new_parsed = url_parse (redirected, NULL);
assert (new_parsed != NULL);
- upos = xmalloc (sizeof (struct urlpos));
- memset (upos, 0, sizeof (*upos));
+ upos = xnew0 (struct urlpos);
upos->url = new_parsed;
success = download_child_p (upos, orig_parsed, depth,
return success;
}
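/* Illustrative sketch, not part of this patch: the behavior assumed of
   schemes_are_similar_p, on which the u_scheme_like_http and parent
   scheme checks in download_child_p rely.  The real implementation
   lives in url.c; the assumption here is only that two schemes are
   "similar" when they are identical, or when both are HTTP-like
   (HTTP vs. HTTPS).  */
static int
schemes_are_similar_sketch (enum url_scheme a, enum url_scheme b)
{
  if (a == b)
    return 1;
#ifdef HAVE_SSL
  if ((a == SCHEME_HTTP && b == SCHEME_HTTPS)
      || (a == SCHEME_HTTPS && b == SCHEME_HTTP))
    return 1;
#endif
  return 0;
}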
-
-\f
-#define ENSURE_TABLES_EXIST do { \
- if (!dl_file_url_map) \
- dl_file_url_map = make_string_hash_table (0); \
- if (!dl_url_file_map) \
- dl_url_file_map = make_string_hash_table (0); \
-} while (0)
-
-static int
-dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
-{
- char *mapping_url = (char *)key;
- char *mapping_file = (char *)value;
- char *file = (char *)arg;
-
- if (0 == strcmp (mapping_file, file))
- {
- hash_table_remove (dl_url_file_map, mapping_url);
- xfree (mapping_url);
- xfree (mapping_file);
- }
-
- /* Continue mapping. */
- return 0;
-}
-
-/* Remove all associations from various URLs to FILE from dl_url_file_map. */
-
-static void
-dissociate_urls_from_file (const char *file)
-{
- hash_table_map (dl_url_file_map, dissociate_urls_from_file_mapper,
- (char *)file);
-}
-
-/* Register that URL has been successfully downloaded to FILE. This
- is used by the link conversion code to convert references to URLs
- to references to local files. It is also being used to check if a
- URL has already been downloaded. */
-
-void
-register_download (const char *url, const char *file)
-{
- char *old_file, *old_url;
-
- ENSURE_TABLES_EXIST;
-
- /* With some forms of retrieval, it is possible, although not
- likely, for different URLs to resolve to the same file name. For
- example, "http://www.server.com/" and
- "http://www.server.com/index.html" will both resolve to the same
- file, "index.html". If both are downloaded, the second download
- will override the first one.
-
- If that happens, dissociate the old file name from the URL. */
-
- if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
- {
- if (0 == strcmp (url, old_url))
- /* We have somehow managed to download the same URL twice.
- Nothing to do. */
- return;
-
- hash_table_remove (dl_file_url_map, file);
- xfree (old_file);
- xfree (old_url);
-
- /* Remove all the URLs that point to this file. Yes, there can
- be more than one such URL, because we store redirections as
- multiple entries in dl_url_file_map. For example, if URL1
- redirects to URL2 which gets downloaded to FILE, we map both
- URL1 and URL2 to FILE in dl_url_file_map. (dl_file_url_map
- only points to URL2.) When another URL gets loaded to FILE,
- we want both URL1 and URL2 dissociated from it.
-
- This is a relatively expensive operation because it performs
- a linear search of the whole hash table, but it should be
- called very rarely, only when two URLs resolve to the same
- file name, *and* the "<file>.1" extensions are turned off.
- In other words, almost never. */
- dissociate_urls_from_file (file);
- }
-
- /* A URL->FILE mapping is not possible without a FILE->URL mapping.
- If the latter were present, it should have been removed by the
- above `if'. */
- assert (!hash_table_contains (dl_url_file_map, url));
-
- hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
- hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
-}
-
-/* Register that FROM has been redirected to TO. This assumes that TO
- is successfully downloaded and already registered using
- register_download() above. */
-
-void
-register_redirection (const char *from, const char *to)
-{
- char *file;
-
- ENSURE_TABLES_EXIST;
-
- file = hash_table_get (dl_url_file_map, to);
- assert (file != NULL);
- if (!hash_table_contains (dl_url_file_map, from))
- hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
-}
-
-/* Register that the file has been deleted. */
-
-static void
-register_delete_file (const char *file)
-{
- char *old_url, *old_file;
-
- ENSURE_TABLES_EXIST;
-
- if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
- return;
-
- hash_table_remove (dl_file_url_map, file);
- xfree (old_file);
- xfree (old_url);
- dissociate_urls_from_file (file);
-}
-
-/* Register that FILE is an HTML file that has been downloaded. */
-
-void
-register_html (const char *url, const char *file)
-{
- if (!opt.convert_links)
- return;
- downloaded_html_files = slist_prepend (downloaded_html_files, file);
-}
-
-/* This function is called when the retrieval is done to convert the
- links that have been downloaded. It has to be called at the end of
- the retrieval, because only then does Wget know conclusively which
- URLs have been downloaded, and which not, so it can tell which
- direction to convert to.
-
- The "direction" means that the URLs to the files that have been
- downloaded get converted to the relative URL which will point to
- that file. And the other URLs get converted to the remote URL on
- the server.
-
- All the downloaded HTMLs are kept in downloaded_html_files, and
- downloaded URLs in urls_downloaded. All the information is
- extracted from these two lists. */
-
-void
-convert_all_links (void)
-{
- slist *html;
- struct wget_timer *timer;
- long msecs;
- int file_count = 0;
-
- timer = wtimer_new ();
-
- /* Destructively reverse downloaded_html_files to get it in the right order.
- recursive_retrieve() used slist_prepend() consistently. */
- downloaded_html_files = slist_nreverse (downloaded_html_files);
-
- for (html = downloaded_html_files; html; html = html->next)
- {
- struct urlpos *urls, *cur_url;
- char *url;
-
- /* Determine the URL of the HTML file. get_urls_html will need
- it. */
- url = hash_table_get (dl_file_url_map, html->string);
- if (!url)
- {
- DEBUGP (("Apparently %s has been removed.\n", html->string));
- continue;
- }
-
- DEBUGP (("Rescanning %s (from %s)\n", html->string, url));
-
- /* Parse the HTML file... */
- urls = get_urls_html (html->string, url, NULL);
-
- /* We don't respect meta_disallow_follow here because, even if
- the file is not followed, we might still want to convert the
- links that have been followed from other files. */
-
- for (cur_url = urls; cur_url; cur_url = cur_url->next)
- {
- char *local_name;
- struct url *u = cur_url->url;
-
- if (cur_url->link_base_p)
- {
- /* Base references have been resolved by our parser, so
- we turn the base URL into an empty string. (Perhaps
- we should remove the tag entirely?) */
- cur_url->convert = CO_NULLIFY_BASE;
- continue;
- }
-
- /* We decide the direction of conversion according to whether
- a URL was downloaded. Downloaded URLs will be converted
- ABS2REL, whereas non-downloaded will be converted REL2ABS. */
- local_name = hash_table_get (dl_url_file_map, u->url);
- if (local_name)
- DEBUGP (("%s marked for conversion, local %s\n",
- u->url, local_name));
-
- /* Decide on the conversion type. */
- if (local_name)
- {
- /* We've downloaded this URL. Convert it to relative
- form. We do this even if the URL already is in
- relative form, because our directory structure may
- not be identical to that on the server (think `-nd',
- `--cut-dirs', etc.) */
- cur_url->convert = CO_CONVERT_TO_RELATIVE;
- cur_url->local_name = xstrdup (local_name);
- }
- else
- {
- /* We haven't downloaded this URL. If it's not already
- complete (including a full host name), convert it to
- that form, so it can be reached while browsing this
- HTML locally. */
- if (!cur_url->link_complete_p)
- cur_url->convert = CO_CONVERT_TO_COMPLETE;
- cur_url->local_name = NULL;
- }
- }
-
- /* Convert the links in the file. */
- convert_links (html->string, urls);
- ++file_count;
-
- /* Free the data. */
- free_urlpos (urls);
- }
-
- msecs = wtimer_elapsed (timer);
- wtimer_delete (timer);
- logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"),
- file_count, (double)msecs / 1000);
-}
-
-/* Cleanup the data structures associated with recursive retrieving
- (the variables above). */
-void
-recursive_cleanup (void)
-{
- if (dl_file_url_map)
- {
- free_keys_and_values (dl_file_url_map);
- hash_table_destroy (dl_file_url_map);
- dl_file_url_map = NULL;
- }
- if (dl_url_file_map)
- {
- free_keys_and_values (dl_url_file_map);
- hash_table_destroy (dl_url_file_map);
- dl_url_file_map = NULL;
- }
- slist_free (downloaded_html_files);
- downloaded_html_files = NULL;
-}