/* Handling of recursive HTTP retrieving.
- Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+ 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This file is part of GNU Wget.
GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
+the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with Wget; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+along with Wget. If not, see <http://www.gnu.org/licenses/>.
-#include <config.h>
+Additional permission under GNU GPL version 3 section 7
+
+If you modify this program, or any covered work, by linking or
+combining it with the OpenSSL project's OpenSSL library (or a
+modified version of that library), containing parts covered by the
+terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
+grants you additional permission to convey the resulting work.
+Corresponding Source for a non-source form of such a combination
+shall include the source code for the parts of OpenSSL used as well
+as that of the covered work. */
+
+#include "wget.h"
#include <stdio.h>
#include <stdlib.h>
-#ifdef HAVE_STRING_H
-# include <string.h>
-#else
-# include <strings.h>
-#endif /* HAVE_STRING_H */
+#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>
-#include <sys/types.h>
-#include "wget.h"
#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "ftp.h"
-#include "fnmatch.h"
#include "host.h"
#include "hash.h"
#include "res.h"
-
-#ifndef errno
-extern int errno;
-#endif
-
-extern char *version_string;
-
-static struct hash_table *dl_file_url_map;
-static struct hash_table *dl_url_file_map;
-
-/* List of HTML files downloaded in this Wget run. Used for link
- conversion after Wget is done. This list should only be traversed
- in order. If you need to check whether a file has been downloaded,
- use a hash table, e.g. dl_file_url_map. */
-static slist *downloaded_html_files;
+#include "convert.h"
+#include "html-url.h"
+#include "css-url.h"
+#include "spider.h"
+#include "iri.h"
\f
/* Functions for maintaining the URL queue. */
struct queue_element {
- const char *url;
- const char *referer;
- int depth;
- struct queue_element *next;
+ const char *url; /* the URL to download */
+ const char *referer; /* the referring document */
+ int depth; /* the depth */
+ bool html_allowed; /* whether the document is allowed to
+ be treated as HTML. */
+ char *remote_encoding; /* charset in effect when the URL
+ was enqueued */
+ bool css_allowed; /* whether the document is allowed to
+ be treated as CSS. */
+ struct queue_element *next; /* next element in queue */
};
struct url_queue {
static struct url_queue *
url_queue_new (void)
{
- struct url_queue *queue = xmalloc (sizeof (*queue));
- memset (queue, '\0', sizeof (*queue));
+ struct url_queue *queue = xnew0 (struct url_queue);
return queue;
}
static void
url_enqueue (struct url_queue *queue,
- const char *url, const char *referer, int depth)
+ const char *url, const char *referer, int depth,
+ bool html_allowed, bool css_allowed)
{
- struct queue_element *qel = xmalloc (sizeof (*qel));
+ struct queue_element *qel = xnew (struct queue_element);
+ char *charset = get_current_charset ();
qel->url = url;
qel->referer = referer;
qel->depth = depth;
+ qel->html_allowed = html_allowed;
+ qel->css_allowed = css_allowed;
qel->next = NULL;
+ if (charset)
+ qel->remote_encoding = xstrdup (charset);
+ else
+ qel->remote_encoding = NULL;
+
++queue->count;
if (queue->count > queue->maxcount)
queue->maxcount = queue->count;
DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
+
if (queue->tail)
queue->tail->next = qel;
queue->tail = qel;
if (!queue->head)
  queue->head = queue->tail;
}
-/* Take a URL out of the queue. Return 1 if this operation succeeded,
- or 0 if the queue is empty. */
+/* Take a URL out of the queue. Return true if this operation
+ succeeded, or false if the queue is empty. */
-static int
+static bool
url_dequeue (struct url_queue *queue,
- const char **url, const char **referer, int *depth)
+ const char **url, const char **referer, int *depth,
+ bool *html_allowed, bool *css_allowed)
{
struct queue_element *qel = queue->head;
if (!qel)
- return 0;
+ return false;
queue->head = queue->head->next;
if (!queue->head)
queue->tail = NULL;
+ /* Reinstate the charset of the referring document, saved at
+ enqueue time, before this URL is processed. */
+ set_remote_charset (qel->remote_encoding);
+ if (qel->remote_encoding)
+ xfree (qel->remote_encoding);
+
*url = qel->url;
*referer = qel->referer;
*depth = qel->depth;
+ *html_allowed = qel->html_allowed;
+ *css_allowed = qel->css_allowed;
--queue->count;
DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
xfree (qel);
- return 1;
+ return true;
}
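
/* A minimal usage sketch of this queue (illustration only; the real
   driver is retrieve_tree below). The queue takes ownership of the
   URL and referer strings, and url_dequeue hands them back for the
   caller to free:

     struct url_queue *queue = url_queue_new ();
     url_enqueue (queue, xstrdup (start), NULL, 0, true, false);
     while (url_dequeue (queue, (const char **) &url,
                         (const char **) &referer, &depth,
                         &html_allowed, &css_allowed))
       {
         ... retrieve URL, enqueue acceptable children at depth + 1 ...
         xfree (url);
         xfree_null (referer);
       }
     url_queue_delete (queue);  */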
\f
-static int descend_url_p PARAMS ((const struct urlpos *, struct url *, int,
- struct url *, struct hash_table *));
-static int descend_redirect_p PARAMS ((const char *, const char *, int,
- struct url *, struct hash_table *));
+static bool download_child_p (const struct urlpos *, struct url *, int,
+ struct url *, struct hash_table *);
+static bool descend_redirect_p (const char *, const char *, int,
+ struct url *, struct hash_table *);
/* Retrieve a part of the web beginning with START_URL. This used to
7. if the URL is not one of those downloaded before, and if it
satisfies the criteria specified by the various command-line
- options, add it to the queue. */
+ options, add it to the queue. */
uerr_t
retrieve_tree (const char *start_url)
uerr_t status = RETROK;
/* The queue of URLs we need to load. */
- struct url_queue *queue = url_queue_new ();
+ struct url_queue *queue;
/* The URLs we do not wish to enqueue, because they are already in
the queue, but haven't been downloaded yet. */
- struct hash_table *blacklist = make_string_hash_table (0);
+ struct hash_table *blacklist;
+
+ int up_error_code;
+ struct url *start_url_parsed;
- /* We'll need various components of this, so better get it over with
- now. */
- struct url *start_url_parsed = url_parse (start_url, NULL);
+ set_ugly_no_encode (true);
+ start_url_parsed = url_parse (start_url, &up_error_code);
+ set_ugly_no_encode (false);
+ if (!start_url_parsed)
+ {
+ logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
+ url_error (up_error_code));
+ return URLERROR;
+ }
- url_enqueue (queue, xstrdup (start_url), NULL, 0);
- string_set_add (blacklist, start_url);
+ queue = url_queue_new ();
+ blacklist = make_string_hash_table (0);
+
+ /* Enqueue the starting URL. Use start_url_parsed->url rather than
+ just URL so we enqueue the canonical form of the URL. */
+ url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true, false);
+ string_set_add (blacklist, start_url_parsed->url);
while (1)
{
- int descend = 0;
+ bool descend = false;
char *url, *referer, *file = NULL;
int depth;
- boolean dash_p_leaf_HTML = FALSE;
-
- if (downloaded_exceeds_quota ())
- break;
+ bool html_allowed, css_allowed;
+ bool is_css = false;
+ bool dash_p_leaf_HTML = false;
+ if (opt.quota && total_downloaded_bytes > opt.quota)
+ break;
if (status == FWRITEERR)
- break;
+ break;
- /* Get the next URL from the queue. */
+ /* Get the next URL from the queue... */
if (!url_dequeue (queue,
- (const char **)&url, (const char **)&referer,
- &depth))
- break;
-
- /* And download it. */
-
- {
- int dt = 0;
- char *redirected = NULL;
- int oldrec = opt.recursive;
-
- opt.recursive = 0;
- status = retrieve_url (url, &file, &redirected, NULL, &dt);
- opt.recursive = oldrec;
-
- if (file && status == RETROK
- && (dt & RETROKF) && (dt & TEXTHTML))
- descend = 1;
-
- if (redirected)
- {
- /* We have been redirected, possibly to another host, or
- different path, or wherever. Check whether we really
- want to follow it. */
- if (descend)
- {
- if (!descend_redirect_p (redirected, url, depth,
- start_url_parsed, blacklist))
- descend = 0;
- }
-
- xfree (url);
- url = redirected;
- }
- }
+ (const char **)&url, (const char **)&referer,
+ &depth, &html_allowed, &css_allowed))
+ break;
+
+ /* ...and download it. Note that this download is in most cases
+ unconditional, as download_child_p already makes sure a file
+ doesn't get enqueued twice -- and yet this check is here, and
+ not in download_child_p. This is so that if you run `wget -r
+ URL1 URL2', and a random URL is encountered once under URL1
+ and again under URL2, but at a different (possibly smaller)
+ depth, we want the URL's children to be taken into account
+ the second time. */
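+ /* Concrete case (illustration): `wget -r -l2 A B', where page P
+ is at depth 2 under A but at depth 1 under B. While crawling A,
+ P is fetched but not descended into; while crawling B, the
+ already downloaded file is reused and P's children are enqueued
+ after all. */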
+ if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
+ {
+ file = xstrdup (hash_table_get (dl_url_file_map, url));
+
+ DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
+ url, file));
+
+ /* TODO: these two checks should be combined. */
+ if (html_allowed
+ && downloaded_html_set
+ && string_set_contains (downloaded_html_set, file))
+ {
+ descend = true;
+ is_css = false;
+ }
+ if (css_allowed
+ && downloaded_css_set
+ && string_set_contains (downloaded_css_set, file))
+ {
+ descend = true;
+ is_css = true;
+ }
+ }
+ else
+ {
+ int dt = 0;
+ char *redirected = NULL;
+
+ status = retrieve_url (url, &file, &redirected, referer, &dt, false);
+
+ if (html_allowed && file && status == RETROK
+ && (dt & RETROKF) && (dt & TEXTHTML))
+ {
+ descend = true;
+ is_css = false;
+ }
+
+ /* CSS is a little different: css_allowed can override the
+ content type, because many web servers serve CSS with an
+ incorrect content type. */
+ if (file && status == RETROK
+ && (dt & RETROKF)
+ && ((dt & TEXTCSS) || css_allowed))
+ {
+ descend = true;
+ is_css = true;
+ }
+
+ if (redirected)
+ {
+ /* We have been redirected, possibly to another host, or
+ different path, or wherever. Check whether we really
+ want to follow it. */
+ if (descend)
+ {
+ if (!descend_redirect_p (redirected, url, depth,
+ start_url_parsed, blacklist))
+ descend = false;
+ else
+ /* Make sure that the old pre-redirect form gets
+ blacklisted. */
+ string_set_add (blacklist, url);
+ }
+
+ xfree (url);
+ url = redirected;
+ }
+ }
+
+ if (opt.spider)
+ {
+ visited_url (url, referer);
+ }
if (descend
- && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
- {
- if (opt.page_requisites && depth == opt.reclevel)
- /* When -p is specified, we can do one more partial
- recursion from the "leaf nodes" on the HTML document
- tree. The recursion is partial in that we won't
- traverse any <A> or <AREA> tags, nor any <LINK> tags
- except for <LINK REL="stylesheet">. */
- dash_p_leaf_HTML = TRUE;
- else
- {
- /* Either -p wasn't specified or it was and we've
- already gone the one extra (pseudo-)level that it
- affords us, so we need to bail out. */
- DEBUGP (("Not descending further; at depth %d, max. %d.\n",
- depth, opt.reclevel));
- descend = 0;
- }
- }
-
- /* If the downloaded document was HTML, parse it and enqueue the
- links it contains. */
+ && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
+ {
+ if (opt.page_requisites
+ && (depth == opt.reclevel || depth == opt.reclevel + 1))
+ {
+ /* When -p is specified, we are allowed to exceed the
+ maximum depth, but only for the "inline" links,
+ i.e. those that are needed to display the page.
+ Originally this could exceed the depth at most by
+ one, but we allow one more level so that the leaf
+ pages that contain frames can be loaded
+ correctly. */
+ dash_p_leaf_HTML = true;
+ }
+ else
+ {
+ /* Either -p wasn't specified or it was and we've
+ already spent the two extra (pseudo-)levels that it
+ affords us, so we need to bail out. */
+ DEBUGP (("Not descending further; at depth %d, max. %d.\n",
+ depth, opt.reclevel));
+ descend = false;
+ }
+ }
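+
+ /* Illustration: with `-r -l2 -p', a page at depth 2 is still
+ scanned for its inline requisites (dash_p_leaf_HTML), and if it
+ is a frameset, the frame documents at depth 3 are scanned once
+ more, so that their own requisites at depth 4 can be fetched. */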
+
+ /* If the downloaded document was HTML or CSS, parse it and enqueue the
+ links it contains. */
if (descend)
- {
- int meta_disallow_follow = 0;
- struct urlpos *children = get_urls_html (file, url, dash_p_leaf_HTML,
- &meta_disallow_follow);
-
- if (opt.use_robots && meta_disallow_follow)
- {
- free_urlpos (children);
- children = NULL;
- }
-
- if (children)
- {
- struct urlpos *child = children;
- struct url *url_parsed = url_parsed = url_parse (url, NULL);
- assert (url_parsed != NULL);
-
- for (; child; child = child->next)
- {
- if (child->ignore_when_downloading)
- continue;
- if (descend_url_p (child, url_parsed, depth, start_url_parsed,
- blacklist))
- {
- url_enqueue (queue, xstrdup (child->url->url),
- xstrdup (url), depth + 1);
- /* We blacklist the URL we have enqueued, because we
- don't want to enqueue (and hence download) the
- same URL twice. */
- string_set_add (blacklist, child->url->url);
- }
- }
-
- url_free (url_parsed);
- free_urlpos (children);
- }
- }
-
- if (opt.delete_after || (file && !acceptable (file)))
- {
- /* Either --delete-after was specified, or we loaded this
- otherwise rejected (e.g. by -R) HTML file just so we
- could harvest its hyperlinks -- in either case, delete
- the local file. */
- DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
- opt.delete_after ? "--delete-after" :
- "recursive rejection criteria"));
- logprintf (LOG_VERBOSE,
- (opt.delete_after
- ? _("Removing %s.\n")
- : _("Removing %s since it should be rejected.\n")),
- file);
- if (unlink (file))
- logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
- }
+ {
+ bool meta_disallow_follow = false;
+ struct urlpos *children
+ = is_css ? get_urls_css_file (file, url) :
+ get_urls_html (file, url, &meta_disallow_follow);
+
+ if (opt.use_robots && meta_disallow_follow)
+ {
+ free_urlpos (children);
+ children = NULL;
+ }
+
+ if (children)
+ {
+ struct urlpos *child = children;
+ struct url *url_parsed;
+ char *referer_url = url;
+ bool strip_auth;
+
+ set_ugly_no_encode (true);
+ url_parsed = url_parse (url, NULL);
+ set_ugly_no_encode (false);
+
+ strip_auth = (url_parsed != NULL
+ && url_parsed->user != NULL);
+ assert (url_parsed != NULL);
+
+ /* Strip auth info if present */
+ if (strip_auth)
+ referer_url = url_string (url_parsed, URL_AUTH_HIDE);
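+ /* (Illustration: a referer of "http://user:pass@host/a.html" is
+ passed on as "http://host/a.html".) */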
+
+ for (; child; child = child->next)
+ {
+ if (child->ignore_when_downloading)
+ continue;
+ if (dash_p_leaf_HTML && !child->link_inline_p)
+ continue;
+ if (download_child_p (child, url_parsed, depth, start_url_parsed,
+ blacklist))
+ {
+ url_enqueue (queue, xstrdup (child->url->url),
+ xstrdup (referer_url), depth + 1,
+ child->link_expect_html,
+ child->link_expect_css);
+ /* We blacklist the URL we have enqueued, because we
+ don't want to enqueue (and hence download) the
+ same URL twice. */
+ string_set_add (blacklist, child->url->url);
+ }
+ }
+
+ if (strip_auth)
+ xfree (referer_url);
+ url_free (url_parsed);
+ free_urlpos (children);
+ }
+ }
+
+ if (file
+ && (opt.delete_after
+ || opt.spider /* opt.recursive is implicitly true */
+ || !acceptable (file)))
+ {
+ /* Either --delete-after was specified, or we loaded this
+ (otherwise unneeded because of --spider or rejected by -R)
+ HTML file just to harvest its hyperlinks -- in either case,
+ delete the local file. */
+ DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
+ opt.delete_after ? "--delete-after" :
+ (opt.spider ? "--spider" :
+ "recursive rejection criteria")));
+ logprintf (LOG_VERBOSE,
+ (opt.delete_after || opt.spider
+ ? _("Removing %s.\n")
+ : _("Removing %s since it should be rejected.\n")),
+ file);
+ if (unlink (file))
+ logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
+ logputs (LOG_VERBOSE, "\n");
+ register_delete_file (file);
+ }
xfree (url);
- FREE_MAYBE (referer);
- FREE_MAYBE (file);
+ xfree_null (referer);
+ xfree_null (file);
}
/* If anything is left of the queue due to a premature exit, free it
{
char *d1, *d2;
int d3;
- while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))
+ bool d4, d5;
+ while (url_dequeue (queue,
+ (const char **)&d1, (const char **)&d2, &d3, &d4, &d5))
{
- xfree (d1);
- FREE_MAYBE (d2);
+ xfree (d1);
+ xfree_null (d2);
}
}
url_queue_delete (queue);
url_free (start_url_parsed);
string_set_free (blacklist);
- if (downloaded_exceeds_quota ())
+ if (opt.quota && total_downloaded_bytes > opt.quota)
return QUOTEXC;
else if (status == FWRITEERR)
return FWRITEERR;
by storing these URLs to BLACKLIST. This may or may not help. It
will help if those URLs are encountered many times. */
-static int
-descend_url_p (const struct urlpos *upos, struct url *parent, int depth,
- struct url *start_url_parsed, struct hash_table *blacklist)
+static bool
+download_child_p (const struct urlpos *upos, struct url *parent, int depth,
+ struct url *start_url_parsed, struct hash_table *blacklist)
{
struct url *u = upos->url;
const char *url = u->url;
+ bool u_scheme_like_http;
DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));
if (string_set_contains (blacklist, url))
{
+ if (opt.spider)
+ {
+ char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
+ DEBUGP (("download_child_p: parent->url is: %s\n", quote (parent->url)));
+ visited_url (url, referrer);
+ xfree (referrer);
+ }
DEBUGP (("Already on the black list.\n"));
goto out;
}
More time- and memory- consuming tests should be put later on
the list. */
+ /* Determine whether the URL under consideration has an HTTP-like scheme. */
+ u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);
+
/* 1. Schemes other than HTTP are normally not recursed into. */
- if (u->scheme != SCHEME_HTTP
- && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
+ if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
{
DEBUGP (("Not following non-HTTP schemes.\n"));
goto out;
/* 2. If it is an absolute link and they are not followed, throw it
out. */
- if (u->scheme == SCHEME_HTTP)
+ if (u_scheme_like_http)
if (opt.relative_only && !upos->link_relative_p)
{
- DEBUGP (("It doesn't really look like a relative link.\n"));
- goto out;
+ DEBUGP (("It doesn't really look like a relative link.\n"));
+ goto out;
}
/* 3. If its domain is not to be accepted/looked-up, chuck it
/* 4. Check for parent directory.
If we descended to a different host or changed the scheme, ignore
- opt.no_parent. Also ignore it for -p leaf retrievals. */
+ opt.no_parent. Also ignore it for documents needed to display
+ the parent page when in -p mode. */
if (opt.no_parent
- && u->scheme == parent->scheme
- && 0 == strcasecmp (u->host, parent->host)
- && u->port == parent->port)
+ && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
+ && 0 == strcasecmp (u->host, start_url_parsed->host)
+ && u->port == start_url_parsed->port
+ && !(opt.page_requisites && upos->link_inline_p))
{
- if (!frontcmp (parent->dir, u->dir))
- {
- DEBUGP (("Trying to escape the root directory with no_parent in effect.\n"));
- goto out;
- }
+ if (!subdir_p (start_url_parsed->dir, u->dir))
+ {
+ DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
+ u->dir, start_url_parsed->dir));
+ goto out;
+ }
}
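/* (E.g. with a start directory of "/a/b", a link into "/a/b/c"
   passes, while a link into "/a" or "/d" trips the check above;
   illustration only.) */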
/* 5. If the file does not match the acceptance list, or is on the
exclusion and inclusion lists. */
if (opt.includes || opt.excludes)
{
- if (!accdir (u->dir, ALLABS))
- {
- DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
- goto out;
- }
+ if (!accdir (u->dir))
+ {
+ DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
+ goto out;
+ }
}
- /* 6. */
- {
- char *suf;
- /* Check for acceptance/rejection rules. We ignore these rules
- for HTML documents because they might lead to other files which
- need to be downloaded. Of course, we don't know which
- documents are HTML before downloading them, so we guess.
-
- A file is subject to acceptance/rejection rules if:
-
- * u->file is not "" (i.e. it is not a directory)
- and either:
- + there is no file suffix,
- + or there is a suffix, but is not "html" or "htm",
- + both:
- - recursion is not infinite,
- - and we are at its very end. */
-
- if (u->file[0] != '\0'
- && ((suf = suffix (url)) == NULL
- || (0 != strcmp (suf, "html") && 0 != strcmp (suf, "htm"))
- || (opt.reclevel == INFINITE_RECURSION && depth >= opt.reclevel)))
- {
- if (!acceptable (u->file))
- {
- DEBUGP (("%s (%s) does not match acc/rej rules.\n",
- url, u->file));
- goto out;
- }
- }
- }
+ /* 6. Check for acceptance/rejection rules. We ignore these rules
+ for directories (no file name to match) and for non-leaf HTMLs,
+ which can lead to other files that do need to be downloaded. (-p
+ automatically implies non-leaf because with -p we can, if
+ necessary, overstep the maximum depth to get the page requisites.) */
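+ /* Worked example (illustration): with `-r -l3 -R html' and no -p,
+ a link to foo.html found at depth 1 satisfies
+ depth < opt.reclevel - 1 and escapes the rejection rule, while
+ the same link found at depth 2 is a leaf and is rejected by
+ `-R html'. */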
+ if (u->file[0] != '\0'
+ && !(has_html_suffix_p (u->file)
+ /* The exception only applies to non-leaf HTMLs (but -p
+ always implies non-leaf because we can overstep the
+ maximum depth to get the requisites): */
+ && (/* non-leaf */
+ opt.reclevel == INFINITE_RECURSION
+ /* also non-leaf */
+ || depth < opt.reclevel - 1
+ /* -p, which implies non-leaf (see above) */
+ || opt.page_requisites)))
+ {
+ if (!acceptable (u->file))
+ {
+ DEBUGP (("%s (%s) does not match acc/rej rules.\n",
+ url, u->file));
+ goto out;
+ }
+ }
/* 7. */
- if (u->scheme == parent->scheme)
+ if (schemes_are_similar_p (u->scheme, parent->scheme))
if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
{
- DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
- u->host, parent->host));
- goto out;
+ DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
+ u->host, parent->host));
+ goto out;
}
/* 8. */
- if (opt.use_robots && u->scheme == SCHEME_HTTP)
+ if (opt.use_robots && u_scheme_like_http)
{
struct robot_specs *specs = res_get_specs (u->host, u->port);
if (!specs)
- {
- char *rfile;
- if (res_retrieve_file (url, &rfile))
- {
- specs = res_parse_from_file (rfile);
- xfree (rfile);
- }
- else
- {
- /* If we cannot get real specs, at least produce
- dummy ones so that we can register them and stop
- trying to retrieve them. */
- specs = res_parse ("", 0);
- }
- res_register_specs (u->host, u->port, specs);
- }
+ {
+ char *rfile;
+ if (res_retrieve_file (url, &rfile))
+ {
+ specs = res_parse_from_file (rfile);
+
+ /* Delete the robots.txt file if we chose to either delete the
+ files after downloading or we're just running a spider. */
+ if (opt.delete_after || opt.spider)
+ {
+ logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
+ if (unlink (rfile))
+ logprintf (LOG_NOTQUIET, "unlink: %s\n",
+ strerror (errno));
+ }
+
+ xfree (rfile);
+ }
+ else
+ {
+ /* If we cannot get real specs, at least produce
+ dummy ones so that we can register them and stop
+ trying to retrieve them. */
+ specs = res_parse ("", 0);
+ }
+ res_register_specs (u->host, u->port, specs);
+ }
/* Now that we have (or don't have) robots.txt specs, we can
- check what they say. */
+ check what they say. */
if (!res_match_path (specs, u->path))
- {
- DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
- string_set_add (blacklist, url);
- goto out;
- }
+ {
+ DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
+ string_set_add (blacklist, url);
+ goto out;
+ }
}
/* The URL has passed all the tests. It can be placed in the
download queue. */
DEBUGP (("Decided to load it.\n"));
- return 1;
+ return true;
out:
DEBUGP (("Decided NOT to load it.\n"));
- return 0;
+ return false;
}
-/* This function determines whether we should descend the children of
- the URL whose download resulted in a redirection, possibly to
- another host, etc. It is needed very rarely, and thus it is merely
- a simple-minded wrapper around descend_url_p. */
+/* This function determines whether we will consider downloading the
+ children of a URL whose download resulted in a redirection,
+ possibly to another host, etc. It is needed very rarely, and thus
+ it is merely a simple-minded wrapper around download_child_p. */
-static int
+static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
- struct url *start_url_parsed, struct hash_table *blacklist)
+ struct url *start_url_parsed, struct hash_table *blacklist)
{
struct url *orig_parsed, *new_parsed;
struct urlpos *upos;
- int success;
+ bool success;
+ set_ugly_no_encode (true);
orig_parsed = url_parse (original, NULL);
assert (orig_parsed != NULL);
new_parsed = url_parse (redirected, NULL);
assert (new_parsed != NULL);
+ set_ugly_no_encode (false);
- upos = xmalloc (sizeof (struct urlpos));
- memset (upos, 0, sizeof (*upos));
+ upos = xnew0 (struct urlpos);
upos->url = new_parsed;
- success = descend_url_p (upos, orig_parsed, depth,
- start_url_parsed, blacklist);
+ success = download_child_p (upos, orig_parsed, depth,
+ start_url_parsed, blacklist);
url_free (orig_parsed);
url_free (new_parsed);
return success;
}
-\f
-/* Register that URL has been successfully downloaded to FILE. */
-
-void
-register_download (const char *url, const char *file)
-{
- if (!opt.convert_links)
- return;
- if (!dl_file_url_map)
- dl_file_url_map = make_string_hash_table (0);
- if (!dl_url_file_map)
- dl_url_file_map = make_string_hash_table (0);
-
- if (!hash_table_contains (dl_file_url_map, file))
- hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
- if (!hash_table_contains (dl_url_file_map, url))
- hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
-}
-
-/* Register that FROM has been redirected to TO. This assumes that TO
- is successfully downloaded and already registered using
- register_download() above. */
-
-void
-register_redirection (const char *from, const char *to)
-{
- char *file;
-
- if (!opt.convert_links)
- return;
-
- file = hash_table_get (dl_url_file_map, to);
- assert (file != NULL);
- if (!hash_table_contains (dl_url_file_map, from))
- hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
-}
-
-/* Register that URL corresponds to the HTML file FILE. */
-
-void
-register_html (const char *url, const char *file)
-{
- if (!opt.convert_links)
- return;
- downloaded_html_files = slist_prepend (downloaded_html_files, file);
-}
-
-/* This function is called when the retrieval is done to convert the
- links that have been downloaded. It has to be called at the end of
- the retrieval, because only then does Wget know conclusively which
- URLs have been downloaded, and which not, so it can tell which
- direction to convert to.
-
- The "direction" means that the URLs to the files that have been
- downloaded get converted to the relative URL which will point to
- that file. And the other URLs get converted to the remote URL on
- the server.
-
- All the downloaded HTMLs are kept in downloaded_html_files, and
- downloaded URLs in urls_downloaded. All the information is
- extracted from these two lists. */
-
-void
-convert_all_links (void)
-{
- slist *html;
- struct wget_timer *timer;
- long msecs;
- int file_count = 0;
-
- timer = wtimer_new ();
-
- /* Destructively reverse downloaded_html_files to get it in the right order.
- recursive_retrieve() used slist_prepend() consistently. */
- downloaded_html_files = slist_nreverse (downloaded_html_files);
-
- for (html = downloaded_html_files; html; html = html->next)
- {
- struct urlpos *urls, *cur_url;
- char *url;
-
- DEBUGP (("Rescanning %s\n", html->string));
-
- /* Determine the URL of the HTML file. get_urls_html will need
- it. */
- url = hash_table_get (dl_file_url_map, html->string);
- if (url)
- DEBUGP (("It should correspond to %s.\n", url));
- else
- DEBUGP (("I cannot find the corresponding URL.\n"));
-
- /* Parse the HTML file... */
- urls = get_urls_html (html->string, url, FALSE, NULL);
-
- /* We don't respect meta_disallow_follow here because, even if
- the file is not followed, we might still want to convert the
- links that have been followed from other files. */
-
- for (cur_url = urls; cur_url; cur_url = cur_url->next)
- {
- char *local_name;
- struct url *u = cur_url->url;
-
- if (cur_url->link_base_p)
- {
- /* Base references have been resolved by our parser, so
- we turn the base URL into an empty string. (Perhaps
- we should remove the tag entirely?) */
- cur_url->convert = CO_NULLIFY_BASE;
- continue;
- }
-
- /* We decide the direction of conversion according to whether
- a URL was downloaded. Downloaded URLs will be converted
- ABS2REL, whereas non-downloaded will be converted REL2ABS. */
- local_name = hash_table_get (dl_url_file_map, u->url);
- if (local_name)
- DEBUGP (("%s marked for conversion, local %s\n",
- u->url, local_name));
-
- /* Decide on the conversion type. */
- if (local_name)
- {
- /* We've downloaded this URL. Convert it to relative
- form. We do this even if the URL already is in
- relative form, because our directory structure may
- not be identical to that on the server (think `-nd',
- `--cut-dirs', etc.) */
- cur_url->convert = CO_CONVERT_TO_RELATIVE;
- cur_url->local_name = xstrdup (local_name);
- }
- else
- {
- /* We haven't downloaded this URL. If it's not already
- complete (including a full host name), convert it to
- that form, so it can be reached while browsing this
- HTML locally. */
- if (!cur_url->link_complete_p)
- cur_url->convert = CO_CONVERT_TO_COMPLETE;
- cur_url->local_name = NULL;
- }
- }
-
- /* Convert the links in the file. */
- convert_links (html->string, urls);
- ++file_count;
-
- /* Free the data. */
- free_urlpos (urls);
- }
-
- msecs = wtimer_elapsed (timer);
- wtimer_delete (timer);
- logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"),
- file_count, (double)msecs / 1000);
-}
-
-/* Cleanup the data structures associated with recursive retrieving
- (the variables above). */
-void
-recursive_cleanup (void)
-{
- if (dl_file_url_map)
- {
- free_keys_and_values (dl_file_url_map);
- hash_table_destroy (dl_file_url_map);
- dl_file_url_map = NULL;
- }
- if (dl_url_file_map)
- {
- free_keys_and_values (dl_url_file_map);
- hash_table_destroy (dl_url_file_map);
- dl_url_file_map = NULL;
- }
- slist_free (downloaded_html_files);
- downloaded_html_files = NULL;
-}
+/* vim:set sts=2 sw=2 cino+={s: */