X-Git-Url: http://sjero.net/git/?p=wget;a=blobdiff_plain;f=src%2Frecur.c;h=66ef2e0fd9b566d0c9c809562daa2d4ee32ddb70;hp=da27741517bc0bc6dc2a41ed67baeb9099ae78b4;hb=b014f8fae9291e7504c0cca2dd8b9a0035466c03;hpb=f8b4b8bd12b0f5c76b7eb6ed908e3cfe0cdc0843 diff --git a/src/recur.c b/src/recur.c index da277415..66ef2e0f 100644 --- a/src/recur.c +++ b/src/recur.c @@ -1,12 +1,13 @@ /* Handling of recursive HTTP retrieving. - Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc. + Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This file is part of GNU Wget. GNU Wget is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. +the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. GNU Wget is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -14,61 +15,55 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with Wget; if not, write to the Free Software -Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +along with Wget. If not, see . -#include +Additional permission under GNU GPL version 3 section 7 + +If you modify this program, or any covered work, by linking or +combining it with the OpenSSL project's OpenSSL library (or a +modified version of that library), containing parts covered by the +terms of the OpenSSL or SSLeay licenses, the Free Software Foundation +grants you additional permission to convey the resulting work. +Corresponding Source for a non-source form of such a combination +shall include the source code for the parts of OpenSSL used as well +as that of the covered work. */ + +#include "wget.h" #include #include -#ifdef HAVE_STRING_H -# include -#else -# include -#endif /* HAVE_STRING_H */ +#include #ifdef HAVE_UNISTD_H # include #endif /* HAVE_UNISTD_H */ #include #include -#include -#include "wget.h" #include "url.h" #include "recur.h" #include "utils.h" #include "retr.h" #include "ftp.h" -#include "fnmatch.h" #include "host.h" #include "hash.h" #include "res.h" - -#ifndef errno -extern int errno; -#endif - -extern char *version_string; - -static struct hash_table *dl_file_url_map; -static struct hash_table *dl_url_file_map; - -/* List of HTML files downloaded in this Wget run, used for link - conversion after Wget is done. The list and the set contain the - same information, except the list maintains the order. Perhaps I - should get rid of the list, it's there for historical reasons. */ -static slist *downloaded_html_list; -static struct hash_table *downloaded_html_set; - -static void register_delete_file PARAMS ((const char *)); +#include "convert.h" +#include "html-url.h" +#include "css-url.h" +#include "spider.h" /* Functions for maintaining the URL queue. */ struct queue_element { - const char *url; - const char *referer; - int depth; - struct queue_element *next; + const char *url; /* the URL to download */ + const char *referer; /* the referring document */ + int depth; /* the depth */ + bool html_allowed; /* whether the document is allowed to + be treated as HTML. 
*/ + struct iri *iri; /* sXXXav */ + bool css_allowed; /* whether the document is allowed to + be treated as CSS. */ + struct queue_element *next; /* next element in queue */ }; struct url_queue { @@ -82,8 +77,7 @@ struct url_queue { static struct url_queue * url_queue_new (void) { - struct url_queue *queue = xmalloc (sizeof (*queue)); - memset (queue, '\0', sizeof (*queue)); + struct url_queue *queue = xnew0 (struct url_queue); return queue; } @@ -100,22 +94,31 @@ url_queue_delete (struct url_queue *queue) into it. */ static void -url_enqueue (struct url_queue *queue, - const char *url, const char *referer, int depth) +url_enqueue (struct url_queue *queue, struct iri *i, + const char *url, const char *referer, int depth, + bool html_allowed, bool css_allowed) { - struct queue_element *qel = xmalloc (sizeof (*qel)); + struct queue_element *qel = xnew (struct queue_element); + qel->iri = i; qel->url = url; qel->referer = referer; qel->depth = depth; + qel->html_allowed = html_allowed; + qel->css_allowed = css_allowed; qel->next = NULL; ++queue->count; if (queue->count > queue->maxcount) queue->maxcount = queue->count; - DEBUGP (("Enqueuing %s at depth %d\n", url, depth)); + DEBUGP (("Enqueuing %s at depth %d\n", + quotearg_n_style (0, escape_quoting_style, url), depth)); DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount)); + if (i) + DEBUGP (("[IRI Enqueuing %s with %s\n", quote_n (0, url), + i->uri_encoding ? quote_n (1, i->uri_encoding) : "None")); + if (queue->tail) queue->tail->next = qel; queue->tail = qel; @@ -124,39 +127,44 @@ url_enqueue (struct url_queue *queue, queue->head = queue->tail; } -/* Take a URL out of the queue. Return 1 if this operation succeeded, - or 0 if the queue is empty. */ +/* Take a URL out of the queue. Return true if this operation + succeeded, or false if the queue is empty. */ -static int -url_dequeue (struct url_queue *queue, - const char **url, const char **referer, int *depth) +static bool +url_dequeue (struct url_queue *queue, struct iri **i, + const char **url, const char **referer, int *depth, + bool *html_allowed, bool *css_allowed) { struct queue_element *qel = queue->head; if (!qel) - return 0; + return false; queue->head = queue->head->next; if (!queue->head) queue->tail = NULL; + *i = qel->iri; *url = qel->url; *referer = qel->referer; *depth = qel->depth; + *html_allowed = qel->html_allowed; + *css_allowed = qel->css_allowed; --queue->count; - DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth)); + DEBUGP (("Dequeuing %s at depth %d\n", + quotearg_n_style (0, escape_quoting_style, qel->url), qel->depth)); DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount)); xfree (qel); - return 1; + return true; } -static int download_child_p PARAMS ((const struct urlpos *, struct url *, int, - struct url *, struct hash_table *)); -static int descend_redirect_p PARAMS ((const char *, const char *, int, - struct url *, struct hash_table *)); +static bool download_child_p (const struct urlpos *, struct url *, int, + struct url *, struct hash_table *, struct iri *); +static bool descend_redirect_p (const char *, struct url *, int, + struct url *, struct hash_table *, struct iri *); /* Retrieve a part of the web beginning with START_URL. This used to @@ -178,10 +186,10 @@ static int descend_redirect_p PARAMS ((const char *, const char *, int, 7. if the URL is not one of those downloaded before, and if it satisfies the criteria specified by the various command-line - options, add it to the queue. 
*/ + options, add it to the queue. */ uerr_t -retrieve_tree (const char *start_url) +retrieve_tree (struct url *start_url_parsed, struct iri *pi) { uerr_t status = RETROK; @@ -193,187 +201,253 @@ retrieve_tree (const char *start_url) struct hash_table *blacklist; int up_error_code; - struct url *start_url_parsed = url_parse (start_url, &up_error_code); + struct iri *i = iri_new (); - if (!start_url_parsed) +#define COPYSTR(x) (x) ? xstrdup(x) : NULL; + /* Duplicate pi struct if not NULL */ + if (pi) { - logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url, - url_error (up_error_code)); - return URLERROR; + i->uri_encoding = COPYSTR (pi->uri_encoding); + i->content_encoding = COPYSTR (pi->content_encoding); + i->utf8_encode = pi->utf8_encode; } + else + set_uri_encoding (i, opt.locale, true); +#undef COPYSTR queue = url_queue_new (); blacklist = make_string_hash_table (0); /* Enqueue the starting URL. Use start_url_parsed->url rather than just URL so we enqueue the canonical form of the URL. */ - url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0); + url_enqueue (queue, i, xstrdup (start_url_parsed->url), NULL, 0, true, + false); string_set_add (blacklist, start_url_parsed->url); while (1) { - int descend = 0; + bool descend = false; char *url, *referer, *file = NULL; int depth; - boolean dash_p_leaf_HTML = FALSE; + bool html_allowed, css_allowed; + bool is_css = false; + bool dash_p_leaf_HTML = false; - if (downloaded_exceeds_quota ()) - break; + if (opt.quota && total_downloaded_bytes > opt.quota) + break; if (status == FWRITEERR) - break; + break; /* Get the next URL from the queue... */ - if (!url_dequeue (queue, - (const char **)&url, (const char **)&referer, - &depth)) - break; + if (!url_dequeue (queue, (struct iri **) &i, + (const char **)&url, (const char **)&referer, + &depth, &html_allowed, &css_allowed)) + break; /* ...and download it. Note that this download is in most cases - unconditional, as download_child_p already makes sure a file - doesn't get enqueued twice -- and yet this check is here, and - not in download_child_p. This is so that if you run `wget -r - URL1 URL2', and a random URL is encountered once under URL1 - and again under URL2, but at a different (possibly smaller) - depth, we want the URL's children to be taken into account - the second time. */ + unconditional, as download_child_p already makes sure a file + doesn't get enqueued twice -- and yet this check is here, and + not in download_child_p. This is so that if you run `wget -r + URL1 URL2', and a random URL is encountered once under URL1 + and again under URL2, but at a different (possibly smaller) + depth, we want the URL's children to be taken into account + the second time. */ if (dl_url_file_map && hash_table_contains (dl_url_file_map, url)) - { - file = xstrdup (hash_table_get (dl_url_file_map, url)); - - DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n", - url, file)); - - if (string_set_contains (downloaded_html_set, file)) - descend = 1; - } + { + file = xstrdup (hash_table_get (dl_url_file_map, url)); + + DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n", + url, file)); + + /* this sucks, needs to be combined! 
*/ + if (html_allowed + && downloaded_html_set + && string_set_contains (downloaded_html_set, file)) + { + descend = true; + is_css = false; + } + if (css_allowed + && downloaded_css_set + && string_set_contains (downloaded_css_set, file)) + { + descend = true; + is_css = true; + } + } else - { - int dt = 0; - char *redirected = NULL; - int oldrec = opt.recursive; - - opt.recursive = 0; - status = retrieve_url (url, &file, &redirected, referer, &dt); - opt.recursive = oldrec; - - if (file && status == RETROK - && (dt & RETROKF) && (dt & TEXTHTML)) - descend = 1; - - if (redirected) - { - /* We have been redirected, possibly to another host, or - different path, or wherever. Check whether we really - want to follow it. */ - if (descend) - { - if (!descend_redirect_p (redirected, url, depth, - start_url_parsed, blacklist)) - descend = 0; - else - /* Make sure that the old pre-redirect form gets - blacklisted. */ - string_set_add (blacklist, url); - } - - xfree (url); - url = redirected; - } - } + { + int dt = 0, url_err; + char *redirected = NULL; + struct url *url_parsed = url_parse (url, &url_err, i, true); + + status = retrieve_url (url_parsed, url, &file, &redirected, referer, + &dt, false, i, true); + + if (html_allowed && file && status == RETROK + && (dt & RETROKF) && (dt & TEXTHTML)) + { + descend = true; + is_css = false; + } + + /* a little different, css_allowed can override content type + lots of web servers serve css with an incorrect content type + */ + if (file && status == RETROK + && (dt & RETROKF) && + ((dt & TEXTCSS) || css_allowed)) + { + descend = true; + is_css = true; + } + + if (redirected) + { + /* We have been redirected, possibly to another host, or + different path, or wherever. Check whether we really + want to follow it. */ + if (descend) + { + if (!descend_redirect_p (redirected, url_parsed, depth, + start_url_parsed, blacklist, i)) + descend = false; + else + /* Make sure that the old pre-redirect form gets + blacklisted. */ + string_set_add (blacklist, url); + } + + xfree (url); + url = redirected; + } + else + { + xfree (url); + url = xstrdup (url_parsed->url); + } + url_free(url_parsed); + } + + if (opt.spider) + { + visited_url (url, referer); + } if (descend - && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION) - { - if (opt.page_requisites - && (depth == opt.reclevel || depth == opt.reclevel + 1)) - { - /* When -p is specified, we are allowed to exceed the - maximum depth, but only for the "inline" links, - i.e. those that are needed to display the page. - Originally this could exceed the depth at most by - one, but we allow one more level so that the leaf - pages that contain frames can be loaded - correctly. */ - dash_p_leaf_HTML = TRUE; - } - else - { - /* Either -p wasn't specified or it was and we've - already spent the two extra (pseudo-)levels that it - affords us, so we need to bail out. */ - DEBUGP (("Not descending further; at depth %d, max. %d.\n", - depth, opt.reclevel)); - descend = 0; - } - } - - /* If the downloaded document was HTML, parse it and enqueue the - links it contains. */ + && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION) + { + if (opt.page_requisites + && (depth == opt.reclevel || depth == opt.reclevel + 1)) + { + /* When -p is specified, we are allowed to exceed the + maximum depth, but only for the "inline" links, + i.e. those that are needed to display the page. 
+ Originally this could exceed the depth at most by + one, but we allow one more level so that the leaf + pages that contain frames can be loaded + correctly. */ + dash_p_leaf_HTML = true; + } + else + { + /* Either -p wasn't specified or it was and we've + already spent the two extra (pseudo-)levels that it + affords us, so we need to bail out. */ + DEBUGP (("Not descending further; at depth %d, max. %d.\n", + depth, opt.reclevel)); + descend = false; + } + } + + /* If the downloaded document was HTML or CSS, parse it and enqueue the + links it contains. */ if (descend) - { - int meta_disallow_follow = 0; - struct urlpos *children - = get_urls_html (file, url, &meta_disallow_follow); - - if (opt.use_robots && meta_disallow_follow) - { - free_urlpos (children); - children = NULL; - } - - if (children) - { - struct urlpos *child = children; - struct url *url_parsed = url_parsed = url_parse (url, NULL); - assert (url_parsed != NULL); - - for (; child; child = child->next) - { - if (child->ignore_when_downloading) - continue; - if (dash_p_leaf_HTML && !child->link_inline_p) - continue; - if (download_child_p (child, url_parsed, depth, start_url_parsed, - blacklist)) - { - url_enqueue (queue, xstrdup (child->url->url), - xstrdup (url), depth + 1); - /* We blacklist the URL we have enqueued, because we - don't want to enqueue (and hence download) the - same URL twice. */ - string_set_add (blacklist, child->url->url); - } - } - - url_free (url_parsed); - free_urlpos (children); - } - } - - if (opt.delete_after || (file && !acceptable (file))) - { - /* Either --delete-after was specified, or we loaded this - otherwise rejected (e.g. by -R) HTML file just so we - could harvest its hyperlinks -- in either case, delete - the local file. */ - DEBUGP (("Removing file due to %s in recursive_retrieve():\n", - opt.delete_after ? "--delete-after" : - "recursive rejection criteria")); - logprintf (LOG_VERBOSE, - (opt.delete_after - ? _("Removing %s.\n") - : _("Removing %s since it should be rejected.\n")), - file); - if (unlink (file)) - logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno)); - register_delete_file (file); - } + { + bool meta_disallow_follow = false; + struct urlpos *children + = is_css ? get_urls_css_file (file, url) : + get_urls_html (file, url, &meta_disallow_follow, i); + + if (opt.use_robots && meta_disallow_follow) + { + free_urlpos (children); + children = NULL; + } + + if (children) + { + struct urlpos *child = children; + struct url *url_parsed = url_parse (url, NULL, i, true); + struct iri *ci; + char *referer_url = url; + bool strip_auth = (url_parsed != NULL + && url_parsed->user != NULL); + assert (url_parsed != NULL); + + /* Strip auth info if present */ + if (strip_auth) + referer_url = url_string (url_parsed, URL_AUTH_HIDE); + + for (; child; child = child->next) + { + if (child->ignore_when_downloading) + continue; + if (dash_p_leaf_HTML && !child->link_inline_p) + continue; + if (download_child_p (child, url_parsed, depth, start_url_parsed, + blacklist, i)) + { + ci = iri_new (); + set_uri_encoding (ci, i->content_encoding, false); + url_enqueue (queue, ci, xstrdup (child->url->url), + xstrdup (referer_url), depth + 1, + child->link_expect_html, + child->link_expect_css); + /* We blacklist the URL we have enqueued, because we + don't want to enqueue (and hence download) the + same URL twice. 
*/ + string_set_add (blacklist, child->url->url); + } + } + + if (strip_auth) + xfree (referer_url); + url_free (url_parsed); + free_urlpos (children); + } + } + + if (file + && (opt.delete_after + || opt.spider /* opt.recursive is implicitely true */ + || !acceptable (file))) + { + /* Either --delete-after was specified, or we loaded this + (otherwise unneeded because of --spider or rejected by -R) + HTML file just to harvest its hyperlinks -- in either case, + delete the local file. */ + DEBUGP (("Removing file due to %s in recursive_retrieve():\n", + opt.delete_after ? "--delete-after" : + (opt.spider ? "--spider" : + "recursive rejection criteria"))); + logprintf (LOG_VERBOSE, + (opt.delete_after || opt.spider + ? _("Removing %s.\n") + : _("Removing %s since it should be rejected.\n")), + file); + if (unlink (file)) + logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno)); + logputs (LOG_VERBOSE, "\n"); + register_delete_file (file); + } xfree (url); - FREE_MAYBE (referer); - FREE_MAYBE (file); + xfree_null (referer); + xfree_null (file); + iri_free (i); } /* If anything is left of the queue due to a premature exit, free it @@ -381,19 +455,21 @@ retrieve_tree (const char *start_url) { char *d1, *d2; int d3; - while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3)) + bool d4, d5; + struct iri *d6; + while (url_dequeue (queue, (struct iri **)&d6, + (const char **)&d1, (const char **)&d2, &d3, &d4, &d5)) { - xfree (d1); - FREE_MAYBE (d2); + iri_free (d6); + xfree (d1); + xfree_null (d2); } } url_queue_delete (queue); - if (start_url_parsed) - url_free (start_url_parsed); string_set_free (blacklist); - if (downloaded_exceeds_quota ()) + if (opt.quota && total_downloaded_bytes > opt.quota) return QUOTEXC; else if (status == FWRITEERR) return FWRITEERR; @@ -409,17 +485,26 @@ retrieve_tree (const char *start_url) by storing these URLs to BLACKLIST. This may or may not help. It will help if those URLs are encountered many times. */ -static int +static bool download_child_p (const struct urlpos *upos, struct url *parent, int depth, - struct url *start_url_parsed, struct hash_table *blacklist) + struct url *start_url_parsed, struct hash_table *blacklist, + struct iri *iri) { struct url *u = upos->url; const char *url = u->url; + bool u_scheme_like_http; DEBUGP (("Deciding whether to enqueue \"%s\".\n", url)); if (string_set_contains (blacklist, url)) { + if (opt.spider) + { + char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD); + DEBUGP (("download_child_p: parent->url is: %s\n", quote (parent->url))); + visited_url (url, referrer); + xfree (referrer); + } DEBUGP (("Already on the black list.\n")); goto out; } @@ -445,12 +530,11 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth, More time- and memory- consuming tests should be put later on the list. */ + /* Determine whether URL under consideration has a HTTP-like scheme. */ + u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP); + /* 1. Schemes other than HTTP are normally not recursed into. */ - if (u->scheme != SCHEME_HTTP -#ifdef HAVE_SSL - && u->scheme != SCHEME_HTTPS -#endif - && !(u->scheme == SCHEME_FTP && opt.follow_ftp)) + if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp)) { DEBUGP (("Not following non-HTTP schemes.\n")); goto out; @@ -458,15 +542,11 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth, /* 2. If it is an absolute link and they are not followed, throw it out. 
*/ - if (u->scheme == SCHEME_HTTP -#ifdef HAVE_SSL - || u->scheme == SCHEME_HTTPS -#endif - ) + if (u_scheme_like_http) if (opt.relative_only && !upos->link_relative_p) { - DEBUGP (("It doesn't really look like a relative link.\n")); - goto out; + DEBUGP (("It doesn't really look like a relative link.\n")); + goto out; } /* 3. If its domain is not to be accepted/looked-up, chuck it @@ -483,17 +563,17 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth, opt.no_parent. Also ignore it for documents needed to display the parent page when in -p mode. */ if (opt.no_parent - && u->scheme == start_url_parsed->scheme + && schemes_are_similar_p (u->scheme, start_url_parsed->scheme) && 0 == strcasecmp (u->host, start_url_parsed->host) && u->port == start_url_parsed->port && !(opt.page_requisites && upos->link_inline_p)) { - if (!frontcmp (start_url_parsed->dir, u->dir)) - { - DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n", - u->dir, start_url_parsed->dir)); - goto out; - } + if (!subdir_p (start_url_parsed->dir, u->dir)) + { + DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n", + u->dir, start_url_parsed->dir)); + goto out; + } } /* 5. If the file does not match the acceptance list, or is on the @@ -501,89 +581,100 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth, exclusion and inclusion lists. */ if (opt.includes || opt.excludes) { - if (!accdir (u->dir, ALLABS)) - { - DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir)); - goto out; - } + if (!accdir (u->dir)) + { + DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir)); + goto out; + } } - /* 6. */ - { - /* Check for acceptance/rejection rules. We ignore these rules - for directories (no file name to match) and for HTML documents, - which might lead to other files that do need to be downloaded. - That is, unless we've exhausted the recursion depth anyway. */ - if (u->file[0] != '\0' - && !(has_html_suffix_p (u->file) - && depth < opt.reclevel - 1 - && depth != INFINITE_RECURSION)) - { - if (!acceptable (u->file)) - { - DEBUGP (("%s (%s) does not match acc/rej rules.\n", - url, u->file)); - goto out; - } - } - } + /* 6. Check for acceptance/rejection rules. We ignore these rules + for directories (no file name to match) and for non-leaf HTMLs, + which can lead to other files that do need to be downloaded. (-p + automatically implies non-leaf because with -p we can, if + necesary, overstep the maximum depth to get the page requisites.) */ + if (u->file[0] != '\0' + && !(has_html_suffix_p (u->file) + /* The exception only applies to non-leaf HTMLs (but -p + always implies non-leaf because we can overstep the + maximum depth to get the requisites): */ + && (/* non-leaf */ + opt.reclevel == INFINITE_RECURSION + /* also non-leaf */ + || depth < opt.reclevel - 1 + /* -p, which implies non-leaf (see above) */ + || opt.page_requisites))) + { + if (!acceptable (u->file)) + { + DEBUGP (("%s (%s) does not match acc/rej rules.\n", + url, u->file)); + goto out; + } + } /* 7. */ - if (u->scheme == parent->scheme) + if (schemes_are_similar_p (u->scheme, parent->scheme)) if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host)) { - DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n", - u->host, parent->host)); - goto out; + DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n", + u->host, parent->host)); + goto out; } /* 8. 
*/ - if (opt.use_robots && (u->scheme == SCHEME_HTTP -#ifdef HAVE_SSL - || u->scheme == SCHEME_HTTPS -#endif - ) - ) + if (opt.use_robots && u_scheme_like_http) { struct robot_specs *specs = res_get_specs (u->host, u->port); if (!specs) - { - char *rfile; - if (res_retrieve_file (url, &rfile)) - { - specs = res_parse_from_file (rfile); - xfree (rfile); - } - else - { - /* If we cannot get real specs, at least produce - dummy ones so that we can register them and stop - trying to retrieve them. */ - specs = res_parse ("", 0); - } - res_register_specs (u->host, u->port, specs); - } + { + char *rfile; + if (res_retrieve_file (url, &rfile, iri)) + { + specs = res_parse_from_file (rfile); + + /* Delete the robots.txt file if we chose to either delete the + files after downloading or we're just running a spider. */ + if (opt.delete_after || opt.spider) + { + logprintf (LOG_VERBOSE, "Removing %s.\n", rfile); + if (unlink (rfile)) + logprintf (LOG_NOTQUIET, "unlink: %s\n", + strerror (errno)); + } + + xfree (rfile); + } + else + { + /* If we cannot get real specs, at least produce + dummy ones so that we can register them and stop + trying to retrieve them. */ + specs = res_parse ("", 0); + } + res_register_specs (u->host, u->port, specs); + } /* Now that we have (or don't have) robots.txt specs, we can - check what they say. */ + check what they say. */ if (!res_match_path (specs, u->path)) - { - DEBUGP (("Not following %s because robots.txt forbids it.\n", url)); - string_set_add (blacklist, url); - goto out; - } + { + DEBUGP (("Not following %s because robots.txt forbids it.\n", url)); + string_set_add (blacklist, url); + goto out; + } } /* The URL has passed all the tests. It can be placed in the download queue. */ DEBUGP (("Decided to load it.\n")); - return 1; + return true; out: DEBUGP (("Decided NOT to load it.\n")); - return 0; + return false; } /* This function determines whether we will consider downloading the @@ -591,28 +682,26 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth, possibly to another host, etc. It is needed very rarely, and thus it is merely a simple-minded wrapper around download_child_p. */ -static int -descend_redirect_p (const char *redirected, const char *original, int depth, - struct url *start_url_parsed, struct hash_table *blacklist) +static bool +descend_redirect_p (const char *redirected, struct url *orig_parsed, int depth, + struct url *start_url_parsed, struct hash_table *blacklist, + struct iri *iri) { - struct url *orig_parsed, *new_parsed; + struct url *new_parsed; struct urlpos *upos; - int success; + bool success; - orig_parsed = url_parse (original, NULL); assert (orig_parsed != NULL); - new_parsed = url_parse (redirected, NULL); + new_parsed = url_parse (redirected, NULL, NULL, false); assert (new_parsed != NULL); - upos = xmalloc (sizeof (struct urlpos)); - memset (upos, 0, sizeof (*upos)); + upos = xnew0 (struct urlpos); upos->url = new_parsed; success = download_child_p (upos, orig_parsed, depth, - start_url_parsed, blacklist); + start_url_parsed, blacklist, iri); - url_free (orig_parsed); url_free (new_parsed); xfree (upos); @@ -622,357 +711,4 @@ descend_redirect_p (const char *redirected, const char *original, int depth, return success; } - -#define ENSURE_TABLES_EXIST do { \ - if (!dl_file_url_map) \ - dl_file_url_map = make_string_hash_table (0); \ - if (!dl_url_file_map) \ - dl_url_file_map = make_string_hash_table (0); \ -} while (0) - -/* Return 1 if S1 and S2 are the same, except for "/index.html". 
The - three cases in which it returns one are (substitute any substring - for "foo"): - - m("foo/index.html", "foo/") ==> 1 - m("foo/", "foo/index.html") ==> 1 - m("foo", "foo/index.html") ==> 1 - m("foo", "foo/" ==> 1 - m("foo", "foo") ==> 1 */ - -static int -match_except_index (const char *s1, const char *s2) -{ - int i; - const char *lng; - - /* Skip common substring. */ - for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++) - ; - if (i == 0) - /* Strings differ at the very beginning -- bail out. We need to - check this explicitly to avoid `lng - 1' reading outside the - array. */ - return 0; - - if (!*s1 && !*s2) - /* Both strings hit EOF -- strings are equal. */ - return 1; - else if (*s1 && *s2) - /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux". */ - return 0; - else if (*s1) - /* S1 is the longer one. */ - lng = s1; - else - /* S2 is the longer one. */ - lng = s2; - - /* foo */ /* foo/ */ - /* foo/index.html */ /* or */ /* foo/index.html */ - /* ^ */ /* ^ */ - - if (*lng != '/') - /* The right-hand case. */ - --lng; - - if (*lng == '/' && *(lng + 1) == '\0') - /* foo */ - /* foo/ */ - return 1; - - return 0 == strcmp (lng, "/index.html"); -} - -static int -dissociate_urls_from_file_mapper (void *key, void *value, void *arg) -{ - char *mapping_url = (char *)key; - char *mapping_file = (char *)value; - char *file = (char *)arg; - - if (0 == strcmp (mapping_file, file)) - { - hash_table_remove (dl_url_file_map, mapping_url); - xfree (mapping_url); - xfree (mapping_file); - } - - /* Continue mapping. */ - return 0; -} - -/* Remove all associations from various URLs to FILE from dl_url_file_map. */ - -static void -dissociate_urls_from_file (const char *file) -{ - hash_table_map (dl_url_file_map, dissociate_urls_from_file_mapper, - (char *)file); -} - -/* Register that URL has been successfully downloaded to FILE. This - is used by the link conversion code to convert references to URLs - to references to local files. It is also being used to check if a - URL has already been downloaded. */ - -void -register_download (const char *url, const char *file) -{ - char *old_file, *old_url; - - ENSURE_TABLES_EXIST; - - /* With some forms of retrieval, it is possible, although not likely - or particularly desirable. If both are downloaded, the second - download will override the first one. When that happens, - dissociate the old file name from the URL. */ - - if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url)) - { - if (0 == strcmp (url, old_url)) - /* We have somehow managed to download the same URL twice. - Nothing to do. */ - return; - - if (match_except_index (url, old_url) - && !hash_table_contains (dl_url_file_map, url)) - /* The two URLs differ only in the "index.html" ending. For - example, one is "http://www.server.com/", and the other is - "http://www.server.com/index.html". Don't remove the old - one, just add the new one as a non-canonical entry. */ - goto url_only; - - hash_table_remove (dl_file_url_map, file); - xfree (old_file); - xfree (old_url); - - /* Remove all the URLs that point to this file. Yes, there can - be more than one such URL, because we store redirections as - multiple entries in dl_url_file_map. For example, if URL1 - redirects to URL2 which gets downloaded to FILE, we map both - URL1 and URL2 to FILE in dl_url_file_map. (dl_file_url_map - only points to URL2.) When another URL gets loaded to FILE, - we want both URL1 and URL2 dissociated from it. 
- - This is a relatively expensive operation because it performs - a linear search of the whole hash table, but it should be - called very rarely, only when two URLs resolve to the same - file name, *and* the ".1" extensions are turned off. - In other words, almost never. */ - dissociate_urls_from_file (file); - } - - hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url)); - - url_only: - /* A URL->FILE mapping is not possible without a FILE->URL mapping. - If the latter were present, it should have been removed by the - above `if'. So we could write: - - assert (!hash_table_contains (dl_url_file_map, url)); - - The above is correct when running in recursive mode where the - same URL always resolves to the same file. But if you do - something like: - - wget URL URL - - then the first URL will resolve to "FILE", and the other to - "FILE.1". In that case, FILE.1 will not be found in - dl_file_url_map, but URL will still point to FILE in - dl_url_file_map. */ - if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file)) - { - hash_table_remove (dl_url_file_map, url); - xfree (old_url); - xfree (old_file); - } - - hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file)); -} - -/* Register that FROM has been redirected to TO. This assumes that TO - is successfully downloaded and already registered using - register_download() above. */ - -void -register_redirection (const char *from, const char *to) -{ - char *file; - - ENSURE_TABLES_EXIST; - - file = hash_table_get (dl_url_file_map, to); - assert (file != NULL); - if (!hash_table_contains (dl_url_file_map, from)) - hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file)); -} - -/* Register that the file has been deleted. */ - -static void -register_delete_file (const char *file) -{ - char *old_url, *old_file; - - ENSURE_TABLES_EXIST; - - if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url)) - return; - - hash_table_remove (dl_file_url_map, file); - xfree (old_file); - xfree (old_url); - dissociate_urls_from_file (file); -} - -/* Register that FILE is an HTML file that has been downloaded. */ - -void -register_html (const char *url, const char *file) -{ - if (!downloaded_html_set) - downloaded_html_set = make_string_hash_table (0); - else if (hash_table_contains (downloaded_html_set, file)) - return; - - /* The set and the list should use the same copy of FILE, but the - slist interface insists on strduping the string it gets. Oh - well. */ - string_set_add (downloaded_html_set, file); - downloaded_html_list = slist_prepend (downloaded_html_list, file); -} - -/* This function is called when the retrieval is done to convert the - links that have been downloaded. It has to be called at the end of - the retrieval, because only then does Wget know conclusively which - URLs have been downloaded, and which not, so it can tell which - direction to convert to. - - The "direction" means that the URLs to the files that have been - downloaded get converted to the relative URL which will point to - that file. And the other URLs get converted to the remote URL on - the server. - - All the downloaded HTMLs are kept in downloaded_html_files, and - downloaded URLs in urls_downloaded. All the information is - extracted from these two lists. */ - -void -convert_all_links (void) -{ - slist *html; - long msecs; - int file_count = 0; - - struct wget_timer *timer = wtimer_new (); - - /* Destructively reverse downloaded_html_files to get it in the right order. - recursive_retrieve() used slist_prepend() consistently. 
*/ - downloaded_html_list = slist_nreverse (downloaded_html_list); - - for (html = downloaded_html_list; html; html = html->next) - { - struct urlpos *urls, *cur_url; - char *url; - char *file = html->string; - - /* Determine the URL of the HTML file. get_urls_html will need - it. */ - url = hash_table_get (dl_file_url_map, file); - if (!url) - { - DEBUGP (("Apparently %s has been removed.\n", file)); - continue; - } - - DEBUGP (("Scanning %s (from %s)\n", file, url)); - - /* Parse the HTML file... */ - urls = get_urls_html (file, url, NULL); - - /* We don't respect meta_disallow_follow here because, even if - the file is not followed, we might still want to convert the - links that have been followed from other files. */ - - for (cur_url = urls; cur_url; cur_url = cur_url->next) - { - char *local_name; - struct url *u = cur_url->url; - - if (cur_url->link_base_p) - { - /* Base references have been resolved by our parser, so - we turn the base URL into an empty string. (Perhaps - we should remove the tag entirely?) */ - cur_url->convert = CO_NULLIFY_BASE; - continue; - } - - /* We decide the direction of conversion according to whether - a URL was downloaded. Downloaded URLs will be converted - ABS2REL, whereas non-downloaded will be converted REL2ABS. */ - local_name = hash_table_get (dl_url_file_map, u->url); - - /* Decide on the conversion type. */ - if (local_name) - { - /* We've downloaded this URL. Convert it to relative - form. We do this even if the URL already is in - relative form, because our directory structure may - not be identical to that on the server (think `-nd', - `--cut-dirs', etc.) */ - cur_url->convert = CO_CONVERT_TO_RELATIVE; - cur_url->local_name = xstrdup (local_name); - DEBUGP (("will convert url %s to local %s\n", u->url, local_name)); - } - else - { - /* We haven't downloaded this URL. If it's not already - complete (including a full host name), convert it to - that form, so it can be reached while browsing this - HTML locally. */ - if (!cur_url->link_complete_p) - cur_url->convert = CO_CONVERT_TO_COMPLETE; - cur_url->local_name = NULL; - DEBUGP (("will convert url %s to complete\n", u->url)); - } - } - - /* Convert the links in the file. */ - convert_links (file, urls); - ++file_count; - - /* Free the data. */ - free_urlpos (urls); - } - - msecs = wtimer_elapsed (timer); - wtimer_delete (timer); - logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"), - file_count, (double)msecs / 1000); -} - -/* Cleanup the data structures associated with recursive retrieving - (the variables above). */ -void -recursive_cleanup (void) -{ - if (dl_file_url_map) - { - free_keys_and_values (dl_file_url_map); - hash_table_destroy (dl_file_url_map); - dl_file_url_map = NULL; - } - if (dl_url_file_map) - { - free_keys_and_values (dl_url_file_map); - hash_table_destroy (dl_url_file_map); - dl_url_file_map = NULL; - } - if (downloaded_html_set) - string_set_free (downloaded_html_set); - slist_free (downloaded_html_list); - downloaded_html_list = NULL; -} +/* vim:set sts=2 sw=2 cino+={s: */
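
The patch above restructures retrieve_tree() around a simple FIFO of pending URLs (struct queue_element / struct url_queue with url_enqueue()/url_dequeue()), which gives the breadth-first traversal the function comment describes. The following stand-alone sketch illustrates only that queue pattern and is not wget code: the referer, iri, html_allowed and css_allowed fields are dropped, and wget's xnew0/xstrdup/xfree wrappers are replaced with plain calloc/strdup/free. The head/tail bookkeeping mirrors what the patch does; keeping a tail pointer makes enqueue O(1), which matters because every link discovered in every downloaded page passes through this queue.

/* Minimal sketch of the FIFO queue used by retrieve_tree():
   enqueue at the tail, dequeue from the head, so URLs are fetched
   in breadth-first order.  Simplified relative to the real struct
   (no referer, iri, html_allowed or css_allowed members).  */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct queue_element {
  char *url;                    /* the URL to download */
  int depth;                    /* recursion depth at which it was found */
  struct queue_element *next;   /* next element in the queue */
};

struct url_queue {
  struct queue_element *head;   /* dequeue end */
  struct queue_element *tail;   /* enqueue end */
  int count;                    /* current number of queued URLs */
};

static void
url_enqueue (struct url_queue *queue, const char *url, int depth)
{
  struct queue_element *qel = calloc (1, sizeof *qel);
  qel->url = strdup (url);      /* queue owns its copy of the string */
  qel->depth = depth;
  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;
  if (!queue->head)
    queue->head = queue->tail;
  ++queue->count;
}

/* Take a URL out of the queue.  Return true on success, or false if
   the queue is empty.  On success the caller owns *URL.  */

static bool
url_dequeue (struct url_queue *queue, char **url, int *depth)
{
  struct queue_element *qel = queue->head;
  if (!qel)
    return false;
  queue->head = qel->next;
  if (!queue->head)
    queue->tail = NULL;
  *url = qel->url;
  *depth = qel->depth;
  --queue->count;
  free (qel);
  return true;
}

int
main (void)
{
  struct url_queue q = { 0 };
  char *url;
  int depth;

  /* Seed with a start URL at depth 0, then drain the queue; a real
     crawler would enqueue each page's children here at depth + 1.  */
  url_enqueue (&q, "http://example.com/", 0);
  while (url_dequeue (&q, &url, &depth))
    {
      printf ("fetch %s at depth %d\n", url, depth);
      free (url);
    }
  return 0;
}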