/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
   2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation,
   Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */
/* Functions for maintaining the URL queue.  */

struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */
  struct iri *iri;              /* sXXXav */
  bool css_allowed;             /* whether the document is allowed to
                                   be treated as CSS. */
  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}
/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue, struct iri *i,
             const char *url, const char *referer, int depth,
             bool html_allowed, bool css_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->iri = i;
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->css_allowed = css_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n",
           quotearg_n_style (0, escape_quoting_style, url), depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (i)
    DEBUGP (("[IRI Enqueuing %s with %s\n", quote_n (0, url),
             i->uri_encoding ? quote_n (1, i->uri_encoding) : "None"));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty.  */

static bool
url_dequeue (struct url_queue *queue, struct iri **i,
             const char **url, const char **referer, int *depth,
             bool *html_allowed, bool *css_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *i = qel->iri;
  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;
  *css_allowed = qel->css_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n",
           quotearg_n_style (0, escape_quoting_style, qel->url), qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}
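/* Illustrative sketch (not part of the Wget sources): how the FIFO
   queue above is typically driven.  Note the ownership convention:
   the queue stores the pointers it is given without copying them, so
   the caller hands over the url/referer strings (and the iri) on
   enqueue and gets them back to free on dequeue, exactly as
   retrieve_tree does below.  The example URL is arbitrary; the block
   is guarded with #if 0 so it is never compiled.  */
#if 0
static void
url_queue_usage_sketch (void)
{
  struct url_queue *q = url_queue_new ();
  struct iri *iri;
  char *url, *referer;
  int depth;
  bool html_allowed, css_allowed;

  /* Ownership of the string and the iri passes to the queue.  */
  url_enqueue (q, iri_new (), xstrdup ("http://example.com/"),
               NULL, 0, true, false);

  /* Drain in FIFO order; ownership comes back to the caller.  */
  while (url_dequeue (q, &iri, (const char **) &url, (const char **) &referer,
                      &depth, &html_allowed, &css_allowed))
    {
      /* ... process URL here ... */
      iri_free (iri);
      xfree (url);
      xfree_null (referer);
    }

  url_queue_delete (q);
}
#endif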
static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *, struct iri *);
static bool descend_redirect_p (const char *, struct url *, int,
                                struct url *, struct hash_table *, struct iri *);
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
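/* For illustration (this example is not from the original sources):
   if the start page A links to B and C, and B links to D, the old
   depth-first code would fetch A, B, D, C, whereas the queue-based
   loop below fetches A, B, C, D -- every document at depth 1 before
   any document at depth 2.  That is the "nicer ordering" referred to
   above.  */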
uerr_t
retrieve_tree (struct url *start_url_parsed, struct iri *pi)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  struct iri *i = iri_new ();

#define COPYSTR(x) ((x) ? xstrdup (x) : NULL)
  /* Duplicate the pi struct if it is not NULL. */
  if (pi)
    {
      i->uri_encoding = COPYSTR (pi->uri_encoding);
      i->content_encoding = COPYSTR (pi->content_encoding);
      i->utf8_encode = pi->utf8_encode;
    }
  else
    set_uri_encoding (i, opt.locale, true);
#undef COPYSTR
  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, i, xstrdup (start_url_parsed->url), NULL, 0, true,
               false);
  string_set_add (blacklist, start_url_parsed->url);
  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed, css_allowed;
      bool is_css = false;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue, (struct iri **) &i,
                        (const char **) &url, (const char **) &referer,
                        &depth, &html_allowed, &css_allowed))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         once.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          bool is_css_bool;

          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          if ((is_css_bool = (css_allowed
                              && downloaded_css_set
                              && string_set_contains (downloaded_css_set, file)))
              || (html_allowed
                  && downloaded_html_set
                  && string_set_contains (downloaded_html_set, file)))
            {
              descend = true;
              is_css = is_css_bool;
            }
        }
      else
        {
          int dt = 0, url_err;
          char *redirected = NULL;
          struct url *url_parsed = url_parse (url, &url_err, i, true);

          status = retrieve_url (url_parsed, url, &file, &redirected, referer,
                                 &dt, false, i, true);

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            {
              descend = true;
              is_css = false;
            }

          /* A little different: css_allowed can override the content
             type, because lots of web servers serve CSS with an
             incorrect content type.  */
          if (file && status == RETROK
              && (dt & RETROKF)
              && ((dt & TEXTCSS) || css_allowed))
            {
              descend = true;
              is_css = true;
            }
          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url_parsed, depth,
                                           start_url_parsed, blacklist, i))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
          else
            {
              xfree (url);
              url = xstrdup (url_parsed->url);
            }
          url_free (url_parsed);
        }
      if (opt.spider)
        {
          visited_url (url, referer);
        }
      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }
      /* If the downloaded document was HTML or CSS, parse it and enqueue the
         links it contains. */
      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = is_css ? get_urls_css_file (file, url) :
                       get_urls_html (file, url, &meta_disallow_follow, i);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL, i, true);
              struct iri *ci;
              char *referer_url = url;
              bool strip_auth = (url_parsed != NULL
                                 && url_parsed->user != NULL);
              assert (url_parsed != NULL);

              /* Strip auth info if present */
              if (strip_auth)
                referer_url = url_string (url_parsed, URL_AUTH_HIDE);
              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist, i))
                    {
                      ci = iri_new ();
                      set_uri_encoding (ci, i->content_encoding, false);
                      url_enqueue (queue, ci, xstrdup (child->url->url),
                                   xstrdup (referer_url), depth + 1,
                                   child->link_expect_html,
                                   child->link_expect_css);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              if (strip_auth)
                xfree (referer_url);

              url_free (url_parsed);
              free_urlpos (children);
            }
        }
      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }
      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    bool d4, d5;
    struct iri *d6;
    while (url_dequeue (queue, (struct iri **) &d6,
                        (const char **) &d1, (const char **) &d2, &d3, &d4, &d5))
      {
        iri_free (d6);
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  string_set_free (blacklist);
  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
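/* Illustrative sketch (not part of the Wget sources) of the
   memoization pattern described above: once a URL fails an expensive
   test, it is added to BLACKLIST, so the cheap string_set_contains
   lookup at the top of download_child_p rejects any later occurrence
   of the same URL immediately.  The helper expensive_check is
   hypothetical; in the real code below the costly step is fetching
   and parsing robots.txt.  Guarded with #if 0 so it is never
   compiled.  */
#if 0
static bool
check_with_memoization (const char *url, struct hash_table *blacklist)
{
  if (string_set_contains (blacklist, url))
    return false;                  /* already rejected (or already queued) */

  if (!expensive_check (url))      /* hypothetical costly test */
    {
      /* Remember the negative result so the test is never repeated.  */
      string_set_add (blacklist, url);
      return false;
    }

  return true;
}
#endif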
static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist,
                  struct iri *iri)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;
  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));
  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
          DEBUGP (("download_child_p: parent->url is: %s\n", quote (parent->url)));
          visited_url (url, referrer);
          xfree (referrer);
        }
      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if the scheme is not HTTP, and we don't follow non-HTTP links
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory-consuming tests should be put later on
     the list.  */
  /* Determine whether URL under consideration has an HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);
  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && (u->scheme != start_url_parsed->scheme
          || u->port == start_url_parsed->port)
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
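  /* Worked example (illustrative, not from the original sources): with
     --no-parent and a start URL of http://example.com/a/b/, a link to
     http://example.com/a/c/page.html lies outside the start directory
     and fails the subdir_p test above, so it is rejected, while
     http://example.com/a/b/d/page.html is kept.  A link on a different
     host or with a different scheme skips this check entirely, as does
     an inline page requisite when -p is in effect.  */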
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }

  if (!accept_url (url))
    {
      DEBUGP (("%s is excluded/not-included through regex.\n", url));
      goto out;
    }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.)  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites):  */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
  /* 7. Check for the same host (unless --span-hosts was specified). */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
  /* 8. Consult robots.txt if applicable. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile, iri))
            {
              specs = res_parse_from_file (rfile);

              /* Delete the robots.txt file if we chose to either delete the
                 files after downloading or we're just running a spider. */
              if (opt.delete_after || opt.spider)
                {
                  logprintf (LOG_VERBOSE, _("Removing %s.\n"), rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n",
                               strerror (errno));
                }

              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */
static bool
descend_redirect_p (const char *redirected, struct url *orig_parsed, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist,
                    struct iri *iri)
{
  struct url *new_parsed;
  struct urlpos *upos;
  bool success;

  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL, NULL, false);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist, iri);

  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}
/* vim:set sts=2 sw=2 cino+={s: */