/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
   2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */
#include "wget.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>

#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "ftp.h"
#include "host.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
#include "html-url.h"
#include "css-url.h"
#include "spider.h"

/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */
  struct iri *iri;              /* IRI/encoding context inherited from
                                   the referring document */
  bool css_allowed;             /* whether the document is allowed to
                                   be treated as CSS. */
  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
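
/* The queue is a singly-linked FIFO: elements are appended at TAIL
   and taken from HEAD.  COUNT is the current queue length; MAXCOUNT
   is its high-water mark, reported only in debug output.  */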
/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}

/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue, struct iri *i,
             const char *url, const char *referer, int depth,
             bool html_allowed, bool css_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->iri = i;
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->css_allowed = css_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n",
           quotearg_n_style (0, escape_quoting_style, url), depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (i)
    DEBUGP (("[IRI Enqueuing %s with %s\n", quote_n (0, url),
             i->uri_encoding ? quote_n (1, i->uri_encoding) : "None"));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty.  */

static bool
url_dequeue (struct url_queue *queue, struct iri **i,
             const char **url, const char **referer, int *depth,
             bool *html_allowed, bool *css_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *i = qel->iri;
  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;
  *css_allowed = qel->css_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n",
           quotearg_n_style (0, escape_quoting_style, qel->url), qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}
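
/* Note that ownership of the URL, referer, and IRI of a dequeued
   element passes to the caller, which is expected to free them (as
   retrieve_tree does at the bottom of its main loop).  */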
static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *, struct iri *);
static bool descend_redirect_p (const char *, struct url *, int,
                                struct url *, struct hash_table *, struct iri *);
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree, on
   the other hand, implements breadth-first traversal of the tree,
   which results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML or CSS and its depth does not exceed
        the maximum depth, get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
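
/* A minimal usage sketch (not from this file): a caller that has
   already parsed the start URL would invoke this function roughly as

     int err;
     struct url *u = url_parse ("http://example.com/", &err, NULL, true);
     if (u)
       status = retrieve_tree (u, NULL);

   where passing NULL for PI lets retrieve_tree derive the IRI
   context from the current locale, as done below.  */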
uerr_t
retrieve_tree (struct url *start_url_parsed, struct iri *pi)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  struct iri *i = iri_new ();
#define COPYSTR(x) ((x) ? xstrdup (x) : NULL)
  /* Duplicate the incoming IRI context if one was supplied... */
  if (pi)
    {
      i->uri_encoding = COPYSTR (pi->uri_encoding);
      i->content_encoding = COPYSTR (pi->content_encoding);
      i->utf8_encode = pi->utf8_encode;
    }
  else
    /* ...otherwise fall back to the locale's encoding. */
    set_uri_encoding (i, opt.locale, true);
#undef COPYSTR
  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, i, xstrdup (start_url_parsed->url), NULL, 0, true,
               false);
  string_set_add (blacklist, start_url_parsed->url);
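
  /* Main breadth-first loop: dequeue a URL, download it, and, if the
     result is HTML or CSS, enqueue the acceptable links it contains.
     The loop ends when the queue is exhausted, when the download
     quota is exceeded, or when a write error aborts the run.  */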
  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed, css_allowed;
      bool is_css = false;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue, (struct iri **) &i,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed, &css_allowed))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          /* FIXME: the HTML and CSS cases should be combined.  */
          if (html_allowed
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            {
              descend = true;
              is_css = false;
            }
          if (css_allowed
              && downloaded_css_set
              && string_set_contains (downloaded_css_set, file))
            {
              descend = true;
              is_css = true;
            }
        }
      else
        {
          int dt = 0, url_err;
          char *redirected = NULL;
          struct url *url_parsed = url_parse (url, &url_err, i, true);

          status = retrieve_url (url_parsed, url, &file, &redirected, referer,
                                 &dt, false, i, true);

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            {
              descend = true;
              is_css = false;
            }

          /* A little different: css_allowed can override the content
             type, since lots of web servers serve CSS with an
             incorrect content type.  */
          if (file && status == RETROK
              && (dt & RETROKF)
              && ((dt & TEXTCSS) || css_allowed))
            {
              descend = true;
              is_css = true;
            }

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url_parsed, depth,
                                           start_url_parsed, blacklist, i))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
          else
            {
              xfree (url);
              url = xstrdup (url_parsed->url);
            }
          url_free (url_parsed);
        }
      if (opt.spider)
        {
          visited_url (url, referer);
        }

      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded as
                 requested.  */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }
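
      /* For example, with -r -l 2 -p: a page at depth 2 is a
         recursion leaf, but its inline requisites (images, CSS,
         frames) are still fetched at depth 3; if the depth-2 page is
         a frameset, the frame documents loaded at depth 3 can in
         turn pull their own requisites at depth 4.  */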
      /* If the downloaded document was HTML or CSS, parse it and enqueue the
         links it contains. */

      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = is_css ? get_urls_css_file (file, url) :
                       get_urls_html (file, url, &meta_disallow_follow, i);

          /* A <meta name="robots" content="nofollow"> tag sets
             META_DISALLOW_FOLLOW; honor it unless robots handling is
             disabled.  */
          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }
          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL, i, true);
              struct iri *ci;
              char *referer_url = url;
              bool strip_auth = (url_parsed != NULL
                                 && url_parsed->user != NULL);
              assert (url_parsed != NULL);

              /* Strip auth info if present */
              if (strip_auth)
                referer_url = url_string (url_parsed, URL_AUTH_HIDE);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth,
                                        start_url_parsed, blacklist, i))
                    {
                      ci = iri_new ();
                      set_uri_encoding (ci, i->content_encoding, false);
                      url_enqueue (queue, ci, xstrdup (child->url->url),
                                   xstrdup (referer_url), depth + 1,
                                   child->link_expect_html,
                                   child->link_expect_css);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              if (strip_auth)
                xfree (referer_url);
              url_free (url_parsed);
              free_urlpos (children);
            }
        }
      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }
      xfree (url);
      xfree_null (referer);
      xfree_null (file);
      iri_free (i);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    bool d4, d5;
    struct iri *d6;
    while (url_dequeue (queue, (struct iri **)&d6,
                        (const char **)&d1, (const char **)&d2, &d3, &d4, &d5))
      {
        iri_free (d6);
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  string_set_free (blacklist);

  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
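
/* (The memoization works through check 8 below: a URL that
   robots.txt forbids is added to BLACKLIST, so any later encounter
   of it is rejected by the cheap blacklist lookup at the top of this
   function.)  */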
static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist,
                  struct iri *iri)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
          DEBUGP (("download_child_p: parent->url is: %s\n",
                   quote (parent->url)));
          visited_url (url, referrer);
          xfree (referrer);
        }

      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if the scheme is not HTTP, and we are not supposed to load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
     gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */
  /* Determine whether URL under consideration has an HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.)  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites): */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
  /* 7. Check for the same host (unless --span-hosts was given). */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
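
  /* The host comparison is only made between similar schemes,
     presumably because e.g. an FTP link found on an HTTP page is
     already governed by --follow-ftp (check 1) rather than by host
     spanning.  */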
  /* 8. Consult robots.txt. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile, iri))
            {
              specs = res_parse_from_file (rfile);

              /* Delete the robots.txt file if we chose to either delete the
                 files after downloading or we're just running a spider. */
              if (opt.delete_after || opt.spider)
                {
                  logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n",
                               strerror (errno));
                }

              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }
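
      /* The specs are cached per host:port by res_register_specs, so
         robots.txt is fetched at most once per server during a run;
         even a failed retrieval is memoized via the dummy specs
         above.  */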
      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static bool
descend_redirect_p (const char *redirected, struct url *orig_parsed, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist,
                    struct iri *iri)
{
  struct url *new_parsed;
  struct urlpos *upos;
  bool success;

  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL, NULL, false);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist, iri);

  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}
/* vim:set sts=2 sw=2 cino+={s: */