/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */

#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */

/* Functions for maintaining the URL queue.  */

struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */
  struct iri *iri;              /* sXXXav: IRI/encoding info for this URL */
  bool css_allowed;             /* whether the document is allowed to
                                   be treated as CSS. */
  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
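
/* The queue is a plain singly linked FIFO list: url_enqueue () appends
   at TAIL, url_dequeue () removes from HEAD.  COUNT tracks the current
   length and MAXCOUNT the largest length seen so far; both are only
   reported in debug output.  */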

/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}

/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}

/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue, struct iri *i,
             const char *url, const char *referer, int depth,
             bool html_allowed, bool css_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->iri = i;
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->css_allowed = css_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (i)
    printf ("[Enqueuing %s with %s\n", url, i->uri_encoding);

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}

/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty.  */

static bool
url_dequeue (struct url_queue *queue, struct iri **i,
             const char **url, const char **referer, int *depth,
             bool *html_allowed, bool *css_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *i = qel->iri;
  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;
  *css_allowed = qel->css_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}
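
/* Typical use, as in retrieve_tree () below: the caller enqueues the
   canonical form of a URL together with its IRI data and referer, and
   later pulls the items back out in the same order, e.g.

     url_enqueue (queue, i, xstrdup (start_url_parsed->url), NULL, 0,
                  true, false);
     ...
     while (url_dequeue (queue, &i, &url, &referer, &depth,
                         &html_allowed, &css_allowed))
       ...

   The strings handed to url_enqueue () are heap-allocated and become
   the caller's responsibility again after url_dequeue ().  */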

static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *, struct iri *);
static bool descend_redirect_p (const char *, const char *, int,
                                struct url *, struct hash_table *, struct iri *);

/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML or CSS and its depth does not exceed maximum
        depth, get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
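
/* In outline (simplified; IRI bookkeeping, --spider/--delete-after
   handling and error paths omitted), the loop below is:

     url_enqueue (queue, start_url);
     while (url_dequeue (queue, &url, &depth, ...))
       {
         status = retrieve_url (url, &file, ...);
         if (file is HTML or CSS and depth is within the maximum)
           for (each child found by get_urls_html ()/get_urls_css_file ())
             if (download_child_p (child, ...))
               {
                 url_enqueue (queue, child, depth + 1, ...);
                 string_set_add (blacklist, child);
               }
       }
*/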

uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed;
  struct iri *i = iri_new ();
  set_uri_encoding (i, opt.locale);

  start_url_parsed = url_parse (start_url, &up_error_code, i);
  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, i, xstrdup (start_url_parsed->url), NULL, 0, true,
               false);
  string_set_add (blacklist, start_url_parsed->url);

  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed, css_allowed;
      bool is_css = false;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue, (struct iri **) &i,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed, &css_allowed))
        break;

      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         once.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          /* this sucks, needs to be combined! */
          if (html_allowed
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            descend = true;

          if (css_allowed
              && downloaded_css_set
              && string_set_contains (downloaded_css_set, file))
            {
              descend = true;
              is_css = true;
            }
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;

          status = retrieve_url (url, &file, &redirected, referer, &dt,
                                 false, i);

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            descend = true;

          /* a little different, css_allowed can override content type
             lots of web servers serve css with an incorrect content type  */
          if (file && status == RETROK
              && (dt & RETROKF)
              && ((dt & TEXTCSS) || css_allowed))
            {
              descend = true;
              is_css = true;
            }

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist, i))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }

      if (opt.spider)
        {
          visited_url (url, referer);
        }

      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out.  */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }

      /* If the downloaded document was HTML or CSS, parse it and enqueue the
         links it contains. */

      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = is_css ? get_urls_css_file (file, url) :
                       get_urls_html (file, url, &meta_disallow_follow, i);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL, i);
              char *referer_url = url;
              bool strip_auth = (url_parsed != NULL
                                 && url_parsed->user != NULL);
              assert (url_parsed != NULL);

              /* Strip auth info if present */
              if (strip_auth)
                referer_url = url_string (url_parsed, URL_AUTH_HIDE);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist, i))
                    {
                      struct iri *ci = iri_new ();
                      set_uri_encoding (ci, i->content_encoding);
                      url_enqueue (queue, ci, xstrdup (child->url->url),
                                   xstrdup (referer_url), depth + 1,
                                   child->link_expect_html,
                                   child->link_expect_css);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              if (strip_auth)
                xfree (referer_url);
              url_free (url_parsed);
              free_urlpos (children);
            }
        }

      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }

      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }

  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    bool d4, d5;
    struct iri *d6;
    while (url_dequeue (queue, (struct iri **)&d6,
                        (const char **)&d1, (const char **)&d2, &d3, &d4, &d5))
      {
        iri_free (d6);
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}

/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
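
/* For example, once robots.txt has been fetched and a URL found to be
   forbidden, that URL is added to BLACKLIST (see the res_match_path ()
   check below), so any further occurrence of the same URL is rejected
   by the cheap string_set_contains () test at the top of this function
   instead of re-running the robots machinery.  */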

static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist,
                  struct iri *iri)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
          DEBUGP (("download_child_p: parent->url is: %s\n", quote (parent->url)));
          visited_url (url, referrer);
          xfree (referrer);
        }

      DEBUGP (("Already on the black list.\n"));
      goto out;
    }

  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory-consuming tests should be put later on
     the list.  */

  /* Determine whether URL under consideration has an HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }

  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }

  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }

  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }

  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.)  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites): */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }

  /* 7. */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }

  /* 8. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile, iri))
            {
              specs = res_parse_from_file (rfile);

              /* Delete the robots.txt file if we chose to either delete the
                 files after downloading or we're just running a spider. */
              if (opt.delete_after || opt.spider)
                {
                  logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n",
                               strerror (errno));
                }

              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }

  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}

/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */
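
/* It is called from retrieve_tree () when retrieve_url () reports that
   the document came back from a redirected location: the redirected
   URL is run through the same download_child_p () criteria as a newly
   discovered link, with the original (pre-redirect) URL acting as the
   parent.  */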

static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist,
                    struct iri *iri)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  bool success;

  orig_parsed = url_parse (original, NULL, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL, NULL);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist, iri);

  url_free (orig_parsed);
  url_free (new_parsed);

  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}

/* vim:set sts=2 sw=2 cino+={s: */