/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */
#include "wget.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>

#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "host.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
#include "html-url.h"
#include "css-url.h"
#include "spider.h"
/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */
  bool css_allowed;             /* whether the document is allowed to
                                   be treated as CSS. */
  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
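/* The queue above is a plain singly linked FIFO: elements are
   appended at TAIL and removed from HEAD, so URLs come back out in
   the order in which they were discovered.  COUNT and MAXCOUNT track
   the queue length, which the code below uses only for debug
   output.  */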
/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}
/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth,
             bool html_allowed, bool css_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->css_allowed = css_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty.  */

static bool
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth,
             bool *html_allowed, bool *css_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;
  *css_allowed = qel->css_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}
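/* A rough usage sketch (mirroring retrieve_tree below): the queue
   stores the pointers it is given, so callers pass heap-allocated
   strings and free them once the dequeued element has been processed:

     url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0,
                  true, false);
     ...
     if (url_dequeue (queue, (const char **)&url, (const char **)&referer,
                      &depth, &html_allowed, &css_allowed))
       {
         ... download URL, possibly enqueue its children ...
         xfree (url);
         xfree_null (referer);
       }
*/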
static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *);
static bool descend_redirect_p (const char *, const char *, int,
                                struct url *, struct hash_table *);
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree, on
   the other hand, implements breadth-first traversal of the tree,
   which results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML or CSS and its depth does not exceed
        maximum depth, get the list of URLs embedded therein.
     6. for each of those URLs do the following:

     7. if the URL is not one of those downloaded before, and if it
        satisfies the criteria specified by the various command-line
        options, add it to the queue. */
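/* As an illustration of the ordering: if START_URL links to pages B
   and C, breadth-first traversal downloads START_URL, then B and C,
   and only then the pages linked from B and C, instead of following
   B's subtree all the way down before C is even touched.  */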
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true, false);
  string_set_add (blacklist, start_url_parsed->url);
  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed, css_allowed;
      bool is_css = false;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */
      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed, &css_allowed))
        break;

      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         once.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          /* this sucks, needs to be combined! */
          if (html_allowed
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            {
              descend = true;
              is_css = false;
            }
          if (css_allowed
              && downloaded_css_set
              && string_set_contains (downloaded_css_set, file))
            {
              descend = true;
              is_css = true;
            }
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;

          status = retrieve_url (url, &file, &redirected, referer, &dt, false);

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            {
              descend = true;
              is_css = false;
            }

          /* a little different, css_allowed can override content type
             lots of web servers serve css with an incorrect content type
          */
          if (file && status == RETROK
              && (dt & RETROKF)
              && ((dt & TEXTCSS) || css_allowed))
            {
              descend = true;
              is_css = true;
            }

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }
      if (opt.spider)
        {
          visited_url (url, referer);
        }
      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out.  */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }
      /* If the downloaded document was HTML or CSS, parse it and enqueue the
         links it contains. */
      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = is_css ? get_urls_css_file (file, url) :
                       get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              char *referer_url = url;
              bool strip_auth = (url_parsed != NULL
                                 && url_parsed->user != NULL);
              assert (url_parsed != NULL);

              /* Strip auth info if present */
              if (strip_auth)
                referer_url = url_string (url_parsed, URL_AUTH_HIDE);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (referer_url), depth + 1,
                                   child->link_expect_html,
                                   child->link_expect_css);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              if (strip_auth)
                xfree (referer_url);
              url_free (url_parsed);
            }
          free_urlpos (children);
        }
      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }
      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    bool d4, d5;
    while (url_dequeue (queue,
                        (const char **)&d1, (const char **)&d2, &d3, &d4, &d5))
      {
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);
  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
          DEBUGP (("download_child_p: parent->url is: `%s'\n", parent->url));
          visited_url (url, referrer);
          xfree (referrer);
        }
      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory-consuming tests should be put later on
     the list.  */
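/* As an illustration of checks 4 and 6 below: with --no-parent and a
   start URL of http://example.com/docs/, a link to
   http://example.com/images/pic.gif fails the subdir_p test unless it
   is an inline requisite fetched under -p, and a link that matches the
   -R rejection list is dropped by acceptable unless it looks like a
   non-leaf HTML page that may still lead to acceptable files.
   (example.com is used here purely for illustration.)  */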
  /* Determine whether URL under consideration has a HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.)  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites): */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
  /* 7. Check for the same host if spanning hosts is not enabled.  */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
  /* 8. Consult robots.txt for HTTP-like schemes.  */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);

              /* Delete the robots.txt file if we chose to either delete the
                 files after downloading or we're just running a spider. */
              if (opt.delete_after || opt.spider)
                {
                  logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n",
                               strerror (errno));
                }

              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */
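/* For instance, if a page is requested from http://example.com/a/ but
   the server redirects it to http://other.example.com/b/, the redirect
   target is re-checked with download_child_p as though it were a link
   found in the original document, so --no-parent, host-spanning and
   similar rules still apply.  (Hypothetical hosts, for illustration.)  */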
static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  bool success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}
/* vim:set sts=2 sw=2 cino+={s: */