/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */
#include "wget.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>

#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "host.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
#include "html-url.h"
#include "css-url.h"
#include "spider.h"
/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */
  bool css_allowed;             /* whether the document is allowed to
                                   be treated as CSS. */
  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
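/* Note on the fields above (inferred from how they are used below):
   an empty queue has head == tail == NULL and count == 0; maxcount
   tracks the high-water mark of count and is reported only in debug
   output.  */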
/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}
/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth,
             bool html_allowed, bool css_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->css_allowed = css_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty.  */

static bool
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth,
             bool *html_allowed, bool *css_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;
  *css_allowed = qel->css_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}
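/* Illustrative sketch of how the queue API above fits together (the
   real driver is retrieve_tree below; "some_url" and the loop body
   are placeholders):

     struct url_queue *q = url_queue_new ();
     url_enqueue (q, xstrdup (some_url), NULL, 0, true, false);

     char *u, *ref;
     int d;
     bool html_ok, css_ok;
     while (url_dequeue (q, (const char **) &u, (const char **) &ref,
                         &d, &html_ok, &css_ok))
       {
         ... process u at depth d ...
         xfree (u);
         xfree_null (ref);
       }

     url_queue_delete (q);
*/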
static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *);
static bool descend_redirect_p (const char *, struct url *, int,
                                struct url *, struct hash_table *);
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
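/* A rough map of the steps above onto the code below (not
   exhaustive): step 1 is the initial url_enqueue of START_URL, step 3
   is url_dequeue, step 4 is retrieve_url, step 5 is
   get_urls_html/get_urls_css_file, and step 7 is download_child_p
   followed by url_enqueue.  */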
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      char *error = url_error (start_url, up_error_code);
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url, error);
      xfree (error);
      return URLERROR;
    }

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true, false);
  string_set_add (blacklist, start_url_parsed->url);
  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed, css_allowed;
      bool is_css = false;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed, &css_allowed))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          /* This duplication is ugly; the two checks should be
             combined.  */
          if (html_allowed
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            {
              descend = true;
              is_css = false;
            }
          if (css_allowed
              && downloaded_css_set
              && string_set_contains (downloaded_css_set, file))
            {
              descend = true;
              is_css = true;
            }
        }
      else
        {
          int dt = 0, url_err;
          char *redirected = NULL;
          struct url *url_parsed = url_parse (url, &url_err);

          if (!url_parsed)
            {
              char *error = url_error (url, url_err);
              logprintf (LOG_NOTQUIET, "%s: %s.\n", url, error);
              xfree (url);
              xfree (error);
              continue;
            }

          status = retrieve_url (url, &file, &redirected, referer, &dt, false);
          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            {
              descend = true;
              is_css = false;
            }

          /* A little different: css_allowed can override the content
             type, because lots of web servers serve CSS with an
             incorrect content type.  */
          if (file && status == RETROK
              && (dt & RETROKF)
              && ((dt & TEXTCSS) || css_allowed))
            {
              descend = true;
              is_css = true;
            }
          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url_parsed, depth,
                                           start_url_parsed, blacklist))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
          url_free (url_parsed);
        }

      if (opt.spider)
        {
          visited_url (url, referer);
        }
      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }
      /* If the downloaded document was HTML or CSS, parse it and
         enqueue the links it contains. */

      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = is_css ? get_urls_css_file (file, url) :
                       get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              char *referer_url = url;
              bool strip_auth = (url_parsed != NULL
                                 && url_parsed->user != NULL);
              assert (url_parsed != NULL);

              /* Strip auth info if present */
              if (strip_auth)
                referer_url = url_string (url_parsed, URL_AUTH_HIDE);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth,
                                        start_url_parsed, blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (referer_url), depth + 1,
                                   child->link_expect_html,
                                   child->link_expect_css);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              if (strip_auth)
                xfree (referer_url);
              url_free (url_parsed);
              free_urlpos (children);
            }
        }
      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }

      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    bool d4, d5;
    while (url_dequeue (queue,
                        (const char **)&d1, (const char **)&d2, &d3, &d4, &d5))
      {
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */

static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
          DEBUGP (("download_child_p: parent->url is: %s\n",
                   quote (parent->url)));
          visited_url (url, referrer);
          xfree (referrer);
        }
      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
     gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */

  /* Determine whether URL under consideration has a HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);
  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out. */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out. */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode. */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.)  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites): */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
  /* 7. Do not cross to a different host unless spanning hosts was
     requested. */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
  /* 8. Consult robots.txt. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);

              /* Delete the robots.txt file if we chose to either delete the
                 files after downloading or we're just running a spider. */
              if (opt.delete_after || opt.spider)
                {
                  logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n",
                               strerror (errno));
                }

              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static bool
descend_redirect_p (const char *redirected, struct url *orig_parsed, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *new_parsed;
  struct urlpos *upos;
  bool success;

  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}

/* vim:set sts=2 sw=2 cino+={s: */