/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996-2006 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA.

In addition, as a special exception, the Free Software Foundation
gives permission to link the code of its release of Wget with the
OpenSSL project's "OpenSSL" library (or with modified versions of it
that use the same license as the "OpenSSL" library), and distribute
the linked executables.  You must obey the GNU General Public License
in all respects for all of the code used other than "OpenSSL".  If you
modify this file, you may extend this exception to your version of the
file, but you are not obligated to do so.  If you do not wish to do
so, delete this exception statement from your version.  */
#include <config.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>

#include "wget.h"
#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "ftp.h"
#include "host.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
#include "html-url.h"
#include "css-url.h"
#include "spider.h"

/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */
  bool css_allowed;             /* whether the document is allowed to
                                   be treated as CSS. */
  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};

/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}

/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}

/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  (A short usage sketch follows url_dequeue below.)  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth,
             bool html_allowed, bool css_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->css_allowed = css_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}

/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty. */

static bool
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth,
             bool *html_allowed, bool *css_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;
  *css_allowed = qel->css_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}
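
/* A minimal usage sketch of the queue API above, for illustration
   only and therefore not compiled in; the URLs are made up.  Because
   the queue is FIFO, "/a" is dequeued before "/b".  */
#if 0
static void
url_queue_example (void)
{
  struct url_queue *queue = url_queue_new ();
  char *url, *referer;
  int depth;
  bool html_allowed, css_allowed;

  url_enqueue (queue, xstrdup ("http://example.com/a"), NULL, 0, true, false);
  url_enqueue (queue, xstrdup ("http://example.com/b"), NULL, 1, true, false);

  while (url_dequeue (queue,
                      (const char **)&url, (const char **)&referer,
                      &depth, &html_allowed, &css_allowed))
    {
      /* First iteration: url is "http://example.com/a" at depth 0;
         second iteration: "/b" at depth 1.  The dequeuer owns the
         strings, just as in retrieve_tree below.  */
      xfree (url);
      xfree_null (referer);
    }
  url_queue_delete (queue);
}
#endif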

static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *);
static bool descend_redirect_p (const char *, const char *, int,
                                struct url *, struct hash_table *);

/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
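
/* For illustration, assume a start page A that links to B and C,
   where B in turn links to D (a hypothetical link structure).  The
   old depth-first code fetched the documents in the order A, B, D, C;
   the breadth-first loop below fetches them in the order A, B, C, D,
   i.e. everything at depth N before anything at depth N+1:

       A           depth 0
      / \
     B   C         depth 1
     |
     D             depth 2   */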
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet. */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL. */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true, false);
  string_set_add (blacklist, start_url_parsed->url);

  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed, css_allowed;
      bool is_css = false;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed, &css_allowed))
        break;

      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          /* This sucks, needs to be combined! */
          if (file
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            {
              descend = true;
              is_css = false;
            }
          if (file
              && downloaded_css_set
              && string_set_contains (downloaded_css_set, file))
            {
              descend = true;
              is_css = true;
            }
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;

          status = retrieve_url (url, &file, &redirected, referer, &dt, false);

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            {
              descend = true;
              is_css = false;
            }

          /* A little different: css_allowed can override the content
             type, because lots of web servers serve CSS with an
             incorrect content type.  */
          if (file && status == RETROK
              && (dt & RETROKF)
              && ((dt & TEXTCSS) || css_allowed))
            {
              descend = true;
              is_css = true;
            }

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it. */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }

      if (opt.spider)
        {
          visited_url (url, referer);
        }

      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded correctly.
                 For example, with `-l 1 -p', the requisites of a
                 depth-1 page live at depth 2, and the requisites of a
                 frame page found among them live at depth 3.  */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }

      /* If the downloaded document was HTML or CSS, parse it and enqueue the
         links it contains. */

      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = is_css ? get_urls_css_file (file, url) :
                       get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              assert (url_parsed != NULL);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (url), depth + 1,
                                   child->link_expect_html,
                                   child->link_expect_css);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice. */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              url_free (url_parsed);
              free_urlpos (children);
            }
        }

      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }

      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }

  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    bool d4, d5;
    while (url_dequeue (queue,
                        (const char **)&d1, (const char **)&d2, &d3, &d4, &d5))
      {
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}

/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */

static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, true);
          DEBUGP (("download_child_p: parent->url is: `%s'\n", parent->url));
          visited_url (url, referrer);
          xfree (referrer);
        }

      DEBUGP (("Already on the black list.\n"));
      goto out;
    }

  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */
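
  /* A concrete example of check 4 (the URLs are hypothetical): with
     `--no-parent' and the start URL http://example.com/dir/index.html,
     a link to http://example.com/other/page.html passes checks 1-3
     but is rejected by check 4, because "/other/" is not a
     subdirectory of "/dir/".  */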

  /* Determine whether URL under consideration has a HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }

  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }

  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode. */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }

  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists. */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }

  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.) */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites): */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }

  /* 7. */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }

  /* 8. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);
              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them. */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say. */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }

  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}

/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p. */
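
/* For example (made-up URLs): if http://example.com/page.html gets
   redirected to http://elsewhere.example.org/page.html and
   --span-hosts is off, the redirect target fails the same-host check
   (check 7) in download_child_p, so the children of the redirected
   document are not followed.  */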

static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  bool success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}