/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */

#define USE_GNULIB_ALLOC

#include "wget.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>

#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "host.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
#include "html-url.h"
#include "spider.h"

/* Functions for maintaining the URL queue.  */

struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */

  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};

/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}

/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}

/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth, bool html_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}

/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty.  */

static bool
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth,
             bool *html_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}

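/* Illustrative sketch (not part of the original file): how the FIFO
   queue above is meant to be driven.  The breadth-first traversal in
   retrieve_tree below follows the same shape; the example URL is, of
   course, only a placeholder.  */
#if 0
static void
url_queue_example (void)
{
  struct url_queue *queue = url_queue_new ();
  char *url, *referer;
  int depth;
  bool html_allowed;

  /* The queue takes ownership of the strings handed to url_enqueue,
     hence the xstrdup.  */
  url_enqueue (queue, xstrdup ("http://example.com/"), NULL, 0, true);

  /* Dequeue in FIFO order until the queue is empty.  */
  while (url_dequeue (queue,
                      (const char **)&url, (const char **)&referer,
                      &depth, &html_allowed))
    {
      /* ... retrieve URL here, enqueue its children at depth + 1 ... */
      xfree (url);
      xfree_null (referer);
    }

  url_queue_delete (queue);
}
#endif
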
static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *);
static bool descend_redirect_p (const char *, const char *, int,
                                struct url *, struct hash_table *);

/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. retrieve it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue.  */

uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet. */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL. */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true);
  string_set_add (blacklist, start_url_parsed->url);

  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed))
        break;

      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         once.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          if (html_allowed
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            descend = true;
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;

          status = retrieve_url (url, &file, &redirected, referer, &dt, false);

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            descend = true;

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it. */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }

      if (opt.spider)
        {
          visited_url (url, referer);
        }

      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly. */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }

      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains. */

      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              char *referer_url = url;
              bool strip_auth = (url_parsed != NULL
                                 && url_parsed->user != NULL);
              assert (url_parsed != NULL);

              /* Strip auth info if present */
              if (strip_auth)
                referer_url = url_string (url_parsed, URL_AUTH_HIDE);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (referer_url), depth + 1,
                                   child->link_expect_html);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice. */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              if (strip_auth)
                xfree (referer_url);
              url_free (url_parsed);
              free_urlpos (children);
            }
        }

      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }

      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }

  /* If anything is left of the queue due to a premature exit, free it
     now. */
  {
    char *d1, *d2;
    int d3;
    bool d4;
    while (url_dequeue (queue,
                        (const char **)&d1, (const char **)&d2, &d3, &d4))
      {
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}

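/* Illustrative sketch (not part of the original file): the string-set
   idiom behind BLACKLIST in retrieve_tree above, which download_child_p
   below also uses to memoize expensive checks.  make_string_hash_table,
   string_set_add, string_set_contains and string_set_free are the
   hash-table helpers already used in this file; the URL literal is only
   a placeholder.  */
#if 0
static void
blacklist_example (void)
{
  struct hash_table *blacklist = make_string_hash_table (0);

  /* Remember a URL so it is never enqueued (and hence downloaded) twice.  */
  string_set_add (blacklist, "http://example.com/page.html");

  if (string_set_contains (blacklist, "http://example.com/page.html"))
    DEBUGP (("Already on the black list.\n"));

  string_set_free (blacklist);
}
#endif
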
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */

static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
          DEBUGP (("download_child_p: parent->url is: %s\n", quote (parent->url)));
          visited_url (url, referrer);
          xfree (referrer);
        }

      DEBUGP (("Already on the black list.\n"));
      goto out;
    }

  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */

  /* Determine whether URL under consideration has a HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }

  /* 2. If it is an absolute link and they are not followed, throw it
     out. */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out. */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }

  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode. */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }

  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists. */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }

  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.) */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites): */
           && (opt.reclevel == INFINITE_RECURSION
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }

  /* 7. */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }

  /* 8. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);

              /* Delete the robots.txt file if we chose to either delete the
                 files after downloading or we're just running a spider. */
              if (opt.delete_after || opt.spider)
                {
                  logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n",
                               strerror (errno));
                }

              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them. */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say. */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }

  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}

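/* Illustrative sketch (not part of the original file): the robots.txt
   lookup performed in check 8 above, reduced to its core.  Specs are
   fetched once per host/port, cached with res_register_specs, and then
   consulted with res_match_path; only functions already used in this
   file appear here.  */
#if 0
static bool
robots_allow_example (const struct url *u)
{
  struct robot_specs *specs = res_get_specs (u->host, u->port);
  if (!specs)
    {
      char *rfile;
      if (res_retrieve_file (u->url, &rfile))
        {
          specs = res_parse_from_file (rfile);
          xfree (rfile);
        }
      else
        /* robots.txt could not be fetched; register empty specs so we
           do not keep retrying.  */
        specs = res_parse ("", 0);
      res_register_specs (u->host, u->port, specs);
    }
  return res_match_path (specs, u->path);
}
#endif
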
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  bool success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}

/* vim:set sts=2 sw=2 cino+={s: */