/* Handling of recursive HTTP retrieving.
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

In addition, as a special exception, the Free Software Foundation
gives permission to link the code of its release of Wget with the
OpenSSL project's "OpenSSL" library (or with modified versions of it
that use the same license as the "OpenSSL" library), and distribute
the linked executables.  You must obey the GNU General Public License
in all respects for all of the code used other than "OpenSSL".  If you
modify this file, you may extend this exception to your version of the
file, but you are not obligated to do so.  If you do not wish to do
so, delete this exception statement from your version.  */
#include <config.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>

#include "wget.h"
#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "ftp.h"
#include "host.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
#include "spider.h"
/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  bool html_allowed;            /* whether the document is allowed to
                                   be treated as HTML. */
  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;          /* current length and high-water mark,
                                   reported by the DEBUGP statistics below */
};
/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}
/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth, bool html_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return true if this operation
   succeeded, or false if the queue is empty.  */

static bool
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth,
             bool *html_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return false;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return true;
}
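
/* For illustration only -- a hypothetical caller exercising the FIFO
   behavior above (the URLs are made up).  Items come back out in the
   order they went in:

     struct url_queue *q = url_queue_new ();
     url_enqueue (q, xstrdup ("http://a.example/"), NULL, 0, true);
     url_enqueue (q, xstrdup ("http://b.example/"), NULL, 1, false);

     const char *u, *ref;
     int d;
     bool h;
     url_dequeue (q, &u, &ref, &d, &h);   now u is "http://a.example/", d is 0
     url_dequeue (q, &u, &ref, &d, &h);   now u is "http://b.example/", d is 1
     url_queue_delete (q);

   Note that the dequeued strings are owned by the caller and must
   eventually be xfree'd, as retrieve_tree below does.  */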
static bool download_child_p (const struct urlpos *, struct url *, int,
                              struct url *, struct hash_table *);
static bool descend_redirect_p (const char *, const char *, int,
                                struct url *, struct hash_table *);
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
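
/* For example (hypothetical site), if A links to B and C, and B links
   back to A, the traversal proceeds breadth-first:

     dequeue A (depth 0)  -> enqueue B and C; blacklist A, B, C
     dequeue B (depth 1)  -> A is already blacklisted, nothing enqueued
     dequeue C (depth 1)  -> no new links; queue empty, done

   The blacklist (see below) is what implements step 7's check against
   already-seen URLs.  */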
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;
  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;
  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }
  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true);
  string_set_add (blacklist, start_url_parsed->url);
  while (1)
    {
      bool descend = false;
      char *url, *referer, *file = NULL;
      int depth;
      bool html_allowed;
      bool dash_p_leaf_HTML = false;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;
      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          if (html_allowed
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            descend = true;
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;

          status = retrieve_url (url, &file, &redirected, referer, &dt, false);

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            descend = true;
          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist))
                    descend = false;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }
      if (opt.spider)
        {
          visited_url (url, referer);
        }
      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = true;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = false;
            }
        }
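
      /* For instance, with `-l 2 -p', a page at depth 2 may still
         pull in its inline images at depth 3, and a frame page
         (itself an inline link at depth 3) may pull in its own
         requisites at depth 4.  */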
      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains. */

      if (descend)
        {
          bool meta_disallow_follow = false;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }
          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              char *referer_url = url;
              bool strip_auth = (url_parsed != NULL
                                 && url_parsed->user != NULL);
              assert (url_parsed != NULL);

              /* Strip auth info if present */
              if (strip_auth)
                referer_url = url_string (url_parsed, URL_AUTH_HIDE);
              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (referer_url), depth + 1,
                                   child->link_expect_html);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }
              if (strip_auth)
                xfree (referer_url);
              url_free (url_parsed);
              free_urlpos (children);
            }
        }
      if (file
          && (opt.delete_after
              || opt.spider /* opt.recursive is implicitly true */
              || !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             (otherwise unneeded because of --spider or rejected by -R)
             HTML file just to harvest its hyperlinks -- in either case,
             delete the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   (opt.spider ? "--spider" :
                    "recursive rejection criteria")));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after || opt.spider
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          logputs (LOG_VERBOSE, "\n");
          register_delete_file (file);
        }
      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    bool d4;
    while (url_dequeue (queue,
                        (const char **)&d1, (const char **)&d2, &d3, &d4))
      {
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);
  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);
  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */

static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  bool u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));
  if (string_set_contains (blacklist, url))
    {
      if (opt.spider)
        {
          char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD);
          DEBUGP (("download_child_p: parent->url is: `%s'\n", parent->url));
          visited_url (url, referrer);
          xfree (referrer);
        }

      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
     gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */
  /* Determine whether URL under consideration has an HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!subdir_p (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
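
  /* E.g. if the crawl started at http://example.com/a/b/index.html,
     start_url_parsed->dir is "/a/b": a link into "/a/b/c/" passes the
     subdir_p test above, while one into "/a/" fails it.  The bypasses
     in the condition mean the rule is not applied across hosts or
     schemes, nor to -p page requisites.  */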
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.)  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites): */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
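
  /* E.g. with `-r -l 2 -A "*.pdf"', a chapter.html linked from the
     start page is non-leaf (its children are still within the depth
     limit), so it is downloaded despite -A in order to harvest its
     links, and deleted later by retrieve_tree's acceptable() check;
     the same file linked from a depth-1 page would be a leaf and is
     rejected right here.  */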
  /* 7. Same-host check: unless -H (span hosts) was given, a link
     leading to a different host is not followed. */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
  /* 8. Consult robots.txt, fetching and caching it if necessary. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);

              /* Delete the robots.txt file if we chose to either delete the
                 files after downloading or we're just running a spider. */
              if (opt.delete_after || opt.spider)
                {
                  logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n",
                               strerror (errno));
                }

              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }
      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
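
  /* Adding the forbidden URL to BLACKLIST above memoizes the verdict:
     the next time that URL is seen, the string_set_contains check at
     the top of this function rejects it without consulting the cached
     specs again.  */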
  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return true;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return false;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  bool success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}
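
/* E.g. if http://example.com/dir/page redirects to a URL on another
   host, the redirect target is vetted here as though it were a link
   found in the original document, so the span-host, directory, and
   robots rules above all apply before any of its children are
   followed.  */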
/* vim:set sts=2 sw=2 cino+={s: */