/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

In addition, as a special exception, the Free Software Foundation
gives permission to link the code of its release of Wget with the
OpenSSL project's "OpenSSL" library (or with modified versions of it
that use the same license as the "OpenSSL" library), and distribute
the linked executables.  You must obey the GNU General Public License
in all respects for all of the code used other than "OpenSSL".  If you
modify this file, you may extend this exception to your version of the
file, but you are not obligated to do so.  If you do not wish to do
so, delete this exception statement from your version.  */
#ifdef HAVE_STRING_H
# include <string.h>
#endif /* HAVE_STRING_H */
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>
#include <sys/types.h>
extern char *version_string;

extern struct hash_table *dl_url_file_map;
extern struct hash_table *downloaded_html_set;
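/* DL_URL_FILE_MAP maps each downloaded URL to the name of the local
   file it was saved under; DOWNLOADED_HTML_SET records which of those
   files contained HTML.  Both tables are maintained by Wget's download
   bookkeeping code, not here.  */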
/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth of the URL */
  struct queue_element *next;   /* next element in the queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
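/* COUNT is the current number of queued elements; MAXCOUNT is the
   largest size the queue has reached so far, kept only for the
   debugging output in url_enqueue and url_dequeue.  */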
/* Create a URL queue.  */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xmalloc (sizeof (*queue));
  memset (queue, '\0', sizeof (*queue));
  return queue;
}
/* Delete a URL queue.  */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth)
{
  struct queue_element *qel = xmalloc (sizeof (*qel));
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return 1 if this operation succeeded,
   or 0 if the queue is empty.  */

static int
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return 0;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return 1;
}
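/* A minimal usage sketch of the queue API above (illustrative only;
   the real loop, with depth tracking and blacklisting, is in
   retrieve_tree below):

     struct url_queue *q = url_queue_new ();
     const char *url, *referer;
     int depth;

     url_enqueue (q, xstrdup ("http://example.com/"), NULL, 0);
     while (url_dequeue (q, &url, &referer, &depth))
       {
         ...examine URL, possibly enqueuing its children at depth + 1...
         xfree ((char *) url);
         FREE_MAYBE ((char *) referer);
       }
     url_queue_delete (q);  */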
static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
                                     struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
                                       struct url *, struct hash_table *));
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load.  */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
  string_set_add (blacklist, start_url_parsed->url);
  while (1)
    {
      int descend = 0;
      char *url, *referer, *file = NULL;
      int depth;
      boolean dash_p_leaf_HTML = FALSE;

      if (downloaded_exceeds_quota ())
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         once.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          if (downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            descend = 1;
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;
          int oldrec = opt.recursive;

          opt.recursive = 0;
          status = retrieve_url (url, &file, &redirected, referer, &dt);
          opt.recursive = oldrec;

          if (file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            descend = 1;

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist))
                    descend = 0;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted.  */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }
      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = TRUE;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out.  */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = 0;
            }
        }
      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains.  */

      if (descend)
        {
          int meta_disallow_follow = 0;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              assert (url_parsed != NULL);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (url), depth + 1);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              url_free (url_parsed);
              free_urlpos (children);
            }
        }
      if (opt.delete_after || (file && !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             otherwise rejected (e.g. by -R) HTML file just so we
             could harvest its hyperlinks -- in either case, delete
             the local file.  */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   "recursive rejection criteria"));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          register_delete_file (file);
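          /* (register_delete_file also updates the URL-to-file
             bookkeeping, so that later stages such as link conversion
             do not refer to a file that no longer exists.)  */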
        }

      xfree (url);
      FREE_MAYBE (referer);
      FREE_MAYBE (file);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))
      {
        xfree (d1);
        FREE_MAYBE (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
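/* (retrieve_tree is invoked from the top-level retrieval loop, once
   per command-line URL, when recursive retrieval or page requisites
   are requested.)  */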
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  int u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */
  /* Determine whether URL under consideration has an HTTP-like scheme.  */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into.  */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!frontcmp (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir, ALLABS))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for HTML documents,
     which might lead to other files that do need to be downloaded.
     That is, unless we've exhausted the recursion depth anyway.  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           && depth != INFINITE_RECURSION
           && depth < opt.reclevel - 1))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
  /* 7. Check for same host, unless spanning hosts was requested.  */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
  /* 8. Consult robots.txt for HTTP-like schemes.  */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);
              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
  /* The URL has passed all the tests.  It can be placed in the
     download queue.  */
  DEBUGP (("Decided to load it.\n"));

  return 1;

  /* Each rejection above jumps here so that the refusal is logged in
     one place.  */
 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return 0;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static int
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  int success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xmalloc (sizeof (struct urlpos));
  memset (upos, 0, sizeof (*upos));
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}