/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

In addition, as a special exception, the Free Software Foundation
gives permission to link the code of its release of Wget with the
OpenSSL project's "OpenSSL" library (or with modified versions of it
that use the same license as the "OpenSSL" library), and distribute
the linked executables.  You must obey the GNU General Public License
in all respects for all of the code used other than "OpenSSL".  If you
modify this file, you may extend this exception to your version of the
file, but you are not obligated to do so.  If you do not wish to do
so, delete this exception statement from your version.  */
#ifdef HAVE_STRING_H
# include <string.h>
#endif /* HAVE_STRING_H */
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>
#include <sys/types.h>
extern char *version_string;
extern LARGE_INT total_downloaded_bytes;

extern struct hash_table *dl_url_file_map;
extern struct hash_table *downloaded_html_set;
/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;              /* the URL to download */
  const char *referer;          /* the referring document */
  int depth;                    /* the depth */
  unsigned int html_allowed :1; /* whether the document is allowed to
                                   be treated as HTML. */

  struct queue_element *next;   /* next element in queue */
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
/* Create a URL queue.  */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xnew0 (struct url_queue);
  return queue;
}
/* Delete a URL queue.  */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth, int html_allowed)
{
  struct queue_element *qel = xnew (struct queue_element);
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return 1 if this operation succeeded,
   or 0 if the queue is empty.  */

static int
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth,
             int *html_allowed)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return 0;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return 1;
}
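/* Illustrative only: a minimal sketch of how the queue API above is
   meant to be driven.  The function name and the URLs are made up for
   the example; retrieve_tree below is the real caller.  */
#if 0
static void
url_queue_example (void)
{
  struct url_queue *q = url_queue_new ();
  const char *url, *referer;
  int depth, html_allowed;

  url_enqueue (q, xstrdup ("http://example.com/a"), NULL, 0, 1);
  url_enqueue (q, xstrdup ("http://example.com/b"), NULL, 1, 1);

  /* Items come back in the order they were put in (FIFO), which is
     what gives the traversal its breadth-first shape.  */
  while (url_dequeue (q, &url, &referer, &depth, &html_allowed))
    {
      DEBUGP (("got %s at depth %d\n", url, depth));
      xfree ((char *) url);
    }

  url_queue_delete (q);
}
#endif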
static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
                                     struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
                                       struct url *, struct hash_table *));
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue.  */
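/* For instance, starting from a page A that links to B and C, where B
   in turn links to D, the queue evolves roughly like this:

     [A]  ->  [B C]  ->  [C D]  ->  [D]  ->  []

   i.e. everything at depth 1 is downloaded before anything at depth 2,
   which is what makes the ordering of downloads predictable.  */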
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }
  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, 1);
  string_set_add (blacklist, start_url_parsed->url);
  while (1)
    {
      int descend = 0;
      char *url, *referer, *file = NULL;
      int depth, html_allowed;
      int dash_p_leaf_HTML = 0;

      if (opt.quota && total_downloaded_bytes > opt.quota)
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth, &html_allowed))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          if (html_allowed
              && downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            descend = 1;
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;
          int oldrec = opt.recursive;

          opt.recursive = 0;
          status = retrieve_url (url, &file, &redirected, referer, &dt);
          opt.recursive = oldrec;

          if (html_allowed && file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            descend = 1;

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist))
                    descend = 0;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted. */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }
      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = 1;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = 0;
            }
        }
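      /* For example, with `-l 2 -p': a page at depth 2 only has its
         inline requisites enqueued (at depth 3), and a frame page among
         those may in turn pull its own inline contents (at depth 4);
         ordinary links at those depths are no longer followed.  */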
      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains. */

      if (descend)
        {
          int meta_disallow_follow = 0;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }

          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              assert (url_parsed != NULL);
              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (url), depth + 1,
                                   child->link_expect_html);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              url_free (url_parsed);
              free_urlpos (children);
            }
        }
      if (opt.delete_after || (file && !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             otherwise rejected (e.g. by -R) HTML file just so we
             could harvest its hyperlinks -- in either case, delete
             the local file. */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   "recursive rejection criteria"));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          register_delete_file (file);
        }

      xfree (url);
      xfree_null (referer);
      xfree_null (file);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3, d4;
    while (url_dequeue (queue,
                        (const char **)&d1, (const char **)&d2, &d3, &d4))
      {
        xfree (d1);
        xfree_null (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
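/* Illustrative only: a rough sketch of how a caller drives
   retrieve_tree.  The wrapper function and the URL are made up for the
   example; the real call sites live elsewhere in Wget.  */
#if 0
static void
retrieve_tree_example (void)
{
  uerr_t err = retrieve_tree ("http://example.com/");
  if (err != RETROK)
    DEBUGP (("recursive retrieval did not complete cleanly\n"));
}
#endif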
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  int u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
     gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */
  /* Determine whether URL under consideration has an HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!frontcmp (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir, ALLABS))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for non-leaf HTMLs,
     which can lead to other files that do need to be downloaded.  (-p
     automatically implies non-leaf because with -p we can, if
     necessary, overstep the maximum depth to get the page requisites.)  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           /* The exception only applies to non-leaf HTMLs (but -p
              always implies non-leaf because we can overstep the
              maximum depth to get the requisites):  */
           && (/* non-leaf */
               opt.reclevel == INFINITE_RECURSION
               /* also non-leaf */
               || depth < opt.reclevel - 1
               /* -p, which implies non-leaf (see above) */
               || opt.page_requisites)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
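  /* 7. Don't follow links to a different host unless -H (--span-hosts)
     is in effect.  */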
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
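  /* 8. Consult robots.txt: fetch and cache the specs for this host if
     we don't have them yet, then check whether they allow U's path.  */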
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);
              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return 1;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return 0;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static int
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  int success;
  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xnew0 (struct urlpos);
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}