/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

In addition, as a special exception, the Free Software Foundation
gives permission to link the code of its release of Wget with the
OpenSSL project's "OpenSSL" library (or with modified versions of it
that use the same license as the "OpenSSL" library), and distribute
the linked executables.  You must obey the GNU General Public License
in all respects for all of the code used other than "OpenSSL".  If you
modify this file, you may extend this exception to your version of the
file, but you are not obligated to do so.  If you do not wish to do
so, delete this exception statement from your version.  */
#ifdef HAVE_STRING_H
# include <string.h>
#endif /* HAVE_STRING_H */
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>
#include <sys/types.h>
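
/* The declarations used below (struct hash_table, struct url, url_parse,
   retrieve_url, the res_* robots interface, logprintf, DEBUGP, ...) live
   in Wget's internal headers.  The include list that follows is a
   plausible reconstruction, not verified against this exact tree.  */
#include "wget.h"
#include "url.h"
#include "recur.h"
#include "utils.h"
#include "retr.h"
#include "host.h"
#include "hash.h"
#include "res.h"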
extern char *version_string;

extern struct hash_table *dl_url_file_map;
extern struct hash_table *downloaded_html_set;
/* Functions for maintaining the URL queue.  */

struct queue_element {
  const char *url;
  const char *referer;
  int depth;
  struct queue_element *next;
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
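
/* The queue is a singly linked list with separate head and tail
   pointers, so both enqueue and dequeue run in constant time.  The
   count/maxcount fields only feed the debugging output below.  */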
/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xmalloc (sizeof (*queue));
  memset (queue, '\0', sizeof (*queue));
  return queue;
}
/* Delete a URL queue. */

static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth)
{
  struct queue_element *qel = xmalloc (sizeof (*qel));
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return 1 if this operation succeeded,
   or 0 if the queue is empty.  */

static int
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return 0;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return 1;
}
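
/* Illustrative sketch (not part of the original source): a typical
   producer/consumer use of this little API, as done by retrieve_tree
   below.  Ownership of the url/referer strings passes to the queue on
   enqueue and back to the caller on dequeue.

     struct url_queue *q = url_queue_new ();
     url_enqueue (q, xstrdup ("http://example.com/"), NULL, 0);
     ...
     const char *url, *referer;
     int depth;
     while (url_dequeue (q, &url, &referer, &depth))
       ...process url, then free the strings...
     url_queue_delete (q);
*/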
static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
                                     struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
                                       struct url *, struct hash_table *));
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree, on the
   other hand, implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue. */
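
/* Illustrative mapping of the steps above onto the helpers used below
   (a simplified sketch, not the literal control flow):

     url_enqueue (queue, xstrdup (start_url), NULL, 0);            step 1
     while (url_dequeue (queue, &url, &referer, &depth))           steps 2-3
       {
         retrieve_url (url, &file, &redirected, referer, &dt);     step 4
         children = get_urls_html (file, url, ...);                step 5
         for (child = children; child; child = child->next)        step 6
           if (download_child_p (child, ...))                      step 7
             url_enqueue (queue, xstrdup (child->url->url),
                          xstrdup (url), depth + 1);
       }
*/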
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;
  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);
  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));
      return URLERROR;
    }
  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
  string_set_add (blacklist, start_url_parsed->url);
  while (1)
    {
      int descend = 0;
      char *url, *referer, *file = NULL;
      int depth;
      boolean dash_p_leaf_HTML = FALSE;
      if (downloaded_exceeds_quota ())
        break;
      if (status == FWRITEERR)
        break;
      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          if (downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
            descend = 1;
        }
      else
        {
          int dt = 0;
          char *redirected = NULL;
          int oldrec = opt.recursive;

          opt.recursive = 0;
          status = retrieve_url (url, &file, &redirected, referer, &dt);
          opt.recursive = oldrec;

          if (file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
            descend = 1;

          if (redirected)
            {
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (descend)
                {
                  if (!descend_redirect_p (redirected, url, depth,
                                           start_url_parsed, blacklist))
                    descend = 0;
                  else
                    /* Make sure that the old pre-redirect form gets
                       blacklisted.  */
                    string_set_add (blacklist, url);
                }

              xfree (url);
              url = redirected;
            }
        }
      if (descend
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = TRUE;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out.  */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
              descend = 0;
            }
        }
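
      /* Worked example (illustrative): with `-r -l 2 -p', a page at
         depth 2 is past the limit, but because of the branch above its
         inline requisites (images, stylesheets, frame pages) may still
         be enqueued at depth 3, and a frame page found there may pull
         its own requisites at depth 4.  Non-inline links are dropped
         by the dash_p_leaf_HTML test further down.  */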
      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains. */

      if (descend)
        {
          int meta_disallow_follow = 0;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }
          if (children)
            {
              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              assert (url_parsed != NULL);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (url), depth + 1);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              url_free (url_parsed);
              free_urlpos (children);
            }
        }
      if (opt.delete_after || (file && !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             otherwise rejected (e.g. by -R) HTML file just so we
             could harvest its hyperlinks -- in either case, delete
             the local file.  */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   "recursive rejection criteria"));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          register_delete_file (file);
        }
      xfree (url);
      FREE_MAYBE (referer);
      FREE_MAYBE (file);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))
      {
        xfree (d1);
        FREE_MAYBE (d2);
      }
  }
  url_queue_delete (queue);
  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  int u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));
  if (string_set_contains (blacklist, url))
    {
      DEBUGP (("Already on the black list.\n"));
      goto out;
    }
  /* Several things to check for:
     1. if the scheme is not HTTP, and we are not set up to follow it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for acceptance/rejection rules (suffixes)
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */
  /* Determine whether URL under consideration has an HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!frontcmp (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    if (!accdir (u->dir, ALLABS))
      {
        DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
        goto out;
      }
  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for HTML documents,
     which might lead to other files that do need to be downloaded.
     That is, unless we've exhausted the recursion depth anyway.  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           && depth != INFINITE_RECURSION
           && depth < opt.reclevel - 1))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }
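
  /* Illustrative note: because of the HTML exemption above, running
     e.g. `wget -r -l 3 -A "*.jpg"' still downloads HTML pages at the
     intermediate depths so that their links can be harvested; such
     files are then removed by the rejection/--delete-after logic in
     retrieve_tree.  */

  /* 7. Do not cross to a different host unless spanning hosts (-H)
     was requested.  */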
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }
  /* 8. Check robots.txt. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);
              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }
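
  /* Note: judging from the calls above, res_get_specs/res_register_specs
     cache the parsed specs per host and port, so robots.txt is fetched
     at most once per server during a recursive run; URLs it forbids are
     additionally memoized in BLACKLIST.  */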
  /* The URL has passed all the tests.  It can be placed in the
     download queue.  */
  DEBUGP (("Decided to load it.\n"));

  return 1;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return 0;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */
static int
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  int success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xmalloc (sizeof (struct urlpos));
  memset (upos, 0, sizeof (*upos));
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);
  url_free (orig_parsed);
  url_free (new_parsed);

  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}