/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

   This file is part of GNU Wget.

   GNU Wget is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   GNU Wget is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Wget; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   In addition, as a special exception, the Free Software Foundation
   gives permission to link the code of its release of Wget with the
   OpenSSL project's "OpenSSL" library (or with modified versions of it
   that use the same license as the "OpenSSL" library), and distribute
   the linked executables.  You must obey the GNU General Public License
   in all respects for all of the code used other than "OpenSSL".  If you
   modify this file, you may extend this exception to your version of the
   file, but you are not obligated to do so.  If you do not wish to do
   so, delete this exception statement from your version.  */
#endif /* HAVE_STRING_H */

#endif /* HAVE_UNISTD_H */

#include <sys/types.h>

extern char *version_string;

static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;

/* List of HTML files downloaded in this Wget run, used for link
   conversion after Wget is done.  The list and the set contain the
   same information, except the list maintains the order.  Perhaps I
   should get rid of the list; it is there for historical reasons.  */
static slist *downloaded_html_list;
static struct hash_table *downloaded_html_set;
static void register_delete_file PARAMS ((const char *));

/* Functions for maintaining the URL queue.  */

struct queue_element {
  const char *url;
  const char *referer;
  int depth;
  struct queue_element *next;
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};

/* Create a URL queue. */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xmalloc (sizeof (*queue));
  memset (queue, '\0', sizeof (*queue));
/* Delete a URL queue. */

url_queue_delete (struct url_queue *queue)

/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth)
{
  struct queue_element *qel = xmalloc (sizeof (*qel));
  qel->referer = referer;

  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

    queue->tail->next = qel;

    queue->head = queue->tail;
/* Take a URL out of the queue.  Return 1 if this operation succeeded,
   or 0 if the queue is empty.  */

url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth)
{
  struct queue_element *qel = queue->head;

  queue->head = queue->head->next;

  *referer = qel->referer;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
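
/* Illustrative sketch (not compiled into Wget): how the FIFO queue API
   above is meant to be used.  The calls and signatures follow the
   definitions in this file; the URL strings are made-up examples.  */
#if 0
static void
url_queue_example (void)
{
  struct url_queue *q = url_queue_new ();
  const char *url, *referer;
  int depth;

  /* Items come back out in the order they were put in (FIFO). */
  url_enqueue (q, xstrdup ("http://example.com/"), NULL, 0);
  url_enqueue (q, xstrdup ("http://example.com/a.html"),
               xstrdup ("http://example.com/"), 1);

  while (url_dequeue (q, &url, &referer, &depth))
    {
      DEBUGP (("got %s (referer %s) at depth %d\n",
               url, referer ? referer : "(none)", depth));
      /* The caller owns the strings handed back by url_dequeue. */
      xfree ((char *) url);
      FREE_MAYBE ((char *) referer);
    }

  url_queue_delete (q);
}
#endif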
static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
				     struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
				       struct url *, struct hash_table *));


/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree, on the
   other hand, implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue.  */
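
/* A condensed, illustrative sketch (not compiled) of the loop that
   retrieve_tree implements below.  Memory management, error handling,
   -p handling and the various limits are omitted; the helper names are
   the ones used in this file, but the exact calls in the real function
   differ.  */
#if 0
static void
retrieve_tree_sketch (const char *start_url)
{
  struct url_queue *queue = url_queue_new ();
  struct hash_table *blacklist = make_string_hash_table (0);
  const char *url, *referer;
  int depth;

  url_enqueue (queue, xstrdup (start_url), NULL, 0);          /* step 1 */
  string_set_add (blacklist, start_url);

  while (url_dequeue (queue, &url, &referer, &depth))         /* steps 2-3 */
    {
      char *file = NULL, *redirected = NULL;
      int dt = 0;

      retrieve_url (url, &file, &redirected, referer, &dt);   /* step 4 */

      if (file && (dt & TEXTHTML) && depth < opt.reclevel)    /* step 5 */
        {
          struct urlpos *children = get_urls_html (file, url, NULL);
          struct urlpos *child;

          for (child = children; child; child = child->next) /* step 6 */
            if (!string_set_contains (blacklist, child->url->url)) /* step 7 */
              {
                url_enqueue (queue, xstrdup (child->url->url),
                             xstrdup (url), depth + 1);
                string_set_add (blacklist, child->url->url);
              }
          free_urlpos (children);
        }
    }

  url_queue_delete (queue);
  string_set_free (blacklist);
}
#endif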
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet. */
  struct hash_table *blacklist;

  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL. */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
  string_set_add (blacklist, start_url_parsed->url);

      char *url, *referer, *file = NULL;
      int dash_p_leaf_HTML = FALSE;
      if (downloaded_exceeds_quota ())

      if (status == FWRITEERR)

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,

      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",

          if (downloaded_html_set
              && string_set_contains (downloaded_html_set, file))
          char *redirected = NULL;
          int oldrec = opt.recursive;

          status = retrieve_url (url, &file, &redirected, referer, &dt);
          opt.recursive = oldrec;

          if (file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))

              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it. */

              if (!descend_redirect_p (redirected, url, depth,
                                       start_url_parsed, blacklist))

                  /* Make sure that the old pre-redirect form gets
                     blacklisted. */
                  string_set_add (blacklist, url);

          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)

          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))

              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = TRUE;

              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out. */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains. */

          int meta_disallow_follow = 0;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
              free_urlpos (children);

              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              assert (url_parsed != NULL);

              for (; child; child = child->next)

                  if (child->ignore_when_downloading)
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,

                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (url), depth + 1);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice. */
                      string_set_add (blacklist, child->url->url);

              url_free (url_parsed);
              free_urlpos (children);
      if (opt.delete_after || (file && !acceptable (file)))

          /* Either --delete-after was specified, or we loaded this
             otherwise rejected (e.g. by -R) HTML file just so we
             could harvest its hyperlinks -- in either case, delete
             the local file. */
          DEBUGP (("Removing file due to %s in retrieve_tree():\n",
                   opt.delete_after ? "--delete-after" :
                   "recursive rejection criteria"));
          logprintf (LOG_VERBOSE,
                     ? _("Removing %s.\n")
                     : _("Removing %s since it should be rejected.\n")),

            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          register_delete_file (file);

      FREE_MAYBE (referer);

  /* If anything is left of the queue due to a premature exit, free it
     now.  */

  while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))

  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (downloaded_exceeds_quota ())

  else if (status == FWRITEERR
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs in BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  int u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
      DEBUGP (("Already on the black list.\n"));

  /* Several things to check for:
     1. check the scheme (non-HTTP schemes are followed only when
        requested, e.g. FTP with --follow-ftp)
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory-consuming tests should be put later on
     the list.  */
  /* Determine whether URL under consideration has an HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
      DEBUGP (("Not following non-HTTP schemes.\n"));

  /* 2. If it is an absolute link and they are not followed, throw it
     out. */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
        DEBUGP (("It doesn't really look like a relative link.\n"));

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out. */
  if (!accept_domain (u))
      DEBUGP (("The domain was not accepted.\n"));

  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode. */
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
      if (!frontcmp (start_url_parsed->dir, u->dir))
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists. */
  if (opt.includes || opt.excludes)
    if (!accdir (u->dir, ALLABS))
        DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));

  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for HTML documents,
     which might lead to other files that do need to be downloaded.
     That is, unless we've exhausted the recursion depth anyway. */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           && depth != INFINITE_RECURSION
           && depth < opt.reclevel - 1))
    if (!acceptable (u->file))
        DEBUGP (("%s (%s) does not match acc/rej rules.\n",

  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);

          if (res_retrieve_file (url, &rfile))
              specs = res_parse_from_file (rfile);

              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them. */
              specs = res_parse ("", 0);

          res_register_specs (u->host, u->port, specs);

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say. */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
        }
    }

  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  DEBUGP (("Decided NOT to load it.\n"));
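
/* The comment above download_child_p notes that the expensive checks
   (robots.txt in particular) are memoized by adding rejected URLs to
   BLACKLIST.  A minimal, illustrative sketch (not compiled) of that
   pattern, using the string-set helpers used in this file;
   expensive_check_p is a made-up stand-in for the real tests.  */
#if 0
static int
memoized_check (const char *url, struct hash_table *blacklist)
{
  /* A URL that already failed is rejected without re-testing. */
  if (string_set_contains (blacklist, url))
    return 0;

  if (!expensive_check_p (url))   /* hypothetical expensive test */
    {
      /* Remember the failure so the next encounter is cheap. */
      string_set_add (blacklist, url);
      return 0;
    }

  return 1;
}
#endif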
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static int
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xmalloc (sizeof (struct urlpos));
  memset (upos, 0, sizeof (*upos));
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);

    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));
#define ENSURE_TABLES_EXIST do {                        \
  if (!dl_file_url_map)                                 \
    dl_file_url_map = make_string_hash_table (0);       \
  if (!dl_url_file_map)                                 \
    dl_url_file_map = make_string_hash_table (0);       \
} while (0)

/* Return 1 if S1 and S2 are the same, except for "/index.html".  The
   cases in which it returns 1 are (substitute any substring for "foo"):

   m("foo/index.html", "foo/")  ==> 1
   m("foo/", "foo/index.html")  ==> 1
   m("foo", "foo/index.html")   ==> 1
   m("foo", "foo/")             ==> 1
   m("foo", "foo")              ==> 1  */
match_except_index (const char *s1, const char *s2)
{
  /* Skip common substring. */
  for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)

    /* Strings differ at the very beginning -- bail out.  We need to
       check this explicitly to avoid `lng - 1' reading outside the
       array. */

    /* Both strings hit EOF -- strings are equal. */

    /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux". */

    /* S1 is the longer one. */

    /* S2 is the longer one. */

  /* foo/index.html */  /* or */  /* foo/index.html */

      /* The right-hand case. */
      if (*lng == '/' && *(lng + 1) == '\0')

  return 0 == strcmp (lng, "/index.html");
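
/* Illustrative sketch (not compiled): the expected results of
   match_except_index for the cases listed in its comment above.  The
   assertions simply mirror that table; they are not part of Wget.  */
#if 0
static void
match_except_index_examples (void)
{
  assert (match_except_index ("foo/index.html", "foo/"));
  assert (match_except_index ("foo/", "foo/index.html"));
  assert (match_except_index ("foo", "foo/index.html"));
  assert (match_except_index ("foo", "foo/"));
  assert (match_except_index ("foo", "foo"));

  /* Anything that differs by more than a trailing "/" or
     "/index.html" does not match. */
  assert (!match_except_index ("foo/bar", "foo/qux"));
}
#endif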
dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
{
  char *mapping_url = (char *)key;
  char *mapping_file = (char *)value;
  char *file = (char *)arg;

  if (0 == strcmp (mapping_file, file))
    {
      hash_table_remove (dl_url_file_map, mapping_url);
      xfree (mapping_file);
    }

  /* Continue mapping. */

/* Remove all associations from various URLs to FILE from dl_url_file_map. */

dissociate_urls_from_file (const char *file)
{
  hash_table_map (dl_url_file_map, dissociate_urls_from_file_mapper,

/* Register that URL has been successfully downloaded to FILE.  This
   is used by the link conversion code to convert references to URLs
   to references to local files.  It is also used to check whether a
   URL has already been downloaded. */
register_download (const char *url, const char *file)
{
  char *old_file, *old_url;

  /* With some forms of retrieval, two different URLs can end up being
     written to the same file; this is not likely or particularly
     desirable, but it is possible.  If both are downloaded, the second
     download will override the first one.  When that happens,
     dissociate the old file name from the URL. */

  if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))

      if (0 == strcmp (url, old_url))
        /* We have somehow managed to download the same URL twice.
           Nothing to do. */

      if (match_except_index (url, old_url)
          && !hash_table_contains (dl_url_file_map, url))
        /* The two URLs differ only in the "index.html" ending.  For
           example, one is "http://www.server.com/", and the other is
           "http://www.server.com/index.html".  Don't remove the old
           one, just add the new one as a non-canonical entry. */

      hash_table_remove (dl_file_url_map, file);

      /* Remove all the URLs that point to this file.  Yes, there can
         be more than one such URL, because we store redirections as
         multiple entries in dl_url_file_map.  For example, if URL1
         redirects to URL2 which gets downloaded to FILE, we map both
         URL1 and URL2 to FILE in dl_url_file_map.  (dl_file_url_map
         only points to URL2.)  When another URL gets loaded to FILE,
         we want both URL1 and URL2 dissociated from it.

         This is a relatively expensive operation because it performs
         a linear search of the whole hash table, but it should be
         called very rarely, only when two URLs resolve to the same
         file name, *and* the "<file>.1" extensions are turned off.
         In other words, almost never. */
      dissociate_urls_from_file (file);
  hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));

  /* A URL->FILE mapping is not possible without a FILE->URL mapping.
     If the latter were present, it should have been removed by the
     above `if'.  So we could write:

         assert (!hash_table_contains (dl_url_file_map, url));

     The above is correct when running in recursive mode where the
     same URL always resolves to the same file.  But if the same URL
     is downloaded twice (for example, given twice on the command
     line), the first one will resolve to "FILE", and the other to
     "FILE.1".  In that case, FILE.1 will not be found in
     dl_file_url_map, but URL will still point to FILE in
     dl_url_file_map. */
  if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
      hash_table_remove (dl_url_file_map, url);

  hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
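
/* Illustrative sketch (not compiled): the intended relationship between
   the two tables maintained above.  dl_file_url_map maps each local
   file to one canonical URL, while dl_url_file_map may map several URLs
   (e.g. a redirecting URL and its target, via register_redirection
   below) to the same file.  The URLs and the file name are made-up
   examples.  */
#if 0
static void
mapping_example (void)
{
  register_download ("http://example.com/page", "example.com/page.html");
  register_redirection ("http://example.com/old-page",
                        "http://example.com/page");

  /* Both URLs now resolve to the same local file... */
  assert (0 == strcmp (hash_table_get (dl_url_file_map,
                                       "http://example.com/old-page"),
                       "example.com/page.html"));
  assert (0 == strcmp (hash_table_get (dl_url_file_map,
                                       "http://example.com/page"),
                       "example.com/page.html"));

  /* ...but the file maps back only to the URL it was downloaded from. */
  assert (0 == strcmp (hash_table_get (dl_file_url_map,
                                       "example.com/page.html"),
                       "http://example.com/page"));
}
#endif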
/* Register that FROM has been redirected to TO.  This assumes that TO
   is successfully downloaded and already registered using
   register_download() above. */

register_redirection (const char *from, const char *to)
{
  file = hash_table_get (dl_url_file_map, to);
  assert (file != NULL);
  if (!hash_table_contains (dl_url_file_map, from))
    hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));

/* Register that FILE has been deleted. */

register_delete_file (const char *file)
{
  char *old_url, *old_file;

  if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))

  hash_table_remove (dl_file_url_map, file);

  dissociate_urls_from_file (file);

/* Register that FILE is an HTML file that has been downloaded. */

register_html (const char *url, const char *file)
{
  if (!downloaded_html_set)
    downloaded_html_set = make_string_hash_table (0);
  else if (hash_table_contains (downloaded_html_set, file))

  /* The set and the list should use the same copy of FILE, but the
     slist interface insists on strduping the string it gets.  Oh
     well. */
  string_set_add (downloaded_html_set, file);
  downloaded_html_list = slist_prepend (downloaded_html_list, file);
/* This function is called when the retrieval is done, to convert the
   links in the files that have been downloaded.  It has to be called
   at the end of the retrieval, because only then does Wget know
   conclusively which URLs have been downloaded, and which not, so it
   can tell which direction to convert to.

   The "direction" means that the URLs to the files that have been
   downloaded get converted to the relative URL which will point to
   that file.  And the other URLs get converted to the remote URL on
   the server.

   All the downloaded HTML files are kept in downloaded_html_list, and
   the downloaded URLs in dl_url_file_map.  All the information is
   extracted from these two structures. */
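
/* Illustrative sketch (not compiled) of the per-link decision that
   convert_all_links makes below: links whose target was downloaded
   become relative links to the local file, the others become complete
   remote URLs (the real code additionally skips links that are already
   complete).  Field and constant names are the ones used below.  */
#if 0
static void
choose_conversion (struct urlpos *cur_url)
{
  const char *local_name = hash_table_get (dl_url_file_map,
                                           cur_url->url->url);
  if (local_name)
    {
      /* Downloaded: point the link at the local copy. */
      cur_url->convert = CO_CONVERT_TO_RELATIVE;
      cur_url->local_name = xstrdup (local_name);
    }
  else
    {
      /* Not downloaded: make sure the link is an absolute URL. */
      cur_url->convert = CO_CONVERT_TO_COMPLETE;
      cur_url->local_name = NULL;
    }
}
#endif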
convert_all_links (void)
{
  struct wget_timer *timer = wtimer_new ();

  /* Destructively reverse downloaded_html_list to get it in the right
     order.  register_html() used slist_prepend() consistently. */
  downloaded_html_list = slist_nreverse (downloaded_html_list);

  for (html = downloaded_html_list; html; html = html->next)
    {
      struct urlpos *urls, *cur_url;
      char *file = html->string;

      /* Determine the URL of the HTML file.  get_urls_html will need
         it. */
      url = hash_table_get (dl_file_url_map, file);

          DEBUGP (("Apparently %s has been removed.\n", file));

      DEBUGP (("Scanning %s (from %s)\n", file, url));

      /* Parse the HTML file... */
      urls = get_urls_html (file, url, NULL);

      /* We don't respect meta_disallow_follow here because, even if
         the file is not followed, we might still want to convert the
         links that have been followed from other files. */

      for (cur_url = urls; cur_url; cur_url = cur_url->next)
        {
          struct url *u = cur_url->url;

          if (cur_url->link_base_p)
              /* Base references have been resolved by our parser, so
                 we turn the base URL into an empty string.  (Perhaps
                 we should remove the tag entirely?) */
              cur_url->convert = CO_NULLIFY_BASE;

          /* We decide the direction of conversion according to whether
             a URL was downloaded.  Downloaded URLs will be converted
             ABS2REL, whereas non-downloaded will be converted REL2ABS. */
          local_name = hash_table_get (dl_url_file_map, u->url);

          /* Decide on the conversion type. */

              /* We've downloaded this URL.  Convert it to relative
                 form.  We do this even if the URL already is in
                 relative form, because our directory structure may
                 not be identical to that on the server (think `-nd',
                 `--cut-dirs', etc.) */
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (local_name);
              DEBUGP (("will convert url %s to local %s\n", u->url, local_name));

              /* We haven't downloaded this URL.  If it's not already
                 complete (including a full host name), convert it to
                 that form, so it can be reached while browsing this
                 HTML file. */
              if (!cur_url->link_complete_p)
                cur_url->convert = CO_CONVERT_TO_COMPLETE;
              cur_url->local_name = NULL;
              DEBUGP (("will convert url %s to complete\n", u->url));

      /* Convert the links in the file. */
      convert_links (file, urls);

  msecs = wtimer_elapsed (timer);
  wtimer_delete (timer);
  logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"),
             file_count, (double)msecs / 1000);
/* Clean up the data structures associated with recursive retrieving
   (the variables above). */

recursive_cleanup (void)
{
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;

      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;

  if (downloaded_html_set)
    string_set_free (downloaded_html_set);
  slist_free (downloaded_html_list);
  downloaded_html_list = NULL;