/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

   This file is part of GNU Wget.

   GNU Wget is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   GNU Wget is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Wget; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
#endif /* HAVE_STRING_H */
#endif /* HAVE_UNISTD_H */
#include <sys/types.h>

extern char *version_string;

static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;
/* List of HTML files downloaded in this Wget run, used for link
   conversion after Wget is done.  The list and the set contain the
   same information, except the list maintains the order.  Perhaps I
   should get rid of the list, it's there for historical reasons.  */
static slist *downloaded_html_list;
static struct hash_table *downloaded_html_set;

static void register_delete_file PARAMS ((const char *));
/* Functions for maintaining the URL queue.  */

struct queue_element {
  const char *url;
  const char *referer;
  int depth;
  struct queue_element *next;
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};

/* Create a URL queue.  */

static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xmalloc (sizeof (*queue));
  memset (queue, '\0', sizeof (*queue));
/* Delete a URL queue.  */

url_queue_delete (struct url_queue *queue)

/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth)
{
  struct queue_element *qel = xmalloc (sizeof (*qel));
  qel->referer = referer;

  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

    queue->tail->next = qel;

    queue->head = queue->tail;
/* Take a URL out of the queue.  Return 1 if this operation succeeded,
   or 0 if the queue is empty.  */

url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth)
{
  struct queue_element *qel = queue->head;

  queue->head = queue->head->next;

  *referer = qel->referer;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
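
/* Illustrative sketch (not part of the build): the queue above is a
   plain FIFO built on a singly linked list with HEAD and TAIL
   pointers -- new elements are appended at TAIL and removed from
   HEAD, so URLs come out in the order they were enqueued.  The
   element and function names below are simplified stand-ins for the
   ones used above, assuming the same xmalloc/xfree helpers:

     struct elem { void *data; struct elem *next; };
     struct fifo { struct elem *head, *tail; };

     static void
     fifo_push (struct fifo *q, void *data)
     {
       struct elem *e = xmalloc (sizeof (*e));
       e->data = data;
       e->next = NULL;
       if (q->tail)
         q->tail->next = e;
       q->tail = e;
       if (!q->head)
         q->head = e;
     }

     static int
     fifo_pop (struct fifo *q, void **data)
     {
       struct elem *e = q->head;
       if (!e)
         return 0;
       q->head = e->next;
       if (!q->head)
         q->tail = NULL;
       *data = e->data;
       xfree (e);
       return 1;
     }
*/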
static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
                                     struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
                                       struct url *, struct hash_table *));
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue.

   A condensed sketch of this loop follows below.  */
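
/* A condensed sketch of the loop described above (illustrative only,
   not part of the build).  It reuses the queue and string-set helpers
   from this file; retrieve_one(), file_is_html() and child_links_of()
   are hypothetical placeholders for retrieve_url() and
   get_urls_html(), and all error handling and option checks are
   elided:

     queue = url_queue_new ();
     blacklist = make_string_hash_table (0);

     url_enqueue (queue, xstrdup (start_url), NULL, 0);
     string_set_add (blacklist, start_url);

     while (url_dequeue (queue, &url, &referer, &depth))
       {
         char *file = retrieve_one (url, referer);
         if (file_is_html (file) && depth < opt.reclevel)
           for (child = child_links_of (file, url); child; child = child->next)
             if (!string_set_contains (blacklist, child->url->url)
                 && download_child_p (child, url_parsed, depth,
                                      start_url_parsed, blacklist))
               {
                 url_enqueue (queue, xstrdup (child->url->url),
                              xstrdup (url), depth + 1);
                 string_set_add (blacklist, child->url->url);
               }
       }
*/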
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
                 url_error (up_error_code));

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
  string_set_add (blacklist, start_url_parsed->url);

      char *url, *referer, *file = NULL;
      boolean dash_p_leaf_HTML = FALSE;

      if (downloaded_exceeds_quota ())
      if (status == FWRITEERR)
      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,

      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",

          if (string_set_contains (downloaded_html_set, file))
          char *redirected = NULL;
          int oldrec = opt.recursive;

          status = retrieve_url (url, &file, &redirected, referer, &dt);
          opt.recursive = oldrec;

          if (file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))

              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */

              if (!descend_redirect_p (redirected, url, depth,
                                       start_url_parsed, blacklist))

              /* Make sure that the old pre-redirect form gets
                 blacklisted.  */
              string_set_add (blacklist, url);
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)

          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))

              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = TRUE;

              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out.  */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
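
      /* Illustrative restatement of the depth decision above as a
         standalone predicate (hypothetical helper, not part of the
         build).  "inline_only" corresponds to dash_p_leaf_HTML: at
         opt.reclevel and opt.reclevel + 1 we may still descend, but
         only into the inline links (page requisites) when -p is in
         effect:

           static int
           may_descend (int depth, int *inline_only)
           {
             *inline_only = FALSE;
             if (opt.reclevel == INFINITE_RECURSION
                 || depth < opt.reclevel)
               return TRUE;
             if (opt.page_requisites
                 && (depth == opt.reclevel || depth == opt.reclevel + 1))
               {
                 *inline_only = TRUE;
                 return TRUE;
               }
             return FALSE;
           }
      */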
      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains. */

          int meta_disallow_follow = 0;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
              free_urlpos (children);

              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              assert (url_parsed != NULL);

              for (; child; child = child->next)
                  if (child->ignore_when_downloading)
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (url), depth + 1);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);

              url_free (url_parsed);
              free_urlpos (children);
      if (opt.delete_after || (file && !acceptable (file)))

          /* Either --delete-after was specified, or we loaded this
             otherwise rejected (e.g. by -R) HTML file just so we
             could harvest its hyperlinks -- in either case, delete
             the local file.  */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   "recursive rejection criteria"));
          logprintf (LOG_VERBOSE,
                     ? _("Removing %s.\n")
                     : _("Removing %s since it should be rejected.\n")),

            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          register_delete_file (file);

      FREE_MAYBE (referer);
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))

  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (downloaded_exceeds_quota ())
  else if (status == FWRITEERR)
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
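
/* Illustrative sketch (not part of the build) of how the blacklist
   doubles as a memo for expensive rejections: a URL that fails a
   costly test, such as a robots.txt check, is added to BLACKLIST so
   the test is never repeated for that URL.  expensive_check_fails()
   is a hypothetical placeholder:

     if (string_set_contains (blacklist, url))
       return 0;
     if (expensive_check_fails (url))
       {
         string_set_add (blacklist, url);
         return 0;
       }
     return 1;
*/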
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
      DEBUGP (("Already on the black list.\n"));
  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory-consuming tests should be put later on
     the list.  */
  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (u->scheme != SCHEME_HTTP
      && u->scheme != SCHEME_HTTPS
      && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
      DEBUGP (("Not following non-HTTP schemes.\n"));

  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u->scheme == SCHEME_HTTP
      || u->scheme == SCHEME_HTTPS
    if (opt.relative_only && !upos->link_relative_p)
        DEBUGP (("It doesn't really look like a relative link.\n"));
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
      DEBUGP (("The domain was not accepted.\n"));

  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && u->scheme == start_url_parsed->scheme
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))

      if (!frontcmp (start_url_parsed->dir, u->dir))
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
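
  /* Illustrative example of the no-parent check above (hypothetical
     paths): with --no-parent, a candidate is accepted only when the
     start directory is a leading prefix of the candidate's directory:

       start_url_parsed->dir = "/people/alice"
       u->dir = "/people/alice/papers"   frontcmp() != 0  ->  descend
       u->dir = "/people"                frontcmp() == 0  ->  would escape
       u->dir = "/other"                 frontcmp() == 0  ->  would escape
  */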
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
      if (!accdir (u->dir, ALLABS))
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));

  /* Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for HTML documents,
     which might lead to other files that do need to be downloaded.
     That is, unless we've exhausted the recursion depth anyway.  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           && depth < opt.reclevel - 1
           && depth != INFINITE_RECURSION))
      if (!acceptable (u->file))
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
  if (u->scheme == parent->scheme)
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
  if (opt.use_robots && (u->scheme == SCHEME_HTTP
                         || u->scheme == SCHEME_HTTPS

      struct robot_specs *specs = res_get_specs (u->host, u->port);

          if (res_retrieve_file (url, &rfile))
              specs = res_parse_from_file (rfile);

              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);

          res_register_specs (u->host, u->port, specs);

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
  /* The URL has passed all the tests.  It can be placed in the
     download queue.  */
  DEBUGP (("Decided to load it.\n"));

  DEBUGP (("Decided NOT to load it.\n"));
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */

static int
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xmalloc (sizeof (struct urlpos));
  memset (upos, 0, sizeof (*upos));
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);

    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));
#define ENSURE_TABLES_EXIST do {                        \
  if (!dl_file_url_map)                                 \
    dl_file_url_map = make_string_hash_table (0);       \
  if (!dl_url_file_map)                                 \
    dl_url_file_map = make_string_hash_table (0);       \
} while (0)
/* Return 1 if S1 and S2 are the same, except for "/index.html".  The
   cases in which it returns 1 are (substitute any substring for
   "foo"):

   m("foo/index.html", "foo/")  ==> 1
   m("foo/", "foo/index.html")  ==> 1
   m("foo", "foo/index.html")   ==> 1
   m("foo", "foo/")             ==> 1
   m("foo", "foo")              ==> 1  */

match_except_index (const char *s1, const char *s2)
{
  /* Skip common substring. */
  for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)

    /* Strings differ at the very beginning -- bail out.  We need to
       check this explicitly to avoid `lng - 1' reading outside the
       array.  */

    /* Both strings hit EOF -- strings are equal. */

    /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux". */

    /* S1 is the longer one. */

    /* S2 is the longer one. */

  /* foo/index.html */  /* or */  /* foo/index.html */

    /* The right-hand case. */

  if (*lng == '/' && *(lng + 1) == '\0')

  return 0 == strcmp (lng, "/index.html");
dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
{
  char *mapping_url = (char *)key;
  char *mapping_file = (char *)value;
  char *file = (char *)arg;

  if (0 == strcmp (mapping_file, file))
      hash_table_remove (dl_url_file_map, mapping_url);
      xfree (mapping_file);

  /* Continue mapping. */

/* Remove all associations from various URLs to FILE from dl_url_file_map. */

dissociate_urls_from_file (const char *file)
{
  hash_table_map (dl_url_file_map, dissociate_urls_from_file_mapper,
/* Register that URL has been successfully downloaded to FILE.  This
   is used by the link conversion code to convert references to URLs
   to references to local files.  It is also being used to check if a
   URL has already been downloaded.  */

register_download (const char *url, const char *file)
{
  char *old_file, *old_url;

  /* With some forms of retrieval it is possible for two different
     URLs to end up being saved to the same file, although this is
     neither likely nor particularly desirable.  If both are
     downloaded, the second download will override the first one.
     When that happens, dissociate the old file name from the URL.  */

  if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
      if (0 == strcmp (url, old_url))
        /* We have somehow managed to download the same URL twice.
           Nothing to do.  */

      if (match_except_index (url, old_url)
          && !hash_table_contains (dl_url_file_map, url))
        /* The two URLs differ only in the "index.html" ending.  For
           example, one is "http://www.server.com/", and the other is
           "http://www.server.com/index.html".  Don't remove the old
           one, just add the new one as a non-canonical entry.  */

      hash_table_remove (dl_file_url_map, file);
      /* Remove all the URLs that point to this file.  Yes, there can
         be more than one such URL, because we store redirections as
         multiple entries in dl_url_file_map.  For example, if URL1
         redirects to URL2 which gets downloaded to FILE, we map both
         URL1 and URL2 to FILE in dl_url_file_map.  (dl_file_url_map
         only points to URL2.)  When another URL gets loaded to FILE,
         we want both URL1 and URL2 dissociated from it.

         This is a relatively expensive operation because it performs
         a linear search of the whole hash table, but it should be
         called very rarely, only when two URLs resolve to the same
         file name, *and* the "<file>.1" extensions are turned off.
         In other words, almost never.  */
      dissociate_urls_from_file (file);

  hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
  /* A URL->FILE mapping is not possible without a FILE->URL mapping.
     If the latter were present, it should have been removed by the
     above `if'.  So we could write:

         assert (!hash_table_contains (dl_url_file_map, url));

     The above is correct when running in recursive mode where the
     same URL always resolves to the same file.  But if you do
     something like:

         wget URL URL

     then the first URL will resolve to "FILE", and the other to
     "FILE.1".  In that case, FILE.1 will not be found in
     dl_file_url_map, but URL will still point to FILE in
     dl_url_file_map.  */
  if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
      hash_table_remove (dl_url_file_map, url);

  hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
/* Register that FROM has been redirected to TO.  This assumes that TO
   is successfully downloaded and already registered using
   register_download() above.  */

register_redirection (const char *from, const char *to)
{
  file = hash_table_get (dl_url_file_map, to);
  assert (file != NULL);
  if (!hash_table_contains (dl_url_file_map, from))
    hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
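
/* Illustrative example (hypothetical URLs) of the mappings left behind
   by register_download() and register_redirection(): if
   "http://host/old" redirects to "http://host/new", which is saved as
   "new.html", the tables end up as

     dl_url_file_map:  "http://host/new"  ->  "new.html"
                       "http://host/old"  ->  "new.html"
     dl_file_url_map:  "new.html"         ->  "http://host/new"

   i.e. several URLs may map to one file, but each file maps back to a
   single canonical URL.  */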
/* Register that the file has been deleted. */

static void
register_delete_file (const char *file)
{
  char *old_url, *old_file;

  if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))

  hash_table_remove (dl_file_url_map, file);

  dissociate_urls_from_file (file);
/* Register that FILE is an HTML file that has been downloaded. */

register_html (const char *url, const char *file)
{
  if (!downloaded_html_set)
    downloaded_html_set = make_string_hash_table (0);
  else if (hash_table_contains (downloaded_html_set, file))

  /* The set and the list should use the same copy of FILE, but the
     slist interface insists on strduping the string it gets.  Oh
     well.  */
  string_set_add (downloaded_html_set, file);
  downloaded_html_list = slist_prepend (downloaded_html_list, file);
/* This function is called when the retrieval is done to convert the
   links that have been downloaded.  It has to be called at the end of
   the retrieval, because only then does Wget know conclusively which
   URLs have been downloaded, and which not, so it can tell which
   direction to convert to.

   The "direction" means that the URLs to the files that have been
   downloaded get converted to the relative URL which will point to
   that file.  And the other URLs get converted to the remote URL on
   the server.

   All the downloaded HTML files are kept in downloaded_html_list, and
   the downloaded URLs in dl_url_file_map.  All the information is
   extracted from these two structures.  */
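
/* Illustrative example of the two directions (hypothetical URLs and
   files): suppose "http://host/a.html" was saved as "host/a.html" and
   links to "b.html" and "c.html", of which only "http://host/b.html"
   was downloaded.  After conversion, the first link is rewritten to a
   relative reference to the local copy of b.html
   (CO_CONVERT_TO_RELATIVE), while the second is expanded to the full
   remote URL "http://host/c.html" (CO_CONVERT_TO_COMPLETE), so the
   downloaded material is reachable when browsing locally and
   everything else still points at the server.  */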
convert_all_links (void)
{
  struct wget_timer *timer = wtimer_new ();

  /* Destructively reverse downloaded_html_list to get it in the right
     order.  register_html() used slist_prepend() consistently.  */
  downloaded_html_list = slist_nreverse (downloaded_html_list);

  for (html = downloaded_html_list; html; html = html->next)
      struct urlpos *urls, *cur_url;
      char *file = html->string;

      /* Determine the URL of the HTML file.  get_urls_html will need
         it.  */
      url = hash_table_get (dl_file_url_map, file);

          DEBUGP (("Apparently %s has been removed.\n", file));

      DEBUGP (("Scanning %s (from %s)\n", file, url));
      /* Parse the HTML file... */
      urls = get_urls_html (file, url, NULL);

      /* We don't respect meta_disallow_follow here because, even if
         the file is not followed, we might still want to convert the
         links that have been followed from other files.  */

      for (cur_url = urls; cur_url; cur_url = cur_url->next)
          struct url *u = cur_url->url;

          if (cur_url->link_base_p)
              /* Base references have been resolved by our parser, so
                 we turn the base URL into an empty string.  (Perhaps
                 we should remove the tag entirely?)  */
              cur_url->convert = CO_NULLIFY_BASE;
          /* We decide the direction of conversion according to whether
             a URL was downloaded.  Downloaded URLs will be converted
             ABS2REL, whereas non-downloaded will be converted REL2ABS.  */
          local_name = hash_table_get (dl_url_file_map, u->url);

          /* Decide on the conversion type. */

              /* We've downloaded this URL.  Convert it to relative
                 form.  We do this even if the URL already is in
                 relative form, because our directory structure may
                 not be identical to that on the server (think `-nd',
                 `--cut-dirs', etc.)  */
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (local_name);
              DEBUGP (("will convert url %s to local %s\n", u->url, local_name));

              /* We haven't downloaded this URL.  If it's not already
                 complete (including a full host name), convert it to
                 that form, so it can be reached while browsing this
                 HTML locally.  */
              if (!cur_url->link_complete_p)
                cur_url->convert = CO_CONVERT_TO_COMPLETE;
              cur_url->local_name = NULL;
              DEBUGP (("will convert url %s to complete\n", u->url));

      /* Convert the links in the file. */
      convert_links (file, urls);
  msecs = wtimer_elapsed (timer);
  wtimer_delete (timer);
  logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"),
             file_count, (double)msecs / 1000);
/* Cleanup the data structures associated with recursive retrieving
   (the variables above).  */

recursive_cleanup (void)
{
  free_keys_and_values (dl_file_url_map);
  hash_table_destroy (dl_file_url_map);
  dl_file_url_map = NULL;

  free_keys_and_values (dl_url_file_map);
  hash_table_destroy (dl_url_file_map);
  dl_url_file_map = NULL;

  if (downloaded_html_set)
    string_set_free (downloaded_html_set);
  slist_free (downloaded_html_list);
  downloaded_html_list = NULL;