/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

   This file is part of GNU Wget.

   GNU Wget is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   GNU Wget is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Wget; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
#ifdef HAVE_STRING_H
# include <string.h>
#endif /* HAVE_STRING_H */
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <sys/types.h>

extern char *version_string;
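/* Map from local file names to the URLs they were downloaded from,
   and vice versa.  dl_url_file_map may hold several URL keys for one
   file (e.g. redirections); see register_download and
   register_redirection below.  */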
static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;

/* List of HTML files downloaded in this Wget run, used for link
   conversion after Wget is done.  The list and the set contain the
   same information, except the list maintains the order.  Perhaps I
   should get rid of the list; it is there only for historical
   reasons.  */
static slist *downloaded_html_list;
static struct hash_table *downloaded_html_set;

static void register_delete_file PARAMS ((const char *));
/* Functions for maintaining the URL queue.  */
struct queue_element {
  const char *url;
  const char *referer;
  int depth;
  struct queue_element *next;
};

struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
/* Create a URL queue.  */
static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xmalloc (sizeof (*queue));
  memset (queue, '\0', sizeof (*queue));
  return queue;
}
/* Delete a URL queue.  */
static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}
/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */

static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth)
{
  struct queue_element *qel = xmalloc (sizeof (*qel));
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->next = NULL;

  ++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
/* Take a URL out of the queue.  Return 1 if this operation succeeded,
   or 0 if the queue is empty.  */
static int
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth)
{
  struct queue_element *qel = queue->head;

  if (!qel)
    return 0;

  queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;

  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;

  --queue->count;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  xfree (qel);
  return 1;
}
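/* A minimal usage sketch (illustration only; it mirrors how
   retrieve_tree below drives the queue): the caller hands
   heap-allocated strings to url_enqueue and owns them again once
   url_dequeue returns them.

     struct url_queue *queue = url_queue_new ();
     const char *url, *referer;
     int depth;

     url_enqueue (queue, xstrdup ("http://www.server.com/"), NULL, 0);
     while (url_dequeue (queue, &url, &referer, &depth))
       {
         ... process URL at DEPTH ...
         xfree ((char *) url);
         FREE_MAYBE ((char *) referer);
       }
     url_queue_delete (queue);  */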
static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
                                     struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
                                       struct url *, struct hash_table *));
/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree on the
   other hand implements breadth-first traversal of the tree, which
   results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. retrieve it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue.  */
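/* A worked example (hypothetical): suppose START_URL is page A, which
   links to pages B and C, and `-l 1' is in effect.  The queue then
   evolves as follows:

     dequeue A (depth 0) -> download it, enqueue B and C at depth 1
     dequeue B (depth 1) -> download it; its links would be at depth 2,
                            which exceeds -l 1, so none are enqueued
     dequeue C (depth 1) -> download it; likewise nothing new is enqueued

   i.e. the tree is visited level by level rather than branch by
   branch, which is what makes the download ordering "nicer".  */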
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load.  */
  struct url_queue *queue = url_queue_new ();

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist = make_string_hash_table (0);

  /* We'll need various components of this, so better get it over with
     now.  */
  struct url *start_url_parsed = url_parse (start_url, NULL);

  url_enqueue (queue, xstrdup (start_url), NULL, 0);
  string_set_add (blacklist, start_url);

  while (1)
    {
      char *url, *referer, *file = NULL;
      int depth;
      int dt = 0;
      boolean dash_p_leaf_HTML = FALSE;
      if (downloaded_exceeds_quota ())
        {
          status = QUOTEXC;
          break;
        }
      if (status == FWRITEERR)
        break;
      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth))
        break;
      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = hash_table_get (dl_url_file_map, url);

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));
          /* #### This check might be horribly slow when downloading
             sites with a huge number of HTML docs.  Use a hash table
             instead!  Thankfully, it gets tripped only when you use
             `wget -r URL1 URL2 ...', as explained above.  */

          if (string_set_contains (downloaded_html_set, file))
          char *redirected = NULL;
          int oldrec = opt.recursive;

          opt.recursive = 0;
          status = retrieve_url (url, &file, &redirected, referer, &dt);
          opt.recursive = oldrec;

          if (file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))
              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */
              if (!descend_redirect_p (redirected, url, depth,
                                       start_url_parsed, blacklist))

              /* Make sure that the old pre-redirect form gets
                 blacklisted as well.  */
              string_set_add (blacklist, url);
          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = TRUE;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out.  */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));
            }
        }
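      /* Example (illustrative): with `-r -l 2 -p', a page found at
         depth 2 or 3 is treated as a leaf: it is not descended into
         for ordinary links, but its inline requisites (images, style
         sheets, frame contents) may still be enqueued.  The extra
         level at depth 3 exists so that the requisites of pages
         loaded into frames are picked up as well.  */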
      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains.  */

          int meta_disallow_follow = 0;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            {
              free_urlpos (children);
              children = NULL;
            }
          struct urlpos *child = children;
          struct url *url_parsed = url_parse (url, NULL);
          assert (url_parsed != NULL);

          for (; child; child = child->next)
            {
              if (child->ignore_when_downloading)
                continue;
              if (dash_p_leaf_HTML && !child->link_inline_p)
                continue;
              if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                    blacklist))
                {
                  url_enqueue (queue, xstrdup (child->url->url),
                               xstrdup (url), depth + 1);
                  /* We blacklist the URL we have enqueued, because we
                     don't want to enqueue (and hence download) the
                     same URL twice.  */
                  string_set_add (blacklist, child->url->url);
                }
            }
          url_free (url_parsed);
          free_urlpos (children);
      if (opt.delete_after || (file && !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             otherwise rejected (e.g. by -R) HTML file just so we
             could harvest its hyperlinks -- in either case, delete
             the local file.  */
          DEBUGP (("Removing file due to %s in retrieve_tree():\n",
                   opt.delete_after ? "--delete-after" :
                   "recursive rejection criteria"));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          register_delete_file (file);
        }
      FREE_MAYBE (referer);
    }
  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3;
    while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))
      {
        xfree (d1);
        FREE_MAYBE (d2);
      }
  }
  url_queue_delete (queue);
  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);
  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  char *suf;
  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    DEBUGP (("Already on the black list.\n"));
  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory-consuming tests should be put later on
     the list.  */
  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (u->scheme != SCHEME_HTTP
      && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    DEBUGP (("Not following non-HTTP schemes.\n"));
  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u->scheme == SCHEME_HTTP)
    if (opt.relative_only && !upos->link_relative_p)
      DEBUGP (("It doesn't really look like a relative link.\n"));
  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    DEBUGP (("The domain was not accepted.\n"));
  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && u->scheme == start_url_parsed->scheme
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    if (!frontcmp (start_url_parsed->dir, u->dir))
      DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
               u->dir, start_url_parsed->dir));
  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    if (!accdir (u->dir, ALLABS))
      DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
  /* Check for acceptance/rejection rules.  We ignore these rules
     for HTML documents because they might lead to other files which
     need to be downloaded.  Of course, we don't know which
     documents are HTML before downloading them, so we guess.

     A file is subject to acceptance/rejection rules if:

     * u->file is not "" (i.e. it is not a directory)
       and either:
         + there is no file suffix,
         + or there is a suffix, but it is not "html" or "htm",
         + or both:
             - recursion is not infinite,
             - and we are at its very end.  */
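  /* For instance (illustrative): with `-A pdf', a link to "foo.pdf"
     is enqueued only if "foo.pdf" passes the acceptance list, whereas
     a link to "bar.html" is downloaded regardless; if it then fails
     the -A/-R rules, it is removed afterwards (see the delete_after
     handling in retrieve_tree).  */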
  if (u->file[0] != '\0'
      && ((suf = suffix (url)) == NULL
          || (0 != strcmp (suf, "html") && 0 != strcmp (suf, "htm"))
          || (opt.reclevel != INFINITE_RECURSION && depth >= opt.reclevel)))
    if (!acceptable (u->file))
      DEBUGP (("%s (%s) does not match acc/rej rules.\n",
               url, u->file));
  if (u->scheme == parent->scheme)
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
               u->host, parent->host));
  if (opt.use_robots && u->scheme == SCHEME_HTTP)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);
              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce
                 dummy ones so that we can register them and stop
                 trying to retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
        }
    }
  /* The URL has passed all the tests.  It can be placed in the
     download queue.  */
  DEBUGP (("Decided to load it.\n"));

  return 1;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return 0;
}
/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */
static int
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  int success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xmalloc (sizeof (struct urlpos));
  memset (upos, 0, sizeof (*upos));
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);
  xfree (upos);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}
#define ENSURE_TABLES_EXIST do {                        \
  if (!dl_file_url_map)                                 \
    dl_file_url_map = make_string_hash_table (0);       \
  if (!dl_url_file_map)                                 \
    dl_url_file_map = make_string_hash_table (0);       \
} while (0)
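/* Note: the do { ... } while (0) wrapper lets ENSURE_TABLES_EXIST be
   used like a single statement, e.g. as the body of an un-braced
   `if', without changing the surrounding control flow.  */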
/* Return 1 if S1 and S2 are the same, except for "/index.html".  The
   cases in which it returns 1 are (substitute any substring
   for "foo"):

   m("foo/index.html", "foo/")  ==> 1
   m("foo/", "foo/index.html")  ==> 1
   m("foo", "foo/index.html")   ==> 1
   m("foo", "foo/")             ==> 1
   m("foo", "foo")              ==> 1  */
static int
match_except_index (const char *s1, const char *s2)
{
  int i;
  const char *lng;

  /* Skip common substring. */
  for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)
    ;
  if (i == 0)
    /* Strings differ at the very beginning -- bail out.  We need to
       check this explicitly to avoid `lng - 1' reading outside the
       array.  */
    return 0;

  if (!*s1 && !*s2)
    /* Both strings hit EOF -- strings are equal. */
    return 1;
  else if (*s1 && *s2)
    /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux". */
    return 0;
  else if (*s1)
    /* S1 is the longer one. */
    lng = s1;
  else
    /* S2 is the longer one. */
    lng = s2;
  /* foo/index.html */ /* or */ /* foo/index.html */

  /* The right-hand case. */
  if (*lng == '/' && *(lng + 1) == '\0')
    return 1;

  return 0 == strcmp (lng, "/index.html");
}
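/* Hash table mapper for dissociate_urls_from_file below: it is called
   once per (URL, file) entry of dl_url_file_map; if the entry's file
   name (VALUE) equals the file passed through ARG, the URL (KEY) is
   removed from the table.  */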
static int
dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
{
  char *mapping_url = (char *)key;
  char *mapping_file = (char *)value;
  char *file = (char *)arg;

  if (0 == strcmp (mapping_file, file))
    {
      hash_table_remove (dl_url_file_map, mapping_url);
      xfree (mapping_url);
      xfree (mapping_file);
    }

  /* Continue mapping. */
  return 0;
}
/* Remove all associations from various URLs to FILE from dl_url_file_map. */

static void
dissociate_urls_from_file (const char *file)
{
  hash_table_map (dl_url_file_map, dissociate_urls_from_file_mapper,
                  (void *) file);
}
/* Register that URL has been successfully downloaded to FILE.  This
   is used by the link conversion code to convert references to URLs
   to references to local files.  It is also used to check if a
   URL has already been downloaded.  */
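/* A sketch of the resulting mappings (hypothetical values):

     dl_file_url_map:  "www.server.com/index.html" -> "http://www.server.com/"
     dl_url_file_map:  "http://www.server.com/"    -> "www.server.com/index.html"

   register_redirection below may add further URL keys that map to the
   same local file.  */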
void
register_download (const char *url, const char *file)
{
  char *old_file, *old_url;

  ENSURE_TABLES_EXIST;
  /* With some forms of retrieval it is possible, although not likely
     or particularly desirable, for two different URLs to resolve to
     the same file name.  If both are downloaded, the second download
     will override the first one.  When that happens, dissociate the
     old file name from the URL.  */
  if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
    {
      if (0 == strcmp (url, old_url))
        /* We have somehow managed to download the same URL twice.
           Nothing to do.  */
        return;

      if (match_except_index (url, old_url)
          && !hash_table_contains (dl_url_file_map, url))
        /* The two URLs differ only in the "index.html" ending.  For
           example, one is "http://www.server.com/", and the other is
           "http://www.server.com/index.html".  Don't remove the old
           one, just add the new one as a non-canonical entry.  */
        goto url_only;

      hash_table_remove (dl_file_url_map, file);
      /* Remove all the URLs that point to this file.  Yes, there can
         be more than one such URL, because we store redirections as
         multiple entries in dl_url_file_map.  For example, if URL1
         redirects to URL2 which gets downloaded to FILE, we map both
         URL1 and URL2 to FILE in dl_url_file_map.  (dl_file_url_map
         only points to URL2.)  When another URL gets loaded to FILE,
         we want both URL1 and URL2 dissociated from it.

         This is a relatively expensive operation because it performs
         a linear search of the whole hash table, but it should be
         called very rarely, only when two URLs resolve to the same
         file name, *and* the "<file>.1" extensions are turned off.
         In other words, almost never.  */
      dissociate_urls_from_file (file);
    }

  hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
 url_only:
  /* A URL->FILE mapping is not possible without a FILE->URL mapping.
     If the latter were present, it should have been removed by the
     above `if'.  So we could write:

        assert (!hash_table_contains (dl_url_file_map, url));

     The above is correct when running in recursive mode where the
     same URL always resolves to the same file.  But if you do
     something like:

        wget URL URL

     then the first URL will resolve to "FILE", and the other to
     "FILE.1".  In that case, FILE.1 will not be found in
     dl_file_url_map, but URL will still point to FILE in
     dl_url_file_map.  */
  if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
    {
      hash_table_remove (dl_url_file_map, url);
      xfree (old_url);
      xfree (old_file);
    }

  hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
}
/* Register that FROM has been redirected to TO.  This assumes that TO
   is successfully downloaded and already registered using
   register_download() above.  */
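/* For instance (illustrative): if "http://www.server.com/old" was
   redirected to "http://www.server.com/new", which register_download
   mapped to the local file "www.server.com/new.html", then calling
   register_redirection with those two URLs makes the old URL map to
   that same local file as well.  */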
void
register_redirection (const char *from, const char *to)
{
  char *file;

  ENSURE_TABLES_EXIST;

  file = hash_table_get (dl_url_file_map, to);
  assert (file != NULL);
  if (!hash_table_contains (dl_url_file_map, from))
    hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
}
/* Register that the file has been deleted. */

static void
register_delete_file (const char *file)
{
  char *old_url, *old_file;

  ENSURE_TABLES_EXIST;

  if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
    return;

  hash_table_remove (dl_file_url_map, file);
  xfree (old_file);
  xfree (old_url);
  dissociate_urls_from_file (file);
}
/* Register that FILE is an HTML file that has been downloaded. */

void
register_html (const char *url, const char *file)
{
  if (!downloaded_html_set)
    downloaded_html_set = make_string_hash_table (0);
  else if (hash_table_contains (downloaded_html_set, file))
    return;

  /* The set and the list should use the same copy of FILE, but the
     slist interface insists on strduping the string it gets.  Oh
     well.  */
  string_set_add (downloaded_html_set, file);
  downloaded_html_list = slist_prepend (downloaded_html_list, file);
}
/* This function is called when the retrieval is done to convert the
   links that have been downloaded.  It has to be called at the end of
   the retrieval, because only then does Wget know conclusively which
   URLs have been downloaded, and which not, so it can tell which
   direction to convert to.

   The "direction" means that the URLs to the files that have been
   downloaded get converted to the relative URL which will point to
   that file.  And the other URLs get converted to the remote URL on
   the server.

   All the downloaded HTML files are kept in downloaded_html_list, and
   the downloaded URLs in dl_url_file_map.  All the information is
   extracted from these two structures.  */
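/* Illustrative example (hypothetical values): suppose a downloaded
   page links to both "http://www.server.com/a.html" and
   "http://www.server.com/b.html", but only a.html was actually
   downloaded.  In the local copy of that page, the first link is
   rewritten to the relative local name ("a.html"), while the second
   is left pointing to (or completed into) the full remote URL.  */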
void
convert_all_links (void)
{
  slist *html;
  long msecs;
  int file_count = 0;

  struct wget_timer *timer = wtimer_new ();

  /* Destructively reverse downloaded_html_list to get it in the right
     order.  register_html() above used slist_prepend() consistently.  */
  downloaded_html_list = slist_nreverse (downloaded_html_list);

  for (html = downloaded_html_list; html; html = html->next)
    {
      struct urlpos *urls, *cur_url;
      char *url;
      char *file = html->string;
      /* Determine the URL of the HTML file.  get_urls_html will need
         it.  */
      url = hash_table_get (dl_file_url_map, file);
      if (!url)
        {
          DEBUGP (("Apparently %s has been removed.\n", file));
          continue;
        }

      DEBUGP (("Scanning %s (from %s)\n", file, url));
      /* Parse the HTML file... */
      urls = get_urls_html (file, url, NULL);

      /* We don't respect meta_disallow_follow here because, even if
         the file is not followed, we might still want to convert the
         links that have been followed from other files.  */
      for (cur_url = urls; cur_url; cur_url = cur_url->next)
        {
          char *local_name;
          struct url *u = cur_url->url;

          if (cur_url->link_base_p)
            {
              /* Base references have been resolved by our parser, so
                 we turn the base URL into an empty string.  (Perhaps
                 we should remove the tag entirely?)  */
              cur_url->convert = CO_NULLIFY_BASE;
              continue;
            }
          /* We decide the direction of conversion according to whether
             a URL was downloaded.  Downloaded URLs will be converted
             ABS2REL, whereas non-downloaded will be converted REL2ABS.  */
          local_name = hash_table_get (dl_url_file_map, u->url);

          /* Decide on the conversion type.  */
          if (local_name)
            {
              /* We've downloaded this URL.  Convert it to relative
                 form.  We do this even if the URL already is in
                 relative form, because our directory structure may
                 not be identical to that on the server (think `-nd',
                 `--cut-dirs', etc.)  */
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (local_name);
              DEBUGP (("will convert url %s to local %s\n", u->url, local_name));
            }
          else
            {
              /* We haven't downloaded this URL.  If it's not already
                 complete (including a full host name), convert it to
                 that form, so it can be reached while browsing this
                 HTML file locally.  */
              if (!cur_url->link_complete_p)
                cur_url->convert = CO_CONVERT_TO_COMPLETE;
              cur_url->local_name = NULL;
              DEBUGP (("will convert url %s to complete\n", u->url));
            }
        }
      /* Convert the links in the file.  */
      convert_links (file, urls);
      ++file_count;
      free_urlpos (urls);
    }
  msecs = wtimer_elapsed (timer);
  wtimer_delete (timer);
  logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"),
             file_count, (double)msecs / 1000);
}
/* Clean up the data structures associated with recursive retrieving
   (the variables above).  */

void
recursive_cleanup (void)
{
  if (dl_file_url_map)
    {
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;
    }
  if (dl_url_file_map)
    {
      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;
    }
  if (downloaded_html_set)
    string_set_free (downloaded_html_set);
  slist_free (downloaded_html_list);
  downloaded_html_list = NULL;
}