/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

   This file is part of GNU Wget.

   GNU Wget is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   GNU Wget is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Wget; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
#ifdef HAVE_STRING_H
# include <string.h>
#endif /* HAVE_STRING_H */
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <sys/types.h>
extern char *version_string;
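
/* Mappings between downloaded files and the URLs they came from:
   dl_file_url_map maps a local file name to the URL it was saved
   under, and dl_url_file_map maps URLs (including redirections that
   resolve to the same file) to local file names.  See
   register_download, register_redirection and convert_all_links
   below.  */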
static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;

/* List of HTML files downloaded in this Wget run, used for link
   conversion after Wget is done.  The list and the set contain the
   same information, except the list maintains the order.  Perhaps I
   should get rid of the list; it's there for historical reasons.  */
static slist *downloaded_html_list;
static struct hash_table *downloaded_html_set;

static void register_delete_file PARAMS ((const char *));

/* Functions for maintaining the URL queue.  */

struct queue_element {
  const char *url, *referer;    /* the URL and the referring page's URL */
  int depth;                    /* depth at which the URL was found */
  struct queue_element *next;   /* next element in the FIFO chain */
};

struct url_queue {
  struct queue_element *head, *tail;
  int count, maxcount;
};

/* Create a URL queue.  */
static struct url_queue *
url_queue_new (void)
{
  struct url_queue *queue = xmalloc (sizeof (*queue));
  memset (queue, '\0', sizeof (*queue));
  return queue;
}

/* Delete a URL queue.  */
static void
url_queue_delete (struct url_queue *queue)
{
  xfree (queue);
}

/* Enqueue a URL in the queue.  The queue is FIFO: the items will be
   retrieved ("dequeued") from the queue in the order they were placed
   into it.  */
static void
url_enqueue (struct url_queue *queue,
             const char *url, const char *referer, int depth)
{
  struct queue_element *qel = xmalloc (sizeof (*qel));
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;

  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));

  queue->tail->next = qel;

  queue->head = queue->tail;

/* Take a URL out of the queue.  Return 1 if this operation succeeded,
   or 0 if the queue is empty.  */
static int
url_dequeue (struct url_queue *queue,
             const char **url, const char **referer, int *depth)
{
  struct queue_element *qel = queue->head;

  queue->head = queue->head->next;

  *referer = qel->referer;

  DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
  DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
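
/* A minimal usage sketch of the queue API above -- illustrative only;
   retrieve_tree below is the real consumer.  It assumes the caller
   owns the URL and referer strings it enqueues and releases them
   after dequeuing, which is how retrieve_tree uses the queue:

     struct url_queue *queue = url_queue_new ();
     url_enqueue (queue, xstrdup ("http://example.com/"), NULL, 0);

     const char *url, *referer;
     int depth;
     while (url_dequeue (queue, &url, &referer, &depth))
       {
         ... process URL at DEPTH, possibly enqueuing its children
             at DEPTH + 1 ...
         xfree ((char *) url);
         FREE_MAYBE ((char *) referer);
       }
     url_queue_delete (queue);

   "http://example.com/" is just a placeholder URL.  */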

static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
                                     struct url *, struct hash_table *));
static int descend_redirect_p PARAMS ((const char *, const char *, int,
                                       struct url *, struct hash_table *));

/* Retrieve a part of the web beginning with START_URL.  This used to
   be called "recursive retrieval", because the old function was
   recursive and implemented depth-first search.  retrieve_tree, on
   the other hand, implements breadth-first traversal of the tree,
   which results in much nicer ordering of downloads.

   The algorithm this function uses is simple:

   1. put START_URL in the queue.
   2. while there are URLs in the queue:

     3. get next URL from the queue.
     4. download it.
     5. if the URL is HTML and its depth does not exceed maximum depth,
        get the list of URLs embedded therein.
     6. for each of those URLs do the following:

       7. if the URL is not one of those downloaded before, and if it
          satisfies the criteria specified by the various command-line
          options, add it to the queue.  */
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load.  */
  struct url_queue *queue = url_queue_new ();

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist = make_string_hash_table (0);

  /* We'll need various components of this, so better get it over with
     right away.  */
  struct url *start_url_parsed = url_parse (start_url, NULL);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
  string_set_add (blacklist, start_url_parsed->url);

      char *url, *referer, *file = NULL;
      int depth;
      int dt = 0;
      boolean dash_p_leaf_HTML = FALSE;

      if (downloaded_exceeds_quota ())
        break;
      if (status == FWRITEERR)
        break;

      /* Get the next URL from the queue...  */

      if (!url_dequeue (queue,
                        (const char **)&url, (const char **)&referer,
                        &depth))
        break;

      /* ...and download it.  Note that this download is in most cases
         unconditional, as download_child_p already makes sure a file
         doesn't get enqueued twice -- and yet this check is here, and
         not in download_child_p.  This is so that if you run `wget -r
         URL1 URL2', and a random URL is encountered once under URL1
         and again under URL2, but at a different (possibly smaller)
         depth, we want the URL's children to be taken into account
         this time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
        {
          file = xstrdup (hash_table_get (dl_url_file_map, url));

          DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                   url, file));

          if (string_set_contains (downloaded_html_set, file))

          char *redirected = NULL;
          int oldrec = opt.recursive;

          status = retrieve_url (url, &file, &redirected, referer, &dt);
          opt.recursive = oldrec;

          if (file && status == RETROK
              && (dt & RETROKF) && (dt & TEXTHTML))

              /* We have been redirected, possibly to another host, or
                 different path, or wherever.  Check whether we really
                 want to follow it.  */

              if (!descend_redirect_p (redirected, url, depth,
                                       start_url_parsed, blacklist))

              /* Make sure that the old pre-redirect form gets
                 blacklisted.  */
              string_set_add (blacklist, url);

          && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
        {
          if (opt.page_requisites
              && (depth == opt.reclevel || depth == opt.reclevel + 1))
            {
              /* When -p is specified, we are allowed to exceed the
                 maximum depth, but only for the "inline" links,
                 i.e. those that are needed to display the page.
                 Originally this could exceed the depth at most by
                 one, but we allow one more level so that the leaf
                 pages that contain frames can be loaded
                 correctly.  */
              dash_p_leaf_HTML = TRUE;
            }
          else
            {
              /* Either -p wasn't specified or it was and we've
                 already spent the two extra (pseudo-)levels that it
                 affords us, so we need to bail out.  */
              DEBUGP (("Not descending further; at depth %d, max. %d.\n",
                       depth, opt.reclevel));

      /* If the downloaded document was HTML, parse it and enqueue the
         links it contains.  */

          int meta_disallow_follow = 0;
          struct urlpos *children
            = get_urls_html (file, url, &meta_disallow_follow);

          if (opt.use_robots && meta_disallow_follow)
            free_urlpos (children);

              struct urlpos *child = children;
              struct url *url_parsed = url_parse (url, NULL);
              assert (url_parsed != NULL);

              for (; child; child = child->next)
                {
                  if (child->ignore_when_downloading)
                    continue;
                  if (dash_p_leaf_HTML && !child->link_inline_p)
                    continue;
                  if (download_child_p (child, url_parsed, depth, start_url_parsed,
                                        blacklist))
                    {
                      url_enqueue (queue, xstrdup (child->url->url),
                                   xstrdup (url), depth + 1);
                      /* We blacklist the URL we have enqueued, because we
                         don't want to enqueue (and hence download) the
                         same URL twice.  */
                      string_set_add (blacklist, child->url->url);
                    }
                }

              url_free (url_parsed);
              free_urlpos (children);

      if (opt.delete_after || (file && !acceptable (file)))
        {
          /* Either --delete-after was specified, or we loaded this
             otherwise rejected (e.g. by -R) HTML file just so we
             could harvest its hyperlinks -- in either case, delete
             the local file.  */
          DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                   opt.delete_after ? "--delete-after" :
                   "recursive rejection criteria"));
          logprintf (LOG_VERBOSE,
                     (opt.delete_after
                      ? _("Removing %s.\n")
                      : _("Removing %s since it should be rejected.\n")),
                     file);
          if (unlink (file))
            logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
          register_delete_file (file);
        }

      FREE_MAYBE (referer);

  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))

  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (downloaded_exceeds_quota ())
  else if (status == FWRITEERR)

/* Based on the context provided by retrieve_tree, decide whether a
   URL is to be descended to.  This is only ever called from
   retrieve_tree, but is in a separate function for clarity.

   The most expensive checks (such as those for robots) are memoized
   by storing these URLs to BLACKLIST.  This may or may not help.  It
   will help if those URLs are encountered many times.  */
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      DEBUGP (("Already on the black list.\n"));

  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     4. check for no-parent
     5. check for excludes && includes
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */

  /* 1. Schemes other than HTTP are normally not recursed into.  */
  if (u->scheme != SCHEME_HTTP
      && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));

  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u->scheme == SCHEME_HTTP)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));

  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && u->scheme == start_url_parsed->scheme
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!frontcmp (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));

  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir, ALLABS))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));

  /* Check for acceptance/rejection rules.  We ignore these rules
     for HTML documents because they might lead to other files which
     need to be downloaded.  Of course, we don't know which
     documents are HTML before downloading them, so we guess.

     A file is subject to acceptance/rejection rules if:

     * u->file is not "" (i.e. it is not a directory)
       + there is no file suffix,
       + or there is a suffix, but is not "html" or "htm",
         - recursion is not infinite,
         - and we are at its very end.  */
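
  /* For example (an illustrative case, not part of the original
     comment): with `-A pdf', a link to "photo.jpg" is rejected right
     here, whereas "index.html" is still downloaded so that its links
     can be harvested, and is then removed by the acceptable() check
     and register_delete_file() call in retrieve_tree above.  */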

  if (u->file[0] != '\0'
      && ((suf = suffix (url)) == NULL
          || (0 != strcmp (suf, "html") && 0 != strcmp (suf, "htm"))
          || (opt.reclevel != INFINITE_RECURSION && depth >= opt.reclevel)))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));

  if (u->scheme == parent->scheme)
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));

  if (opt.use_robots && u->scheme == SCHEME_HTTP)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            specs = res_parse_from_file (rfile);
          else
            /* If we cannot get real specs, at least produce
               dummy ones so that we can register them and stop
               trying to retrieve them.  */
            specs = res_parse ("", 0);

          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);

  /* The URL has passed all the tests.  It can be placed in the
     download queue.  */
  DEBUGP (("Decided to load it.\n"));

  DEBUGP (("Decided NOT to load it.\n"));

/* This function determines whether we will consider downloading the
   children of a URL whose download resulted in a redirection,
   possibly to another host, etc.  It is needed very rarely, and thus
   it is merely a simple-minded wrapper around download_child_p.  */
static int
descend_redirect_p (const char *redirected, const char *original, int depth,
                    struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *orig_parsed, *new_parsed;
  struct urlpos *upos;
  int success;

  orig_parsed = url_parse (original, NULL);
  assert (orig_parsed != NULL);

  new_parsed = url_parse (redirected, NULL);
  assert (new_parsed != NULL);

  upos = xmalloc (sizeof (struct urlpos));
  memset (upos, 0, sizeof (*upos));
  upos->url = new_parsed;

  success = download_child_p (upos, orig_parsed, depth,
                              start_url_parsed, blacklist);

  url_free (orig_parsed);
  url_free (new_parsed);

  if (!success)
    DEBUGP (("Redirection \"%s\" failed the test.\n", redirected));

  return success;
}

#define ENSURE_TABLES_EXIST do {                        \
  if (!dl_file_url_map)                                 \
    dl_file_url_map = make_string_hash_table (0);       \
  if (!dl_url_file_map)                                 \
    dl_url_file_map = make_string_hash_table (0);       \
} while (0)
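
/* The macro above creates the two mappings lazily; presumably it is
   invoked at the start of the registration functions below, before
   dl_file_url_map or dl_url_file_map is touched, along the lines of:

     ENSURE_TABLES_EXIST;
     hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));  */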

/* Return 1 if S1 and S2 are the same, except for "/index.html".  The
   five cases in which it returns one are (substitute any substring
   for "foo"):

   m("foo/index.html", "foo/")  ==> 1
   m("foo/", "foo/index.html")  ==> 1
   m("foo", "foo/index.html")   ==> 1
   m("foo", "foo/")             ==> 1
   m("foo", "foo")              ==> 1  */
static int
match_except_index (const char *s1, const char *s2)
{
  int i;
  const char *lng;

  /* Skip common substring.  */
  for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)
    ;

  /* Strings differ at the very beginning -- bail out.  We need to
     check this explicitly to avoid `lng - 1' reading outside the
     array.  */

  /* Both strings hit EOF -- strings are equal.  */

  /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux".  */

  /* S1 is the longer one.  */

  /* S2 is the longer one.  */

  /* foo/index.html */  /* or */  /* foo/index.html */

  /* The right-hand case.  */
  if (*lng == '/' && *(lng + 1) == '\0')

  return 0 == strcmp (lng, "/index.html");
}

static int
dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
{
  char *mapping_url = (char *)key;
  char *mapping_file = (char *)value;
  char *file = (char *)arg;

  if (0 == strcmp (mapping_file, file))
    {
      hash_table_remove (dl_url_file_map, mapping_url);
      xfree (mapping_file);
    }

  /* Continue mapping.  */
  return 0;
}

/* Remove all associations from various URLs to FILE from dl_url_file_map.  */
static void
dissociate_urls_from_file (const char *file)
{
  hash_table_map (dl_url_file_map, dissociate_urls_from_file_mapper,
                  (char *) file);
}

/* Register that URL has been successfully downloaded to FILE.  This
   is used by the link conversion code to convert references to URLs
   to references to local files.  It is also being used to check if a
   URL has already been downloaded.  */
void
register_download (const char *url, const char *file)
{
  char *old_file, *old_url;

  /* With some forms of retrieval the same file can end up registered
     under two different URLs; this is possible, although not likely
     or particularly desirable.  If both are downloaded, the second
     download will override the first one.  When that happens,
     dissociate the old file name from the URL.  */

  if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
    {
      if (0 == strcmp (url, old_url))
        /* We have somehow managed to download the same URL twice.
           Nothing to do.  */
        return;

      if (match_except_index (url, old_url)
          && !hash_table_contains (dl_url_file_map, url))
        /* The two URLs differ only in the "index.html" ending.  For
           example, one is "http://www.server.com/", and the other is
           "http://www.server.com/index.html".  Don't remove the old
           one, just add the new one as a non-canonical entry.  */
        goto url_only;

      hash_table_remove (dl_file_url_map, file);

      /* Remove all the URLs that point to this file.  Yes, there can
         be more than one such URL, because we store redirections as
         multiple entries in dl_url_file_map.  For example, if URL1
         redirects to URL2 which gets downloaded to FILE, we map both
         URL1 and URL2 to FILE in dl_url_file_map.  (dl_file_url_map
         only points to URL2.)  When another URL gets loaded to FILE,
         we want both URL1 and URL2 dissociated from it.

         This is a relatively expensive operation because it performs
         a linear search of the whole hash table, but it should be
         called very rarely, only when two URLs resolve to the same
         file name, *and* the "<file>.1" extensions are turned off.
         In other words, almost never.  */
      dissociate_urls_from_file (file);
    }

  hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));

 url_only:
  /* A URL->FILE mapping is not possible without a FILE->URL mapping.
     If the latter were present, it should have been removed by the
     above `if'.  So we could write:

         assert (!hash_table_contains (dl_url_file_map, url));

     The above is correct when running in recursive mode where the
     same URL always resolves to the same file.  But if you do

     then the first URL will resolve to "FILE", and the other to
     "FILE.1".  In that case, FILE.1 will not be found in
     dl_file_url_map, but URL will still point to FILE in
     dl_url_file_map.  */
  if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
    hash_table_remove (dl_url_file_map, url);

  hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
}

/* Register that FROM has been redirected to TO.  This assumes that TO
   is successfully downloaded and already registered using
   register_download() above.  */
void
register_redirection (const char *from, const char *to)
{
  char *file;

  file = hash_table_get (dl_url_file_map, to);
  assert (file != NULL);
  if (!hash_table_contains (dl_url_file_map, from))
    hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
}
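
/* A worked example of how the two registration calls interact, using
   hypothetical URLs: if "http://host/a" redirects to "http://host/b"
   and the latter is saved as "b.html", then

     register_download ("http://host/b", "b.html");
     register_redirection ("http://host/a", "http://host/b");

   leaves dl_url_file_map with both URLs pointing to "b.html", while
   dl_file_url_map records only "b.html" -> "http://host/b".  This is
   the situation described in the comment inside register_download
   above.  */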

/* Register that the file has been deleted.  */
static void
register_delete_file (const char *file)
{
  char *old_url, *old_file;

  if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
    return;

  hash_table_remove (dl_file_url_map, file);
  dissociate_urls_from_file (file);
}

/* Register that FILE is an HTML file that has been downloaded.  */
void
register_html (const char *url, const char *file)
{
  if (!downloaded_html_set)
    downloaded_html_set = make_string_hash_table (0);
  else if (hash_table_contains (downloaded_html_set, file))
    return;

  /* The set and the list should use the same copy of FILE, but the
     slist interface insists on strduping the string it gets.  Oh
     well.  */
  string_set_add (downloaded_html_set, file);
  downloaded_html_list = slist_prepend (downloaded_html_list, file);
}

/* This function is called when the retrieval is done to convert the
   links that have been downloaded.  It has to be called at the end of
   the retrieval, because only then does Wget know conclusively which
   URLs have been downloaded, and which not, so it can tell which
   direction to convert to.

   The "direction" means that the URLs to the files that have been
   downloaded get converted to the relative URL which will point to
   that file.  And the other URLs get converted to the remote URL on
   the server.

   All the downloaded HTML files are kept in downloaded_html_list, and
   the downloaded URLs in dl_url_file_map.  All the information is
   extracted from these two structures.  */
void
convert_all_links (void)
{
  slist *html;
  long msecs;
  int file_count = 0;

  struct wget_timer *timer = wtimer_new ();

  /* Destructively reverse downloaded_html_list to get it in the right
     order: register_html() above used slist_prepend() consistently.  */
  downloaded_html_list = slist_nreverse (downloaded_html_list);

  for (html = downloaded_html_list; html; html = html->next)
    {
      struct urlpos *urls, *cur_url;
      char *url;
      char *file = html->string;

      /* Determine the URL of the HTML file.  get_urls_html will need
         it.  */
      url = hash_table_get (dl_file_url_map, file);
      if (!url)
        {
          DEBUGP (("Apparently %s has been removed.\n", file));
          continue;
        }

      DEBUGP (("Scanning %s (from %s)\n", file, url));

      /* Parse the HTML file...  */
      urls = get_urls_html (file, url, NULL);

      /* We don't respect meta_disallow_follow here because, even if
         the file is not followed, we might still want to convert the
         links that have been followed from other files.  */

      for (cur_url = urls; cur_url; cur_url = cur_url->next)
        {
          char *local_name;
          struct url *u = cur_url->url;

          if (cur_url->link_base_p)
            {
              /* Base references have been resolved by our parser, so
                 we turn the base URL into an empty string.  (Perhaps
                 we should remove the tag entirely?)  */
              cur_url->convert = CO_NULLIFY_BASE;
              continue;
            }

          /* We decide the direction of conversion according to whether
             a URL was downloaded.  Downloaded URLs will be converted
             ABS2REL, whereas non-downloaded will be converted REL2ABS.  */
          local_name = hash_table_get (dl_url_file_map, u->url);

          /* Decide on the conversion type.  */
          if (local_name)
            {
              /* We've downloaded this URL.  Convert it to relative
                 form.  We do this even if the URL already is in
                 relative form, because our directory structure may
                 not be identical to that on the server (think `-nd',
                 `--cut-dirs', etc.)  */
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (local_name);
              DEBUGP (("will convert url %s to local %s\n", u->url, local_name));
            }
          else
            {
              /* We haven't downloaded this URL.  If it's not already
                 complete (including a full host name), convert it to
                 that form, so it can be reached while browsing this
                 HTML.  */
              if (!cur_url->link_complete_p)
                cur_url->convert = CO_CONVERT_TO_COMPLETE;
              cur_url->local_name = NULL;
              DEBUGP (("will convert url %s to complete\n", u->url));
            }
        }

      /* Convert the links in the file.  */
      convert_links (file, urls);

      ++file_count;
      free_urlpos (urls);
    }

  msecs = wtimer_elapsed (timer);
  wtimer_delete (timer);
  logprintf (LOG_VERBOSE, _("Converted %d files in %.2f seconds.\n"),
             file_count, (double)msecs / 1000);
}

/* Clean up the data structures associated with recursive retrieving
   (the variables above).  */
void
recursive_cleanup (void)
{
  if (dl_file_url_map)
    {
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;
    }
  if (dl_url_file_map)
    {
      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;
    }
  if (downloaded_html_set)
    string_set_free (downloaded_html_set);
  slist_free (downloaded_html_list);
  downloaded_html_list = NULL;
}
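
/* A rough sketch of how the entry points above fit together over one
   recursive run -- the actual call sites live elsewhere in Wget, so
   this ordering is an assumption, not part of this file:

     retrieve_tree (start_url);   download the tree breadth-first,
                                  registering each URL/file pair
     convert_all_links ();        if link conversion was requested,
                                  rewrite the downloaded HTML files
     recursive_cleanup ();        free the mappings built above  */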