/* Handling of recursive HTTP retrieving.
   Copyright (C) 1995, 1996, 1997, 2000 Free Software Foundation, Inc.

   This file is part of Wget.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
#ifdef HAVE_STRING_H
# include <string.h>
#endif /* HAVE_STRING_H */
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <sys/types.h>
extern char *version_string;

#define ROBOTS_FILENAME "robots.txt"

static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;
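/* The two tables above map in opposite directions between local file
   names and the URLs they were saved from, e.g. (host name purely
   illustrative):

     dl_file_url_map: "www.example.com/index.html" -> "http://www.example.com/"
     dl_url_file_map: "http://www.example.com/"    -> "www.example.com/index.html"

   register_download() below fills them in, and convert_all_links()
   consults them when rewriting links after the retrieval is done.  */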
/* List of HTML files downloaded in this Wget run.  Used for link
   conversion after Wget is done.  */
static slist *downloaded_html_files;

/* List of undesirable-to-load URLs.  */
static struct hash_table *undesirable_urls;

/* List of forbidden locations.  */
static char **forbidden = NULL;
/* Current recursion depth.  */
static int depth;

/* Base directory we're recursing from (used by no_parent).  */
static char *base_dir;

/* The host name for which we last checked robots.  */
static char *robots_host;

static int first_time = 1;
/* Construct the robots URL.  */
static struct urlinfo *robots_url PARAMS ((const char *, const char *));
static uerr_t retrieve_robots PARAMS ((const char *, const char *));
static char **parse_robots PARAMS ((const char *));
static int robots_match PARAMS ((struct urlinfo *, char **));
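/* Taken together, these four routines implement the robots.txt
   support: robots_url() builds the URL of the exclusion file for a
   host (for an illustrative address such as
   "http://www.example.com/dir/page.html" it would yield
   "http://www.example.com/robots.txt"), retrieve_robots() downloads
   it, parse_robots() turns it into a NULL-terminated vector of
   disallowed path prefixes, and robots_match() checks a candidate URL
   against that vector.  */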
/* Cleanup the data structures associated with recursive retrieving
   (the variables above).  */
void
recursive_cleanup (void)
{
  if (undesirable_urls)
    {
      string_set_free (undesirable_urls);
      undesirable_urls = NULL;
    }
  if (dl_file_url_map)
    {
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;
    }
  if (dl_url_file_map)
    {
      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;
    }
  undesirable_urls = NULL;
  free_vec (forbidden);
  forbidden = NULL;
  slist_free (downloaded_html_files);
  downloaded_html_files = NULL;
  FREE_MAYBE (base_dir);
  FREE_MAYBE (robots_host);
  first_time = 1;
}
/* Reset FIRST_TIME to 1, so that some action can be taken in
   recursive_retrieve().  */
void
recursive_reset (void)
{
  first_time = 1;
}
/* The core of recursive retrieving.  Endless recursion is avoided by
   having all URLs stored to a linked list of URLs, which is checked
   before loading any URL.  That way no URL can get loaded twice.

   The function also supports specification of maximum recursion depth
   and a number of other goodies.  */
uerr_t
recursive_retrieve (const char *file, const char *this_url)
{
  char *constr, *filename, *newloc;
  char *canon_this_url = NULL;
  int dt, inl, dash_p_leaf_HTML = FALSE;
  int meta_disallow_follow;
  int this_url_ftp;             /* See the explanation below.  */
  uerr_t err;
  struct urlinfo *rurl;
  urlpos *url_list, *cur_url;
  char *rfile;                  /* For robots.  */
  struct urlinfo *u;

  assert (this_url != NULL);
  assert (file != NULL);
  /* If quota was exceeded earlier, bail out.  */
  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  /* Cache the current URL in the list.  */
  if (first_time)
    {
      /* These three operations need to be done only once per Wget
         run.  They should probably be at a different location.  */
      if (!undesirable_urls)
        undesirable_urls = make_string_hash_table (0);
      else
        hash_table_clear (undesirable_urls);
      string_set_add (undesirable_urls, this_url);
      if (dl_file_url_map)
        hash_table_clear (dl_file_url_map);
      if (dl_url_file_map)
        hash_table_clear (dl_url_file_map);
      /* Enter this_url into the hash table, in original and "enhanced" form.  */
      u = newurl ();
      err = parseurl (this_url, u, 0);
      if (err == URLOK)
        {
          string_set_add (undesirable_urls, u->url);
          if (opt.no_parent)
            base_dir = xstrdup (u->dir); /* Set the base dir.  */
          /* Set the canonical this_url to be sent as referer.  This
             problem exists only when running the first time.  */
          canon_this_url = xstrdup (u->url);
        }
      else
        DEBUGP (("Double yuck! The *base* URL is broken.\n"));
  if (opt.reclevel != INFINITE_RECURSION && depth > opt.reclevel)
    /* We've exceeded the maximum recursion depth specified by the user.  */
    {
      if (opt.page_requisites && depth <= opt.reclevel + 1)
        /* When -p is specified, we can do one more partial recursion from the
           "leaf nodes" on the HTML document tree.  The recursion is partial in
           that we won't traverse any <A> or <AREA> tags, nor any <LINK> tags
           except for <LINK REL="stylesheet">.  */
        dash_p_leaf_HTML = TRUE;
      else
        /* Either -p wasn't specified or it was and we've already gone the one
           extra (pseudo-)level that it affords us, so we need to bail out.  */
        {
          DEBUGP (("Recursion depth %d exceeded max. depth %d.\n",
                   depth, opt.reclevel));
          --depth;
          return RECLEVELEXC;
        }
    }
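  /* For instance, with `-r -l1 -p' a document linked from the start
     page is parsed one level beyond the maximum depth; instead of
     bailing out above, the dash_p_leaf_HTML pass still picks up its
     inline requisites (images, stylesheets), while <A> and <AREA>
     links are no longer followed.  */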
  /* Determine whether this_url is an FTP URL.  If it is, it means
     that the retrieval is done through proxy.  In that case, FTP
     links will be followed by default and recursion will not be
     turned off when following them.  */
  this_url_ftp = (urlproto (this_url) == URLFTP);

  /* Get the URL-s from an HTML file: */
  url_list = get_urls_html (file, canon_this_url ? canon_this_url : this_url,
                            dash_p_leaf_HTML, &meta_disallow_follow);
  if (opt.use_robots && meta_disallow_follow)
    {
      /* The META tag says we are not to follow this file.  Respect that.  */
      free_urlpos (url_list);
      url_list = NULL;
    }
  /* Decide what to do with each of the URLs.  A URL will be loaded if
     it meets several requirements, discussed later.  */
  for (cur_url = url_list; cur_url; cur_url = cur_url->next)
    {
      /* If quota was exceeded earlier, bail out.  */
      if (downloaded_exceeds_quota ())
        break;
      /* Parse the URL for convenient use in other functions, as well
         as to get the optimized form.  It also checks URL integrity.  */
      u = newurl ();
      if (parseurl (cur_url->url, u, 0) != URLOK)
        {
          DEBUGP (("Yuck! A bad URL.\n"));
          freeurl (u, 1);
          continue;
        }
      if (u->proto == URLFILE)
        {
          DEBUGP (("Nothing to do with file:// around here.\n"));
          freeurl (u, 1);
          continue;
        }
      assert (u->url != NULL);
      constr = xstrdup (u->url);
      /* Several checks whether a file is acceptable to load:
         1. check if URL is ftp, and we don't load it
         2. check for relative links (if relative_only is set)
         3. check for domain
         4. check for no-parent
         5. check for excludes && includes
         6. check for suffix
         7. check for same host (if spanhost is unset), with possible
            gethostbyname baggage
         8. check for robots.txt

         Addendum: If the URL is FTP, and it is to be loaded, only the
         domain and suffix settings are "stronger".

         Note that .html and (yuck) .htm will get loaded regardless of
         suffix rules (but that is remedied later with unlink) unless
         the depth equals the maximum depth.

         More time- and memory- consuming tests should be put later on
         the list.  */
      /* inl is set if the URL we are working on (constr) is stored in
         undesirable_urls.  Using it is crucial to avoid unnecessary
         repeated hits to the hash table.  */
      inl = string_set_exists (undesirable_urls, constr);

      /* If it is FTP, and FTP is not followed, chuck it out.  */
      if (!inl)
        if (u->proto == URLFTP && !opt.follow_ftp && !this_url_ftp)
          {
            DEBUGP (("Uh, it is FTP but I'm not in the mood to follow FTP.\n"));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* If it is an absolute link and absolute links are not followed,
         chuck it out.  */
      if (!inl && u->proto != URLFTP)
        if (opt.relative_only && !cur_url->link_relative_p)
          {
            DEBUGP (("It doesn't really look like a relative link.\n"));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* If its domain is not to be accepted/looked-up, chuck it out.  */
      if (!inl)
        if (!accept_domain (u))
          {
            DEBUGP (("I don't like the smell of that domain.\n"));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* Check for parent directory.  */
      if (!inl && opt.no_parent
          /* If the new URL is FTP and the old was not, ignore
             opt.no_parent.  */
          && !(!this_url_ftp && u->proto == URLFTP))
        {
          /* Check for base_dir first.  */
          if (!(base_dir && frontcmp (base_dir, u->dir)))
            {
              /* Failing that, check for parent dir.  */
              struct urlinfo *ut = newurl ();
              if (parseurl (this_url, ut, 0) != URLOK)
                DEBUGP (("Double yuck! The *base* URL is broken.\n"));
              else if (!frontcmp (ut->dir, u->dir))
                {
                  /* Failing that too, kill the URL.  */
                  DEBUGP (("Trying to escape parental guidance with no_parent on.\n"));
                  string_set_add (undesirable_urls, constr);
                  inl = 1;
                }
              freeurl (ut, 1);
            }
        }
      /* If the file does not match the acceptance list, or is on the
         rejection list, chuck it out.  The same goes for the
         directory exclude- and include- lists.  */
      if (!inl && (opt.includes || opt.excludes))
        if (!accdir (u->dir, ALLABS))
          {
            DEBUGP (("%s (%s) is excluded/not-included.\n", constr, u->dir));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* We check for acceptance/rejection rules only for non-HTML
         documents.  Since we don't know whether they really are
         HTML, it will be deduced from (an OR-ed list):

         1) u->file is "" (meaning it is a directory)
         2) suffix exists, and it is "html" or "htm", and the depth
            test in the `if' below holds.

         If the file *is* supposed to be HTML, it will *not* be
         subject to acc/rej rules, unless a finite maximum depth has
         been specified and the current depth is the maximum depth.  */
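      /* Example: under `-r -l2 -A "*.gif"' a link ending in ".html"
         found before the maximum depth is still fetched (it may yield
         further links) and is unlinked later because it fails the
         acceptance rules, whereas the same link found at the maximum
         depth is subjected to the rules here and never retrieved.  */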
              || (((suf = suffix (constr)) != NULL)
                  && ((!strcmp (suf, "html") || !strcmp (suf, "htm"))
                      && ((opt.reclevel != INFINITE_RECURSION) &&
                          (depth != opt.reclevel))))))
        {
          if (!acceptable (u->file))
            {
              DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                       constr, u->file));
              string_set_add (undesirable_urls, constr);
              inl = 1;
            }
        }
      /* Optimize the URL (which includes possible DNS lookup) only
         after all other possibilities have been exhausted.  */
      if (!opt.simple_check)
        opt_url (u);
      else
        {
          char *p;
          /* Just lowercase the hostname.  */
          for (p = u->host; *p; p++)
            *p = TOLOWER (*p);
          u->url = str_url (u, 0);
        }
      constr = xstrdup (u->url);
      string_set_add (undesirable_urls, constr);
      if (!inl && !((u->proto == URLFTP) && !this_url_ftp))
        if (!opt.spanhost && this_url && !same_host (this_url, constr))
          {
            DEBUGP (("This is not the same hostname as the parent's.\n"));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* What about robots.txt?  */
      if (!inl && opt.use_robots && u->proto == URLHTTP)
        {
          /* Since Wget knows about only one set of robot rules at a
             time, /robots.txt must be reloaded whenever a new host is
             accessed.

             robots_host holds the host the current `forbid' variable
             is assumed to be valid for.  */
          if (!robots_host || !same_host (robots_host, u->host))
            {
              FREE_MAYBE (robots_host);
              /* Now make robots_host the new host, no matter what the
                 result will be.  So if there is no /robots.txt on the
                 site, Wget will not retry getting robots all the
                 time.  */
              robots_host = xstrdup (u->host);
              free_vec (forbidden);
              forbidden = NULL;
              err = retrieve_robots (constr, ROBOTS_FILENAME);
              if (err == ROBOTSOK)
                {
                  rurl = robots_url (constr, ROBOTS_FILENAME);
                  rfile = url_filename (rurl);
                  forbidden = parse_robots (rfile);
                }
            }

          /* Now that we have (or don't have) robots, we can check for
             them.  */
          if (!robots_match (u, forbidden))
            {
              DEBUGP (("Stuffing %s because %s forbids it.\n", this_url,
                       ROBOTS_FILENAME));
              string_set_add (undesirable_urls, constr);
              inl = 1;
            }
        }
      /* If it wasn't chucked out, do something with it.  */
      if (!inl)
        {
          DEBUGP (("I've decided to load it -> "));
          /* Add it to the list of already-loaded URL-s.  */
          string_set_add (undesirable_urls, constr);
          /* Automatically followed FTPs will *not* be downloaded
             recursively.  */
          if (u->proto == URLFTP)
            {
              /* Don't you adore side-effects?  */
              opt.recursive = 0;
            }
          /* Reset its type.  */
          dt = 0;
          retrieve_url (constr, &filename, &newloc,
                        canon_this_url ? canon_this_url : this_url, &dt);
          if (u->proto == URLFTP)
            {
              /* Restore the recursion setting.  */
              opt.recursive = 1;
            }
          /* If there was no error, and the type is text/html, parse
             it recursively.  */
          if ((dt & RETROKF) && (dt & TEXTHTML))
            recursive_retrieve (filename, constr);
          else
            DEBUGP (("%s is not text/html so we don't chase.\n",
                     filename ? filename: "(null)"));
          if (opt.delete_after || (filename && !acceptable (filename)))
            /* Either --delete-after was specified, or we loaded this otherwise
               rejected (e.g. by -R) HTML file just so we could harvest its
               hyperlinks -- in either case, delete the local file.  */
            {
              DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                       opt.delete_after ? "--delete-after" :
                       "recursive rejection criteria"));
              logprintf (LOG_VERBOSE,
                         (opt.delete_after ? _("Removing %s.\n")
                          : _("Removing %s since it should be rejected.\n")),
                         filename);
              if (unlink (filename))
                logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
            }
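          /* E.g. under `-r -A "*.gif"' an HTML page is retrieved and
             parsed for the GIF links it contains, and then removed
             here because ".html" itself fails the acceptance rules;
             with --delete-after every retrieved file is removed.  */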
          /* If everything was OK, and links are to be converted, let's
             store the local filename.  */
          if (opt.convert_links && (dt & RETROKF) && (filename != NULL))
            {
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (filename);
            }
        }
      else
        DEBUGP (("%s already in list, so we don't load.\n", constr));
      /* Free filename and constr.  */
      FREE_MAYBE (filename);
      /* Increment the pbuf for the appropriate size.  */
    }
  if (opt.convert_links && !opt.delete_after)
    /* This is merely the first pass: the links that have been
       successfully downloaded are converted.  In the second pass,
       convert_all_links() will also convert those links that have NOT
       been downloaded to their canonical form.  */
    convert_links (file, url_list);
  /* Free the linked list of URL-s.  */
  free_urlpos (url_list);
  /* Free the canonical this_url.  */
  FREE_MAYBE (canon_this_url);
  /* Decrement the recursion depth.  */
  --depth;
  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  else
    return RETROK;
}
/* Register that URL has been successfully downloaded to FILE.  */
void
register_download (const char *url, const char *file)
{
  if (!opt.convert_links)
    return;
  if (!dl_file_url_map)
    dl_file_url_map = make_string_hash_table (0);
  hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
  if (!dl_url_file_map)
    dl_url_file_map = make_string_hash_table (0);
  hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
}
void
register_html (const char *url, const char *file)
{
  if (!opt.convert_links)
    return;
  downloaded_html_files = slist_prepend (downloaded_html_files, file);
}
/* convert_links() is called from recursive_retrieve() after we're
   done with an HTML file.  This call to convert_links is not complete
   because it converts only the downloaded files, and Wget cannot know
   which files will be downloaded afterwards.  So, if we have file
   fileone.html with:

   <a href="/c/something.gif">

   and /c/something.gif was not downloaded because it exceeded the
   recursion depth, the reference will *not* be changed.

   However, later we can encounter /c/something.gif from an "upper"
   level HTML (let's call it filetwo.html), and it gets downloaded.

   But now we have a problem because /c/something.gif will be
   correctly transformed in filetwo.html, but not in fileone.html,
   since Wget could not have known that /c/something.gif will be
   downloaded in the future.

   This is why Wget must, after the whole retrieval, call
   convert_all_links to go once more through the entire list of
   retrieved HTMLs, and re-convert them.

   All the downloaded HTMLs are kept in downloaded_html_files, and
   downloaded URLs in urls_downloaded.  From these two lists
   information is extracted.  */
void
convert_all_links (void)
{
  slist *html;

  /* Destructively reverse downloaded_html_files to get it in the right order.
     recursive_retrieve() used slist_prepend() consistently.  */
  downloaded_html_files = slist_nreverse (downloaded_html_files);

  for (html = downloaded_html_files; html; html = html->next)
    {
      urlpos *urls, *cur_url;
      char *url;

      DEBUGP (("Rescanning %s\n", html->string));
      /* Determine the URL of the HTML file.  get_urls_html will need
         it.  */
      url = hash_table_get (dl_file_url_map, html->string);
      if (url)
        DEBUGP (("It should correspond to %s.\n", url));
      else
        DEBUGP (("I cannot find the corresponding URL.\n"));
      /* Parse the HTML file...  */
      urls = get_urls_html (html->string, url, FALSE, NULL);
      /* We don't respect meta_disallow_follow here because, even if
         the file is not followed, we might still want to convert the
         links that have been followed from other files.  */
      for (cur_url = urls; cur_url; cur_url = cur_url->next)
        {
          char *local_name;

          /* The URL must be in canonical form to be compared.  */
          struct urlinfo *u = newurl ();
          uerr_t res = parseurl (cur_url->url, u, 0);
          if (res != URLOK)
            {
              freeurl (u, 1);
              continue;
            }
          /* We decide the direction of conversion according to whether
             a URL was downloaded.  Downloaded URLs will be converted
             ABS2REL, whereas non-downloaded will be converted REL2ABS.  */
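          /* For example (names purely illustrative): if
             "http://www.example.com/c/something.gif" was downloaded to
             "www.example.com/c/something.gif", a reference to it from
             fileone.html is rewritten as a relative link to that local
             file; if it was never downloaded, the reference is instead
             expanded to the complete "http://..." form so it still
             works when the page is browsed locally.  */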
          local_name = hash_table_get (dl_url_file_map, u->url);
          if (local_name)
            DEBUGP (("%s marked for conversion, local %s\n",
                     u->url, local_name));
          /* Decide on the conversion direction.  */
          if (local_name)
            {
              /* We've downloaded this URL.  Convert it to relative
                 form.  We do this even if the URL already is in
                 relative form, because our directory structure may
                 not be identical to that on the server (think `-nd',
                 `--cut-dirs', etc.)  */
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (local_name);
            }
          else
            {
              /* We haven't downloaded this URL.  If it's not already
                 complete (including a full host name), convert it to
                 that form, so it can be reached while browsing this
                 HTML locally.  */
              if (!cur_url->link_complete_p)
                cur_url->convert = CO_CONVERT_TO_COMPLETE;
              cur_url->local_name = NULL;
            }
          freeurl (u, 1);
        }
      /* Convert the links in the file.  */
      convert_links (html->string, urls);
      free_urlpos (urls);
    }
}
/* Robots support.  */

/* Construct the robots URL.  */
static struct urlinfo *
robots_url (const char *url, const char *robots_filename)
{
  struct urlinfo *u = newurl ();
  uerr_t err;

  err = parseurl (url, u, 0);
  assert (err == URLOK && u->proto == URLHTTP);
  u->dir = xstrdup ("");
  u->file = xstrdup (robots_filename);
  u->url = str_url (u, 0);
  return u;
}
/* Retrieves the robots_filename from the root server directory, if
   possible.  Returns ROBOTSOK if robots were retrieved OK, and
   NOROBOTS if robots could not be retrieved for any reason.  */
static uerr_t
retrieve_robots (const char *url, const char *robots_filename)
{
  int dt;
  uerr_t err;
  struct urlinfo *u;

  u = robots_url (url, robots_filename);
  logputs (LOG_VERBOSE, _("Loading robots.txt; please ignore errors.\n"));
  err = retrieve_url (u->url, NULL, NULL, NULL, &dt);
  freeurl (u, 1);
  return err == RETROK ? ROBOTSOK : NOROBOTS;
}
/* Parse the robots_filename and return the disallowed path components
   in a malloc-ed vector of character pointers.

   It should be fully compliant with the syntax as described in the
   file norobots.txt, adopted by the robots mailing list
   (robots@webcrawler.com).  */
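/* For instance (paths purely illustrative), a robots.txt containing
   the lines "Disallow: /cgi-bin/" and "Disallow: /private/" in a
   section that applies to Wget would come back from this function as
   the vector { "/cgi-bin/", "/private/", NULL }.  */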
static char **
parse_robots (const char *robots_filename)
{
  FILE *fp;
  char **entries;
  char *line, *cmd, *str, *p;
  char *base_version, *version;
  int num, i;
  int wget_matched;             /* Is the part meant for Wget?  */

  entries = NULL;
  num = 0;
  fp = fopen (robots_filename, "rb");
  if (!fp)
    return NULL;

  /* Kill version number.  */
  if (opt.useragent)
    {
      STRDUP_ALLOCA (base_version, opt.useragent);
      STRDUP_ALLOCA (version, opt.useragent);
    }
  else
    {
      int len = 10 + strlen (version_string);
      base_version = (char *)alloca (len);
      sprintf (base_version, "Wget/%s", version_string);
      version = (char *)alloca (len);
      sprintf (version, "Wget/%s", version_string);
    }
  for (p = version; *p; p++)
    *p = TOLOWER (*p);
  for (p = base_version; *p && *p != '/'; p++)
    *p = TOLOWER (*p);
  *p = '\0';
  /* Setting this to 1 means that Wget considers itself under
     restrictions by default, even if the User-Agent field is not
     present.  However, if it finds the user-agent set to anything
     other than Wget, the rest will be ignored (up to the following
     User-Agent field).  Thus you may have something like:

     Disallow: 1
     Disallow: 2

     User-Agent: stupid-robot
     Disallow: 3
     Disallow: 4

     User-Agent: *
     Disallow: 5
     Disallow: 6
     Disallow: 7

     In this case the 1, 2, 5, 6 and 7 disallow lines will be
     applied to Wget.  */
  wget_matched = 1;
  while ((line = read_whole_line (fp)))
    {
      int len = strlen (line);
      /* Destroy <CR><LF> if present.  */
      if (len && line[len - 1] == '\n')
        line[--len] = '\0';
      if (len && line[len - 1] == '\r')
        line[--len] = '\0';
      /* According to specifications, optional space may be at the
         end of the line.  */
      DEBUGP (("Line: %s\n", line));
      /* Skip leading spaces.  */
      for (cmd = line; *cmd && ISSPACE (*cmd); cmd++);
      if (!*cmd)
        {
          DEBUGP (("(chucked out)\n"));
          continue;
        }
      /* Look for the colon that ends the command.  */
      for (str = cmd; *str && *str != ':'; str++);
      if (!*str)
        {
          DEBUGP (("(chucked out)\n"));
          continue;
        }
      /* Zero-terminate the command.  */
      *str++ = '\0';
      /* Look for the string beginning...  */
      for (; *str && ISSPACE (*str); str++);
      /* Look for comments or trailing spaces and kill them off.  */
      for (p = str; *p; p++)
        if (*p && ISSPACE (*p) && ((*(p + 1) == '#') || (*(p + 1) == '\0')))
          {
            /* We have found either a shell-style comment `<sp>+#' or some
               trailing spaces.  Now rewind to the beginning of the spaces
               and place '\0' there.  */
            while (p > str && ISSPACE (*p))
              --p;
      if (!strcasecmp (cmd, "User-agent"))
        {
          int match = 0;
          /* Lowercase the agent string.  */
          for (p = str; *p; p++)
            *p = TOLOWER (*p);
          /* If the string is `*', it matches.  */
          if (*str == '*' && !*(str + 1))
            match = 1;
          else
            {
              /* If the string contains wildcards, we'll run it through
                 fnmatch().  */
              if (has_wildcards_p (str))
                {
                  /* If the string contains '/', compare with the full
                     version.  Else, compare it to base_version.  */
                  if (strchr (str, '/'))
                    match = !fnmatch (str, version, 0);
                  else
                    match = !fnmatch (str, base_version, 0);
                }
              else              /* Substring search */
                {
                  if (strstr (version, str))
                    match = 1;
                }
            }
          /* If Wget is not matched, skip all the entries up to the
             next User-agent field.  */
          wget_matched = match;
        }
      else if (!wget_matched)
        {
          DEBUGP (("(chucking out since it is not applicable for Wget)\n"));
          continue;
        }
      else if (!strcasecmp (cmd, "Disallow"))
        {
          /* If "Disallow" is empty, the robot is welcome.  */
          if (!*str)
            {
              entries = (char **)xmalloc (sizeof (char *));
              *entries = NULL;
              num = 0;
            }
          else
            {
              /* Strip trailing spaces, according to specifications.  */
              for (i = strlen (str) - 1; i >= 0 && ISSPACE (str[i]); i--)
                str[i] = '\0';
              /* Add the path to the vector of disallowed prefixes.  */
              entries = (char **)xrealloc (entries, (num + 2) * sizeof (char *));
              entries[num] = xstrdup (str);
              entries[++num] = NULL;
            }
        }
      else
        /* unknown command */
        DEBUGP (("(chucked out)\n"));
/* May the URL url be loaded according to disallowing rules stored in
   forbidden?  */
static int
robots_match (struct urlinfo *u, char **fb)
{
  int l;

  if (!fb)
    return 1;
  DEBUGP (("Matching %s against: ", u->path));
  for (; *fb; fb++)
    {
      DEBUGP (("%s ", *fb));
      l = strlen (*fb);
      /* If dir is fb, we may not load the file.  */
      if (strncmp (u->path, *fb, l) == 0)
        {
          DEBUGP (("matched.\n"));
          return 0;             /* Matches, i.e. does not load...  */
        }
    }
  DEBUGP (("not matched.\n"));
  return 1;
}
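/* For example (rules purely illustrative): with forbidden set to
   { "/cgi-bin/", "/tmp/", NULL }, robots_match() refuses a URL whose
   path is "/cgi-bin/search" (prefix match on "/cgi-bin/") and accepts
   one whose path is "/docs/index.html".  */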