/* Handling of recursive HTTP retrieving.
Copyright (C) 1995, 1996, 1997, 2000 Free Software Foundation, Inc.
-This file is part of Wget.
+This file is part of GNU Wget.
-This program is free software; you can redistribute it and/or modify
+GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-This program is distributed in the hope that it will be useful,
+GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
+along with Wget; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#include <config.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>
-#include <ctype.h>
#include <sys/types.h>
#include "wget.h"
#include "host.h"
#include "hash.h"
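+
+/* Some systems fail to declare errno in <errno.h>; fall back to an
+   explicit declaration in that case. */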
+#ifndef errno
+extern int errno;
+#endif
+
extern char *version_string;
#define ROBOTS_FILENAME "robots.txt"
static struct hash_table *dl_file_url_map;
static struct hash_table *dl_url_file_map;
-/* List of HTML URLs. */
-static slist *urls_html;
+/* List of HTML files downloaded in this Wget run. Used for link
+ conversion after Wget is done. */
+static slist *downloaded_html_files;
/* List of undesirable-to-load URLs. */
static struct hash_table *undesirable_urls;
undesirable_urls = NULL;
free_vec (forbidden);
forbidden = NULL;
- slist_free (urls_html);
- urls_html = NULL;
+ slist_free (downloaded_html_files);
+ downloaded_html_files = NULL;
FREE_MAYBE (base_dir);
FREE_MAYBE (robots_host);
first_time = 1;
run. They should probably be at a different location. */
if (!undesirable_urls)
undesirable_urls = make_string_hash_table (0);
- if (!dl_file_url_map)
- dl_file_url_map = make_string_hash_table (0);
- if (!dl_url_file_map)
- dl_url_file_map = make_string_hash_table (0);
hash_table_clear (undesirable_urls);
string_set_add (undesirable_urls, this_url);
- hash_table_clear (dl_file_url_map);
- hash_table_clear (dl_url_file_map);
- urls_html = NULL;
/* Enter this_url into the hash table, in original and "enhanced" form. */
u = newurl ();
err = parseurl (this_url, u, 0);
if (err == URLOK)
{
string_set_add (undesirable_urls, u->url);
- hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (u->url));
- hash_table_put (dl_url_file_map, xstrdup (u->url), xstrdup (file));
- urls_html = slist_append (urls_html, file);
if (opt.no_parent)
base_dir = xstrdup (u->dir); /* Set the base dir. */
/* Set the canonical this_url to be sent as referer. This
/* inl is set if the URL we are working on (constr) is stored in
undesirable_urls. Using it is crucial to avoid unnecessary
repeated continuous hits to the hash table. */
- inl = string_set_exists (undesirable_urls, constr);
+ inl = string_set_contains (undesirable_urls, constr);
/* If it is FTP, and FTP is not followed, chuck it out. */
if (!inl)
/* If it is an absolute link and absolute links are not followed,
chuck it out. */
if (!inl && u->proto != URLFTP)
- if (opt.relative_only && !(cur_url->flags & URELATIVE))
+ if (opt.relative_only && !cur_url->link_relative_p)
{
DEBUGP (("It doesn't really look like a relative link.\n"));
string_set_add (undesirable_urls, constr);
/* Just lowercase the hostname. */
for (p = u->host; *p; p++)
*p = TOLOWER (*p);
- free (u->url);
+ xfree (u->url);
u->url = str_url (u, 0);
}
- free (constr);
+ xfree (constr);
constr = xstrdup (u->url);
string_set_add (undesirable_urls, constr);
if (!inl && !((u->proto == URLFTP) && !this_url_ftp))
rfile = url_filename (rurl);
forbidden = parse_robots (rfile);
freeurl (rurl, 1);
- free (rfile);
+ xfree (rfile);
}
}
}
if (newloc)
{
- free (constr);
+ xfree (constr);
constr = newloc;
}
- /* In case of convert_links: If there was no error, add it to
- the list of downloaded URLs. We might need it for
- conversion. */
- if (opt.convert_links && filename)
- {
- if (dt & RETROKF)
- {
- hash_table_put (dl_file_url_map,
- xstrdup (filename), xstrdup (constr));
- hash_table_put (dl_url_file_map,
- xstrdup (constr), xstrdup (filename));
- /* If the URL is HTML, note it. */
- if (dt & TEXTHTML)
- urls_html = slist_append (urls_html, filename);
- }
- }
/* If there was no error, and the type is text/html, parse
it recursively. */
if (dt & TEXTHTML)
store the local filename. */
if (opt.convert_links && (dt & RETROKF) && (filename != NULL))
{
- cur_url->flags |= UABS2REL;
+ cur_url->convert = CO_CONVERT_TO_RELATIVE;
cur_url->local_name = xstrdup (filename);
}
}
- DEBUGP (("%s already in list, so we don't load.\n", constr));
+ else
+ DEBUGP (("%s already in list, so we don't load.\n", constr));
/* Free filename and constr. */
FREE_MAYBE (filename);
FREE_MAYBE (constr);
return RETROK;
}
\f
-/* Simple calls to convert_links will often fail because only the
- downloaded files are converted, and Wget cannot know which files
- will be converted in the future. So, if we have file fileone.html
- with:
+void
+register_download (const char *url, const char *file)
+{
+ if (!opt.convert_links)
+ return;
+ if (!dl_file_url_map)
+ dl_file_url_map = make_string_hash_table (0);
+ hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));
+ if (!dl_url_file_map)
+ dl_url_file_map = make_string_hash_table (0);
+ hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
+}
- <a href=/c/something.gif>
+void
+register_html (const char *url, const char *file)
+{
+ if (!opt.convert_links)
+ return;
+ downloaded_html_files = slist_prepend (downloaded_html_files, file);
+}
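+
+/* register_download() records the file<->URL mapping of a successful
+   download; register_html() notes that FILE is an HTML file that will
+   need the link-conversion pass once retrieval is finished.  A rough
+   sketch of how a caller is expected to use them (the call sites
+   themselves are not shown in this patch):
+
+     if (dt & RETROKF)
+       {
+         register_download (u->url, filename);
+         if (dt & TEXTHTML)
+           register_html (u->url, filename);
+       }
+
+   Both functions are no-ops unless --convert-links is in effect, so
+   callers need not test opt.convert_links themselves. */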
+
+/* convert_links() is called from recursive_retrieve() after we're
+ done with an HTML file. This call to convert_links is not complete
+ because it converts only the downloaded files, and Wget cannot know
+ which files will be downloaded afterwards. So, if we have file
+ fileone.html with:
+
+ <a href="/c/something.gif">
and /c/something.gif was not downloaded because it exceeded the
recursion depth, the reference will *not* be changed.
convert_all_links to go once more through the entire list of
retrieved HTMLs, and re-convert them.
- All the downloaded HTMLs are kept in urls_html, and downloaded URLs
- in urls_downloaded. From these two lists information is
- extracted. */
+ All the downloaded HTMLs are kept in downloaded_html_files, and
+ downloaded URLs in urls_downloaded. From these two lists
+ information is extracted. */
void
convert_all_links (void)
{
- uerr_t res;
- urlpos *l1, *urls;
- struct urlinfo *u;
slist *html;
- for (html = urls_html; html; html = html->next)
+ /* Destructively reverse downloaded_html_files to get it in the
+ right order; register_html() builds it with slist_prepend(), so
+ the list is in reverse order of retrieval. */
+ downloaded_html_files = slist_nreverse (downloaded_html_files);
+
+ for (html = downloaded_html_files; html; html = html->next)
{
- int meta_disallow_follow;
+ urlpos *urls, *cur_url;
char *url;
DEBUGP (("Rescanning %s\n", html->string));
else
DEBUGP (("I cannot find the corresponding URL.\n"));
/* Parse the HTML file... */
- urls = get_urls_html (html->string, url, FALSE, &meta_disallow_follow);
- if (opt.use_robots && meta_disallow_follow)
- {
- /* The META tag says we are not to follow this file.
- Respect that. */
- free_urlpos (urls);
- urls = NULL;
- }
- if (!urls)
- continue;
- for (l1 = urls; l1; l1 = l1->next)
+ urls = get_urls_html (html->string, url, FALSE, NULL);
+ /* We don't respect meta_disallow_follow here because, even if
+ the file is not followed, we might still want to convert the
+ links that have been followed from other files. */
+ for (cur_url = urls; cur_url; cur_url = cur_url->next)
{
char *local_name;
+
/* The URL must be in canonical form to be compared. */
- u = newurl ();
- res = parseurl (l1->url, u, 0);
+ struct urlinfo *u = newurl ();
+ uerr_t res = parseurl (cur_url->url, u, 0);
if (res != URLOK)
{
freeurl (u, 1);
ABS2REL, whereas non-downloaded will be converted REL2ABS. */
local_name = hash_table_get (dl_url_file_map, u->url);
if (local_name)
- DEBUGP (("%s flagged for conversion, local %s\n",
+ DEBUGP (("%s marked for conversion, local %s\n",
u->url, local_name));
- /* Clear the flags. */
- l1->flags &= ~ (UABS2REL | UREL2ABS);
/* Decide on the conversion direction. */
if (local_name)
{
- l1->flags |= UABS2REL;
- l1->local_name = xstrdup (local_name);
+ /* We've downloaded this URL. Convert it to relative
+ form. We do this even if the URL already is in
+ relative form, because our directory structure may
+ not be identical to that on the server (think `-nd',
+ `--cut-dirs', etc.) */
+ cur_url->convert = CO_CONVERT_TO_RELATIVE;
+ cur_url->local_name = xstrdup (local_name);
}
else
{
- l1->flags |= UREL2ABS;
- l1->local_name = NULL;
+ /* We haven't downloaded this URL. If it's not already
+ complete (including a full host name), convert it to
+ that form, so it can be reached while browsing this
+ HTML locally. */
+ if (!cur_url->link_complete_p)
+ cur_url->convert = CO_CONVERT_TO_COMPLETE;
+ cur_url->local_name = NULL;
}
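+ /* For example (hypothetical host, mirroring the fileone.html
+ example above): a downloaded http://host/c/something.gif gets a
+ reference relative to its local file, whereas a non-downloaded
+ href="/c/other.gif" is completed to http://host/c/other.gif so
+ it still works when the HTML is browsed locally. */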
freeurl (u, 1);
}
err = parseurl (url, u, 0);
assert (err == URLOK && u->proto == URLHTTP);
- free (u->file);
- free (u->dir);
- free (u->url);
+ xfree (u->file);
+ xfree (u->dir);
+ xfree (u->url);
u->dir = xstrdup ("");
u->file = xstrdup (robots_filename);
u->url = str_url (u, 0);
char **entries;
char *line, *cmd, *str, *p;
char *base_version, *version;
- int len, num, i;
+ int num, i;
int wget_matched; /* is the part meant for Wget? */
entries = NULL;
return NULL;
/* Kill version number. */
- if (opt.useragent)
- {
- STRDUP_ALLOCA (base_version, opt.useragent);
- STRDUP_ALLOCA (version, opt.useragent);
- }
- else
- {
- int len = 10 + strlen (version_string);
- base_version = (char *)alloca (len);
- sprintf (base_version, "Wget/%s", version_string);
- version = (char *)alloca (len);
- sprintf (version, "Wget/%s", version_string);
- }
+ if (opt.useragent)
+ {
+ STRDUP_ALLOCA (base_version, opt.useragent);
+ STRDUP_ALLOCA (version, opt.useragent);
+ }
+ else
+ {
+ int len = 10 + strlen (version_string);
+ base_version = (char *)alloca (len);
+ sprintf (base_version, "Wget/%s", version_string);
+ version = (char *)alloca (len);
+ sprintf (version, "Wget/%s", version_string);
+ }
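+ /* With the default user agent, version now holds "Wget/" followed
+ by the release number and base_version holds the same string; the
+ loops below lowercase them (base_version only up to the '/'), and
+ that scan up to '/' is where the version number gets killed, so
+ robots.txt "User-agent" lines can be compared case-insensitively. */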
for (p = version; *p; p++)
*p = TOLOWER (*p);
for (p = base_version; *p && *p != '/'; p++)
wget_matched = 1;
while ((line = read_whole_line (fp)))
{
- len = strlen (line);
+ int len = strlen (line);
/* Destroy <CR><LF> if present. */
if (len && line[len - 1] == '\n')
line[--len] = '\0';
for (cmd = line; *cmd && ISSPACE (*cmd); cmd++);
if (!*cmd)
{
- free (line);
+ xfree (line);
DEBUGP (("(chucked out)\n"));
continue;
}
for (str = cmd; *str && *str != ':'; str++);
if (!*str)
{
- free (line);
+ xfree (line);
DEBUGP (("(chucked out)\n"));
continue;
}
}
else if (!wget_matched)
{
- free (line);
+ xfree (line);
DEBUGP (("(chucking out since it is not applicable for Wget)\n"));
continue;
}
/* unknown command */
DEBUGP (("(chucked out)\n"));
}
- free (line);
+ xfree (line);
}
fclose (fp);
return entries;
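+
+/* For illustration, assuming the usual robots.txt semantics handled
+   by the parsing above: given a robots.txt containing
+
+     User-agent: *
+     Disallow: /cgi-bin/
+     Disallow: /tmp/
+
+   parse_robots() should return the NULL-terminated vector
+   { "/cgi-bin/", "/tmp/", NULL }, and robots_match() below then
+   reports that any u->path beginning with one of those prefixes
+   may not be loaded. */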
/* May the URL url be loaded according to disallowing rules stored in
forbidden? */
static int
-robots_match (struct urlinfo *u, char **forbidden)
+robots_match (struct urlinfo *u, char **fb)
{
int l;
- if (!forbidden)
+ if (!fb)
return 1;
DEBUGP (("Matching %s against: ", u->path));
- for (; *forbidden; forbidden++)
+ for (; *fb; fb++)
{
- DEBUGP (("%s ", *forbidden));
- l = strlen (*forbidden);
- /* If dir is forbidden, we may not load the file. */
- if (strncmp (u->path, *forbidden, l) == 0)
+ DEBUGP (("%s ", *fb));
+ l = strlen (*fb);
+ /* If the directory is forbidden, we may not load the file. */
+ if (strncmp (u->path, *fb, l) == 0)
{
DEBUGP (("matched.\n"));
return 0; /* Matches, i.e. does not load... */