/* Handling of recursive HTTP retrieving.
- Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1996-2006 Free Software Foundation, Inc.
This file is part of GNU Wget.
GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
+the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with Wget; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+along with Wget. If not, see <http://www.gnu.org/licenses/>.
In addition, as a special exception, the Free Software Foundation
gives permission to link the code of its release of Wget with the
#include <stdio.h>
#include <stdlib.h>
-#ifdef HAVE_STRING_H
-# include <string.h>
-#else
-# include <strings.h>
-#endif /* HAVE_STRING_H */
+#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>
-#include <sys/types.h>
#include "wget.h"
#include "url.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
-
-#ifndef errno
-extern int errno;
-#endif
-
-extern char *version_string;
-extern LARGE_INT total_downloaded_bytes;
-
-extern struct hash_table *dl_url_file_map;
-extern struct hash_table *downloaded_html_set;
+#include "spider.h"
\f
/* Functions for maintaining the URL queue. */
const char *url; /* the URL to download */
const char *referer; /* the referring document */
int depth; /* the depth */
- unsigned int html_allowed :1; /* whether the document is allowed to
+ bool html_allowed; /* whether the document is allowed to
be treated as HTML. */
struct queue_element *next; /* next element in queue */
static void
url_enqueue (struct url_queue *queue,
- const char *url, const char *referer, int depth, int html_allowed)
+ const char *url, const char *referer, int depth, bool html_allowed)
{
struct queue_element *qel = xnew (struct queue_element);
qel->url = url;
queue->head = queue->tail;
}
-/* Take a URL out of the queue. Return 1 if this operation succeeded,
- or 0 if the queue is empty. */
+/* Take a URL out of the queue. Return true if this operation
+ succeeded, or false if the queue is empty. */
-static int
+static bool
url_dequeue (struct url_queue *queue,
const char **url, const char **referer, int *depth,
- int *html_allowed)
+ bool *html_allowed)
{
struct queue_element *qel = queue->head;
if (!qel)
- return 0;
+ return false;
queue->head = queue->head->next;
if (!queue->head)
DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
xfree (qel);
- return 1;
+ return true;
}
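
For orientation, a minimal sketch of how this FIFO pair is driven; url_queue_new and the example URL are illustrative assumptions (the real crawl loop in retrieve_tree below is considerably more involved):

    /* Illustration only -- not part of the patch.  url_queue_new is
       assumed to be the constructor paired with url_queue_delete.  */
    struct url_queue *queue = url_queue_new ();
    url_enqueue (queue, xstrdup ("http://example.com/"), NULL, 0, true);

    char *url, *referer;
    int depth;
    bool html_allowed;
    while (url_dequeue (queue, (const char **) &url, (const char **) &referer,
                        &depth, &html_allowed))
      {
        /* ... retrieve url, enqueue its children at depth + 1 ... */
        xfree (url);              /* dequeued strings belong to the caller */
        xfree_null (referer);     /* referer may be NULL for the root URL */
      }
    url_queue_delete (queue);

Note that url_enqueue stores the pointers it is given, which is why the caller duplicates strings with xstrdup before enqueuing and frees them again after dequeuing, exactly as the cleanup loop at the end of retrieve_tree does.
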
\f
-static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
- struct url *, struct hash_table *));
-static int descend_redirect_p PARAMS ((const char *, const char *, int,
- struct url *, struct hash_table *));
+static bool download_child_p (const struct urlpos *, struct url *, int,
+ struct url *, struct hash_table *);
+static bool descend_redirect_p (const char *, const char *, int,
+ struct url *, struct hash_table *);
/* Retrieve a part of the web beginning with START_URL. This used to
/* Enqueue the starting URL. Use start_url_parsed->url rather than
just URL so we enqueue the canonical form of the URL. */
- url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, 1);
+ url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true);
string_set_add (blacklist, start_url_parsed->url);
while (1)
{
- int descend = 0;
+ bool descend = false;
char *url, *referer, *file = NULL;
- int depth, html_allowed;
- boolean dash_p_leaf_HTML = FALSE;
+ int depth;
+ bool html_allowed;
+ bool dash_p_leaf_HTML = false;
if (opt.quota && total_downloaded_bytes > opt.quota)
break;
if (html_allowed
&& downloaded_html_set
&& string_set_contains (downloaded_html_set, file))
- descend = 1;
+ descend = true;
}
else
{
int dt = 0;
char *redirected = NULL;
- int oldrec = opt.recursive;
- opt.recursive = 0;
- status = retrieve_url (url, &file, &redirected, referer, &dt);
- opt.recursive = oldrec;
+ status = retrieve_url (url, &file, &redirected, referer, &dt, false);
if (html_allowed && file && status == RETROK
&& (dt & RETROKF) && (dt & TEXTHTML))
- descend = 1;
+ descend = true;
if (redirected)
{
{
if (!descend_redirect_p (redirected, url, depth,
start_url_parsed, blacklist))
- descend = 0;
+ descend = false;
else
/* Make sure that the old pre-redirect form gets
blacklisted. */
}
}
+ if (opt.spider)
+ {
+ visited_url (url, referer);
+ }
+
if (descend
&& depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
{
one, but we allow one more level so that the leaf
pages that contain frames can be loaded
correctly. */
- dash_p_leaf_HTML = TRUE;
+ dash_p_leaf_HTML = true;
}
else
{
affords us, so we need to bail out. */
DEBUGP (("Not descending further; at depth %d, max. %d.\n",
depth, opt.reclevel));
- descend = 0;
+ descend = false;
}
}
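
Condensed, and with the elided -p test written out as an assumption (the exact depth comparison is not visible in this hunk), the block above amounts to:

    /* Sketch of the depth-limit decision; the page-requisites depth
       test is an assumption, not taken verbatim from this hunk.  */
    if (descend
        && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
      {
        if (opt.page_requisites && depth <= opt.reclevel + 1)
          dash_p_leaf_HTML = true;   /* keep the page, but only to pull in
                                        its requisites (frames included) */
        else
          descend = false;           /* depth budget exhausted: stop here */
      }
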
if (descend)
{
- int meta_disallow_follow = 0;
+ bool meta_disallow_follow = false;
struct urlpos *children
= get_urls_html (file, url, &meta_disallow_follow);
}
}
- if (opt.delete_after || (file && !acceptable (file)))
+ if (file
+ && (opt.delete_after
+ || opt.spider /* opt.recursive is implicitly true */
+ || !acceptable (file)))
{
/* Either --delete-after was specified, or we loaded this
- otherwise rejected (e.g. by -R) HTML file just so we
- could harvest its hyperlinks -- in either case, delete
- the local file. */
+ (otherwise unneeded because of --spider or rejected by -R)
+ HTML file just to harvest its hyperlinks -- in either case,
+ delete the local file. */
DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
opt.delete_after ? "--delete-after" :
- "recursive rejection criteria"));
+ (opt.spider ? "--spider" :
+ "recursive rejection criteria")));
logprintf (LOG_VERBOSE,
- (opt.delete_after
+ (opt.delete_after || opt.spider
? _("Removing %s.\n")
: _("Removing %s since it should be rejected.\n")),
file);
if (unlink (file))
logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
+ logputs (LOG_VERBOSE, "\n");
register_delete_file (file);
}
xfree (url);
- FREE_MAYBE (referer);
- FREE_MAYBE (file);
+ xfree_null (referer);
+ xfree_null (file);
}
/* If anything is left of the queue due to a premature exit, free it
now. */
{
char *d1, *d2;
- int d3, d4;
+ int d3;
+ bool d4;
while (url_dequeue (queue,
(const char **)&d1, (const char **)&d2, &d3, &d4))
{
xfree (d1);
- FREE_MAYBE (d2);
+ xfree_null (d2);
}
}
url_queue_delete (queue);
by storing these URLs to BLACKLIST. This may or may not help. It
will help if those URLs are encountered many times. */
-static int
+static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
struct url *start_url_parsed, struct hash_table *blacklist)
{
struct url *u = upos->url;
const char *url = u->url;
- int u_scheme_like_http;
+ bool u_scheme_like_http;
DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));
if (string_set_contains (blacklist, url))
{
+ if (opt.spider)
+ {
+ char *referrer = url_string (parent, true);
+ DEBUGP (("download_child_p: parent->url is: `%s'\n", parent->url));
+ visited_url (url, referrer);
+ xfree (referrer);
+ }
DEBUGP (("Already on the black list.\n"));
goto out;
}
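
The blacklist itself is just a string hash set keyed by the canonical URL text; the discipline used on both sides of this check looks roughly as follows (make_string_hash_table is assumed as the set constructor, it is not part of this hunk):

    /* Sketch of the blacklist discipline, not part of the patch.  */
    struct hash_table *blacklist = make_string_hash_table (0);  /* assumed */

    string_set_add (blacklist, start_url_parsed->url);   /* seed with root */

    /* For every candidate child URL: */
    if (string_set_contains (blacklist, url))
      ;  /* already queued or already rejected -- skip it cheaply */
    else
      string_set_add (blacklist, url);  /* remember it so that repeat
                                           encounters cost one hash lookup */
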
&& u->port == start_url_parsed->port
&& !(opt.page_requisites && upos->link_inline_p))
{
- if (!frontcmp (start_url_parsed->dir, u->dir))
+ if (!subdir_p (start_url_parsed->dir, u->dir))
{
DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
u->dir, start_url_parsed->dir));
exclusion and inclusion lists. */
if (opt.includes || opt.excludes)
{
- if (!accdir (u->dir, ALLABS))
+ if (!accdir (u->dir))
{
DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
goto out;
}
/* 6. Check for acceptance/rejection rules. We ignore these rules
- for directories (no file name to match) and for HTML documents,
- which might lead to other files that do need to be downloaded.
- That is, unless we've exhausted the recursion depth anyway. */
+ for directories (no file name to match) and for non-leaf HTMLs,
+ which can lead to other files that do need to be downloaded. (-p
+ automatically implies non-leaf because with -p we can, if
+ necessary, overstep the maximum depth to get the page requisites.) */
if (u->file[0] != '\0'
&& !(has_html_suffix_p (u->file)
- && depth != INFINITE_RECURSION
- && depth < opt.reclevel - 1))
+ /* The exception only applies to non-leaf HTMLs (but -p
+ always implies non-leaf because we can overstep the
+ maximum depth to get the requisites): */
+ && (/* non-leaf */
+ opt.reclevel == INFINITE_RECURSION
+ /* also non-leaf */
+ || depth < opt.reclevel - 1
+ /* -p, which implies non-leaf (see above) */
+ || opt.page_requisites)))
{
if (!acceptable (u->file))
{
download queue. */
DEBUGP (("Decided to load it.\n"));
- return 1;
+ return true;
out:
DEBUGP (("Decided NOT to load it.\n"));
- return 0;
+ return false;
}
/* This function determines whether we will consider downloading the
possibly to another host, etc. It is needed very rarely, and thus
it is merely a simple-minded wrapper around download_child_p. */
-static int
+static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
struct url *start_url_parsed, struct hash_table *blacklist)
{
struct url *orig_parsed, *new_parsed;
struct urlpos *upos;
- int success;
+ bool success;
orig_parsed = url_parse (original, NULL);
assert (orig_parsed != NULL);