GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
+ (at your option) any later version.
GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
#include <stdio.h>
#include <stdlib.h>
-#ifdef HAVE_STRING_H
-# include <string.h>
-#else
-# include <strings.h>
-#endif /* HAVE_STRING_H */
+#include <string.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#include <assert.h>
-#include <sys/types.h>
#include "wget.h"
#include "url.h"
#include "utils.h"
#include "retr.h"
#include "ftp.h"
-#include "fnmatch.h"
#include "host.h"
#include "hash.h"
#include "res.h"
#include "convert.h"
-#ifndef errno
-extern int errno;
-#endif
-
extern char *version_string;
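+/* Running total of bytes downloaded so far; compared against
+   opt.quota below to enforce the download quota.  */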
+extern LARGE_INT total_downloaded_bytes;
extern struct hash_table *dl_url_file_map;
extern struct hash_table *downloaded_html_set;
/* Functions for maintaining the URL queue. */
struct queue_element {
- const char *url;
- const char *referer;
- int depth;
- struct queue_element *next;
+ const char *url; /* the URL to download */
+ const char *referer; /* the referring document */
+ int depth; /* the depth */
+ unsigned int html_allowed :1; /* whether the document is allowed to
+ be treated as HTML. */
+
+ struct queue_element *next; /* next element in queue */
};
struct url_queue {
  struct queue_element *head;
  struct queue_element *tail;
  int count, maxcount;
};
static struct url_queue *
url_queue_new (void)
{
- struct url_queue *queue = xmalloc (sizeof (*queue));
- memset (queue, '\0', sizeof (*queue));
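+  /* xnew0 returns zero-initialized storage, so head, tail, and the
+     counters all start out cleared.  */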
+ struct url_queue *queue = xnew0 (struct url_queue);
return queue;
}
static void
url_enqueue (struct url_queue *queue,
- const char *url, const char *referer, int depth)
+ const char *url, const char *referer, int depth, bool html_allowed)
{
- struct queue_element *qel = xmalloc (sizeof (*qel));
+ struct queue_element *qel = xnew (struct queue_element);
qel->url = url;
qel->referer = referer;
qel->depth = depth;
+ qel->html_allowed = html_allowed;
qel->next = NULL;
++queue->count;
  if (queue->count > queue->maxcount)
    queue->maxcount = queue->count;

  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;

  if (!queue->head)
    queue->head = queue->tail;
}
-/* Take a URL out of the queue. Return 1 if this operation succeeded,
- or 0 if the queue is empty. */
+/* Take a URL out of the queue. Return true if this operation
+ succeeded, or false if the queue is empty. */
-static int
+static bool
url_dequeue (struct url_queue *queue,
- const char **url, const char **referer, int *depth)
+ const char **url, const char **referer, int *depth,
+ bool *html_allowed)
{
struct queue_element *qel = queue->head;
if (!qel)
- return 0;
+ return false;
queue->head = queue->head->next;
  if (!queue->head)
    queue->tail = NULL;
*url = qel->url;
*referer = qel->referer;
*depth = qel->depth;
+ *html_allowed = qel->html_allowed;
--queue->count;
DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
xfree (qel);
- return 1;
+ return true;
}
\f
-static int download_child_p PARAMS ((const struct urlpos *, struct url *, int,
- struct url *, struct hash_table *));
-static int descend_redirect_p PARAMS ((const char *, const char *, int,
- struct url *, struct hash_table *));
+static bool download_child_p (const struct urlpos *, struct url *, int,
+ struct url *, struct hash_table *);
+static bool descend_redirect_p (const char *, const char *, int,
+ struct url *, struct hash_table *);
/* Retrieve a part of the web beginning with START_URL. This used to
/* Enqueue the starting URL. Use start_url_parsed->url rather than
just URL so we enqueue the canonical form of the URL. */
- url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0);
+ url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, true);
string_set_add (blacklist, start_url_parsed->url);
while (1)
{
- int descend = 0;
+ bool descend = false;
char *url, *referer, *file = NULL;
int depth;
- boolean dash_p_leaf_HTML = FALSE;
+ bool html_allowed;
+ bool dash_p_leaf_HTML = false;
- if (downloaded_exceeds_quota ())
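+      /* Stop as soon as the byte quota, if one is set (opt.quota is
+         zero when no quota applies), has been exceeded.  */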
+ if (opt.quota && total_downloaded_bytes > opt.quota)
break;
if (status == FWRITEERR)
break;
if (!url_dequeue (queue,
(const char **)&url, (const char **)&referer,
- &depth))
+ &depth, &html_allowed))
break;
/* ...and download it. Note that this download is in most cases
DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
url, file));
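+          /* Descend into the already-downloaded file only if this
+             link may be treated as HTML and the file was in fact
+             recorded as an HTML download.  */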
- if (downloaded_html_set
+ if (html_allowed
+ && downloaded_html_set
&& string_set_contains (downloaded_html_set, file))
- descend = 1;
+ descend = true;
}
else
{
int dt = 0;
char *redirected = NULL;
- int oldrec = opt.recursive;
+ bool oldrec = opt.recursive;
- opt.recursive = 0;
+ opt.recursive = false;
status = retrieve_url (url, &file, &redirected, referer, &dt);
opt.recursive = oldrec;
- if (file && status == RETROK
+ if (html_allowed && file && status == RETROK
&& (dt & RETROKF) && (dt & TEXTHTML))
- descend = 1;
+ descend = true;
if (redirected)
{
          if (descend)
            {
if (!descend_redirect_p (redirected, url, depth,
start_url_parsed, blacklist))
- descend = 0;
+ descend = false;
else
/* Make sure that the old pre-redirect form gets
blacklisted. */
one, but we allow one more level so that the leaf
pages that contain frames can be loaded
correctly. */
- dash_p_leaf_HTML = TRUE;
+ dash_p_leaf_HTML = true;
}
else
{
affords us, so we need to bail out. */
DEBUGP (("Not descending further; at depth %d, max. %d.\n",
depth, opt.reclevel));
- descend = 0;
+ descend = false;
}
}
if (descend)
{
- int meta_disallow_follow = 0;
+ bool meta_disallow_follow = false;
struct urlpos *children
= get_urls_html (file, url, &meta_disallow_follow);
blacklist))
{
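+                  /* Propagate whether the child link is expected to
+                     yield HTML, so that html_allowed is set correctly
+                     for the new queue entry.  */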
url_enqueue (queue, xstrdup (child->url->url),
- xstrdup (url), depth + 1);
+ xstrdup (url), depth + 1,
+ child->link_expect_html);
/* We blacklist the URL we have enqueued, because we
don't want to enqueue (and hence download) the
same URL twice. */
}
xfree (url);
- FREE_MAYBE (referer);
- FREE_MAYBE (file);
+ xfree_null (referer);
+ xfree_null (file);
}
/* If anything is left of the queue due to a premature exit, free it
{
char *d1, *d2;
int d3;
- while (url_dequeue (queue, (const char **)&d1, (const char **)&d2, &d3))
+ bool d4;
+ while (url_dequeue (queue,
+ (const char **)&d1, (const char **)&d2, &d3, &d4))
{
xfree (d1);
- FREE_MAYBE (d2);
+ xfree_null (d2);
}
}
url_queue_delete (queue);
url_free (start_url_parsed);
string_set_free (blacklist);
- if (downloaded_exceeds_quota ())
+ if (opt.quota && total_downloaded_bytes > opt.quota)
return QUOTEXC;
else if (status == FWRITEERR)
return FWRITEERR;
by storing these URLs to BLACKLIST. This may or may not help. It
will help if those URLs are encountered many times. */
-static int
+static bool
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
struct url *start_url_parsed, struct hash_table *blacklist)
{
struct url *u = upos->url;
const char *url = u->url;
- int u_scheme_like_http;
+ bool u_scheme_like_http;
DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));
}
/* 6. Check for acceptance/rejection rules. We ignore these rules
- for directories (no file name to match) and for HTML documents,
- which might lead to other files that do need to be downloaded.
- That is, unless we've exhausted the recursion depth anyway. */
+ for directories (no file name to match) and for non-leaf HTMLs,
+ which can lead to other files that do need to be downloaded. (-p
+ automatically implies non-leaf because with -p we can, if
+     necessary, overstep the maximum depth to get the page requisites.) */
if (u->file[0] != '\0'
&& !(has_html_suffix_p (u->file)
- && depth != INFINITE_RECURSION
- && depth < opt.reclevel - 1))
+ /* The exception only applies to non-leaf HTMLs (but -p
+ always implies non-leaf because we can overstep the
+ maximum depth to get the requisites): */
+ && (/* non-leaf */
+ opt.reclevel == INFINITE_RECURSION
+ /* also non-leaf */
+ || depth < opt.reclevel - 1
+ /* -p, which implies non-leaf (see above) */
+ || opt.page_requisites)))
{
if (!acceptable (u->file))
{
download queue. */
DEBUGP (("Decided to load it.\n"));
- return 1;
+ return true;
out:
DEBUGP (("Decided NOT to load it.\n"));
- return 0;
+ return false;
}
/* This function determines whether we will consider downloading the
possibly to another host, etc. It is needed very rarely, and thus
it is merely a simple-minded wrapper around download_child_p. */
-static int
+static bool
descend_redirect_p (const char *redirected, const char *original, int depth,
struct url *start_url_parsed, struct hash_table *blacklist)
{
struct url *orig_parsed, *new_parsed;
struct urlpos *upos;
- int success;
+ bool success;
orig_parsed = url_parse (original, NULL);
assert (orig_parsed != NULL);
new_parsed = url_parse (redirected, NULL);
assert (new_parsed != NULL);
- upos = xmalloc (sizeof (struct urlpos));
- memset (upos, 0, sizeof (*upos));
+ upos = xnew0 (struct urlpos);
upos->url = new_parsed;
success = download_child_p (upos, orig_parsed, depth,