/* Handling of recursive HTTP retrieving.
- Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
- 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+ 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation,
+ Inc.
This file is part of GNU Wget.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif /* HAVE_UNISTD_H */
+#include <unistd.h>
#include <errno.h>
#include <assert.h>
if (queue->count > queue->maxcount)
queue->maxcount = queue->count;
- DEBUGP (("Enqueuing %s at depth %d\n", url, depth));
+ DEBUGP (("Enqueuing %s at depth %d\n",
+ quotearg_n_style (0, escape_quoting_style, url), depth));
DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
if (i)
--queue->count;
- DEBUGP (("Dequeuing %s at depth %d\n", qel->url, qel->depth));
+ DEBUGP (("Dequeuing %s at depth %d\n",
+ quotearg_n_style (0, escape_quoting_style, qel->url), qel->depth));
DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount));
xfree (qel);
the queue, but haven't been downloaded yet. */
struct hash_table *blacklist;
- int up_error_code;
struct iri *i = iri_new ();
#define COPYSTR(x) (x) ? xstrdup(x) : NULL;
the second time. */
if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
{
+ bool is_css_bool;
+
file = xstrdup (hash_table_get (dl_url_file_map, url));
DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
url, file));
- /* this sucks, needs to be combined! */
- if (html_allowed
- && downloaded_html_set
- && string_set_contains (downloaded_html_set, file))
- {
- descend = true;
- is_css = false;
- }
- if (css_allowed
- && downloaded_css_set
- && string_set_contains (downloaded_css_set, file))
- {
- descend = true;
- is_css = true;
- }
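+          /* The separate HTML and CSS lookups are folded into a single
+             conditional: is_css_bool is true when the cached file is
+             found in the downloaded-CSS set, false when only the
+             downloaded-HTML set matches.  descend and is_css are then
+             set in one place.  */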
+ if ((is_css_bool = (css_allowed
+ && downloaded_css_set
+ && string_set_contains (downloaded_css_set, file)))
+ || (html_allowed
+ && downloaded_html_set
+ && string_set_contains (downloaded_html_set, file)))
+ {
+ descend = true;
+ is_css = is_css_bool;
+ }
}
else
{
int dt = 0, url_err;
char *redirected = NULL;
- struct url *url_parsed = url_parse (url, &url_err, i, false);
+ struct url *url_parsed = url_parse (url, &url_err, i, true);
status = retrieve_url (url_parsed, url, &file, &redirected, referer,
- &dt, false, i);
+ &dt, false, i, true);
if (html_allowed && file && status == RETROK
&& (dt & RETROKF) && (dt & TEXTHTML))
xfree (url);
url = redirected;
}
+ else
+ {
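+          /* Not redirected: keep going with the string form of the
+             parsed URL rather than the original spelling.  */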
+ xfree (url);
+ url = xstrdup (url_parsed->url);
+ }
url_free (url_parsed);
}
if (children)
{
struct urlpos *child = children;
- struct url *url_parsed = url_parse (url, NULL, i, false);
+ struct url *url_parsed = url_parse (url, NULL, i, true);
struct iri *ci;
char *referer_url = url;
bool strip_auth = (url_parsed != NULL
}
/* Several things to check for:
- 1. if scheme is not http, and we don't load it
- 2. check for relative links (if relative_only is set)
- 3. check for domain
- 4. check for no-parent
- 5. check for excludes && includes
- 6. check for suffix
- 7. check for same host (if spanhost is unset), with possible
+ 1. if scheme is not https and https_only requested
+ 2. if scheme is not http, and we don't load it
+ 3. check for relative links (if relative_only is set)
+ 4. check for domain
+ 5. check for no-parent
+ 6. check for excludes && includes
+ 7. check for suffix
+ 8. check for same host (if spanhost is unset), with possible
gethostbyname baggage
- 8. check for robots.txt
+ 9. check for robots.txt
Addendum: If the URL is FTP, and it is to be loaded, only the
domain and suffix settings are "stronger".
More time- and memory-consuming tests should be put later on
the list. */
+#ifdef HAVE_SSL
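+  /* 1. With --https-only in effect, refuse to descend into anything
+     that is not an HTTPS URL.  */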
+ if (opt.https_only && u->scheme != SCHEME_HTTPS)
+ {
+ DEBUGP (("Not following non-HTTPS links.\n"));
+ goto out;
+ }
+#endif
+
/* Determine whether the URL under consideration has an HTTP-like scheme. */
u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);
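+  /* For the no-parent test below, ports are only required to match
+     when the schemes are identical; a scheme change (e.g. http to
+     https) implies a different default port, and that alone should
+     not lift the --no-parent restriction.  */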
if (opt.no_parent
&& schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
&& 0 == strcasecmp (u->host, start_url_parsed->host)
- && u->port == start_url_parsed->port
+ && (u->scheme != start_url_parsed->scheme
+ || u->port == start_url_parsed->port)
&& !(opt.page_requisites && upos->link_inline_p))
{
if (!subdir_p (start_url_parsed->dir, u->dir))
goto out;
}
}
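+      /* Apply the --accept-regex / --reject-regex filters to the
+         full URL.  */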
+ if (!accept_url (url))
+ {
+ DEBUGP (("%s is excluded/not-included through regex.\n", url));
+ goto out;
+ }
/* 6. Check for acceptance/rejection rules.  We ignore these rules
   for directories (no file name to match) and for non-leaf HTMLs.  */

/* Delete the robots.txt file if we chose to either delete the
   files after downloading or we're just running a spider. */
if (opt.delete_after || opt.spider)
{
- logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
+ logprintf (LOG_VERBOSE, _("Removing %s.\n"), rfile);
if (unlink (rfile))
logprintf (LOG_NOTQUIET, "unlink: %s\n",
strerror (errno));