X-Git-Url: http://sjero.net/git/?p=wget;a=blobdiff_plain;f=src%2Frecur.c;h=b6ba1d9558cb3b163eda8b33e3ac6f11046e553e;hp=fbdb5cefdf2c6e1675e2dc20cf610ae3d82efe54;hb=c9c0e4c6418350d913638d73e0a50bebdb5fd983;hpb=260b9593dc03da095df30efc3eed251a231d09a9

diff --git a/src/recur.c b/src/recur.c
index fbdb5cef..b6ba1d95 100644
--- a/src/recur.c
+++ b/src/recur.c
@@ -1,6 +1,7 @@
 /* Handling of recursive HTTP retrieving.
    Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
-   2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+   2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation,
+   Inc.
 
 This file is part of GNU Wget.
 
@@ -33,9 +34,7 @@ as that of the covered work.  */
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif /* HAVE_UNISTD_H */
+#include <unistd.h>
 #include <errno.h>
 #include <assert.h>
 
@@ -254,26 +253,23 @@ retrieve_tree (struct url *start_url_parsed, struct iri *pi)
          the second time.  */
       if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
         {
+          bool is_css_bool;
+
           file = xstrdup (hash_table_get (dl_url_file_map, url));
 
           DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
                    url, file));
 
-          /* this sucks, needs to be combined! */
-          if (html_allowed
-              && downloaded_html_set
-              && string_set_contains (downloaded_html_set, file))
-            {
-              descend = true;
-              is_css = false;
-            }
-          if (css_allowed
-              && downloaded_css_set
-              && string_set_contains (downloaded_css_set, file))
-            {
-              descend = true;
-              is_css = true;
-            }
+          if ((is_css_bool = (css_allowed
+                  && downloaded_css_set
+                  && string_set_contains (downloaded_css_set, file)))
+              || (html_allowed
+                && downloaded_html_set
+                && string_set_contains (downloaded_html_set, file)))
+            {
+              descend = true;
+              is_css = is_css_bool;
+            }
         }
       else
         {
@@ -564,7 +560,8 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth,
   if (opt.no_parent
       && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
       && 0 == strcasecmp (u->host, start_url_parsed->host)
-      && u->port == start_url_parsed->port
+      && (u->scheme != start_url_parsed->scheme
+          || u->port == start_url_parsed->port)
       && !(opt.page_requisites && upos->link_inline_p))
     {
       if (!subdir_p (start_url_parsed->dir, u->dir))
@@ -586,6 +583,11 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth,
           goto out;
         }
     }
+  if (!accept_url (url))
+    {
+      DEBUGP (("%s is excluded/not-included through regex.\n", url));
+      goto out;
+    }
 
   /* 6. Check for acceptance/rejection rules.  We ignore these rules
      for directories (no file name to match) and for non-leaf HTMLs,
@@ -636,7 +638,7 @@ download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                 files after downloading or we're just running a spider.  */
              if (opt.delete_after || opt.spider)
                {
-                 logprintf (LOG_VERBOSE, "Removing %s.\n", rfile);
+                 logprintf (LOG_VERBOSE, _("Removing %s.\n"), rfile);
                  if (unlink (rfile))
                    logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
               }