assert (this_url != NULL);
assert (file != NULL);
/* If quota was exceeded earlier, bail out. */
- if (opt.quota && (opt.downloaded > opt.quota))
+ if (downloaded_exceeds_quota ())
return QUOTEXC;
/* Cache the current URL in the list. */
if (first_time)
[...]
for (cur_url = url_list; cur_url; cur_url = cur_url->next)
{
/* If quota was exceeded earlier, bail out. */
- if (opt.quota && (opt.downloaded > opt.quota))
+ if (downloaded_exceeds_quota ())
break;
/* Parse the URL for convenient use in other functions, as well
as to get the optimized form. It also checks URL integrity. */
[...]
else
DEBUGP (("%s is not text/html so we don't chase.\n",
filename ? filename: "(null)"));
- /* If an suffix-rejected file was loaded only because it was HTML,
- undo the error now */
+
if (opt.delete_after || (filename && !acceptable (filename)))
+ /* Either --delete-after was specified, or we loaded this otherwise
+ rejected (e.g. by -R) HTML file just so we could harvest its
+ hyperlinks -- in either case, delete the local file. */
{
+ DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
+ opt.delete_after ? "--delete-after" :
+ "recursive rejection criteria"));
logprintf (LOG_VERBOSE,
           (opt.delete_after ? _("Removing %s.\n")
            : _("Removing %s since it should be rejected.\n")),
           filename);
if (unlink (filename))
  logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
dt &= ~RETROKF;
}
+
/* If everything was OK, and links are to be converted, let's
store the local filename. */
if (opt.convert_links && (dt & RETROKF) && (filename != NULL))
[...]
freeurl (u, 1);
/* Increment the pbuf for the appropriate size. */
}
- if (opt.convert_links)
+ if (opt.convert_links && !opt.delete_after)
convert_links (file, url_list);
/* Free the linked list of URL-s. */
free_urlpos (url_list);
FREE_MAYBE (canon_this_url);
/* Decrement the recursion depth. */
--depth;
- if (opt.quota && (opt.downloaded > opt.quota))
+ if (downloaded_exceeds_quota ())
return QUOTEXC;
else
return RETROK;
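
The new downloaded_exceeds_quota () helper is introduced elsewhere in the
patch and never shown in this excerpt.  A minimal sketch of what it
presumably looks like, assuming it merely centralizes the inline test the
removed lines performed and that a quota of zero means no quota was
requested:

/* Hypothetical reconstruction, not the definition from the patch:
   fold the repeated inline test `opt.quota && (opt.downloaded >
   opt.quota)' into one place.  Assumes opt.quota == 0 means no quota
   is in effect.  */
int
downloaded_exceeds_quota (void)
{
  if (!opt.quota)
    return 0;
  return opt.downloaded > opt.quota;
}

The other behavioral change above is guarding convert_links () with
!opt.delete_after: when --delete-after is in effect, the local files have
just been unlinked, so there is presumably nothing left for link
conversion to rewrite.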