GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
+the Free Software Foundation; either version 2 of the License, or (at
+your option) any later version.
GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+In addition, as a special exception, the Free Software Foundation
+gives permission to link the code of its release of Wget with the
+OpenSSL project's "OpenSSL" library (or with modified versions of it
+that use the same license as the "OpenSSL" library), and distribute
+the linked executables. You must obey the GNU General Public License
+in all respects for all of the code used other than "OpenSSL". If you
+modify this file, you may extend this exception to your version of the
+file, but you are not obligated to do so. If you do not wish to do
+so, delete this exception statement from your version. */
#include <config.h>
#include "host.h"
#include "connect.h"
#include "hash.h"
+#include "convert.h"
#ifdef HAVE_SSL
# include "gen_sslfunc.h" /* for ssl_iread */
/* See the comment in gethttp() for why this is needed. */
int global_download_count;
+/* Total size of downloaded files. Used to enforce quota. */
+LARGE_INT total_downloaded_bytes;
+
\f
static struct {
- long bytes;
- long dltime;
+ long chunk_bytes;
+ double chunk_start;
+ double sleep_adjust;
} limit_data;
static void
limit_bandwidth_reset (void)
{
- limit_data.bytes = 0;
- limit_data.dltime = 0;
+ limit_data.chunk_bytes = 0;
+ limit_data.chunk_start = 0;
}
/* Limit the bandwidth by pausing the download for an amount of time.
- BYTES is the number of bytes received from the network, DELTA is
- how long it took to receive them, DLTIME the current download time,
- TIMER the timer, and ADJUSTMENT the previous. */
+ BYTES is the number of bytes received from the network, and TIMER
+ is the timer that started at the beginning of download. */
static void
-limit_bandwidth (long bytes, long delta)
+limit_bandwidth (long bytes, struct wget_timer *timer)
{
- long expected;
+ double delta_t = wtimer_read (timer) - limit_data.chunk_start;
+ double expected;
- limit_data.bytes += bytes;
- limit_data.dltime += delta;
+ limit_data.chunk_bytes += bytes;
- expected = (long)(1000.0 * limit_data.bytes / opt.limit_rate);
+ /* Calculate the amount of time we expect downloading the chunk
+ should take. If in reality it took less time, sleep to
+ compensate for the difference. */
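+ /* For example, with opt.limit_rate at 20000 (bytes per second) and
+ 16384 bytes accumulated in the current chunk, the expected time
+ is 1000 * 16384 / 20000 = 819.2 ms; if the chunk actually arrived
+ in 300 ms, we sleep roughly 519 ms plus the adjustment carried
+ over from the previous sleep. */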
+ expected = 1000.0 * limit_data.chunk_bytes / opt.limit_rate;
- if (expected > limit_data.dltime)
+ if (expected > delta_t)
{
- long slp = expected - limit_data.dltime;
+ double slp = expected - delta_t + limit_data.sleep_adjust;
+ double t0, t1;
if (slp < 200)
{
- DEBUGP (("deferring a %ld ms sleep (%ld/%ld) until later.\n",
- slp, limit_data.bytes, limit_data.dltime));
+ DEBUGP (("deferring a %.2f ms sleep (%ld/%.2f).\n",
+ slp, limit_data.chunk_bytes, delta_t));
return;
}
- DEBUGP (("sleeping %ld ms\n", slp));
- usleep (1000 * slp);
+ DEBUGP (("\nsleeping %.2f ms for %ld bytes, adjust %.2f ms\n",
+ slp, limit_data.chunk_bytes, limit_data.sleep_adjust));
+
+ t0 = wtimer_read (timer);
+ xsleep (slp / 1000);
+ wtimer_update (timer);
+ t1 = wtimer_read (timer);
+
+ /* Due to scheduling, we probably slept slightly longer (or
+ shorter) than desired. Calculate the difference between the
+ desired and the actual sleep, and adjust the next sleep by
+ that amount. */
+ limit_data.sleep_adjust = slp - (t1 - t0);
}
- limit_data.bytes = 0;
- limit_data.dltime = 0;
+ limit_data.chunk_bytes = 0;
+ limit_data.chunk_start = wtimer_read (timer);
}
#define MIN(i, j) ((i) <= (j) ? (i) : (j))
/* Reads the contents of file descriptor FD, until it is closed, or a
read error occurs. The data is read in 8K chunks, and stored to
- stream fp, which should have been open for writing. If BUF is
- non-NULL and its file descriptor is equal to FD, flush RBUF first.
- This function will *not* use the rbuf_* functions!
+ stream fp, which should have been open for writing.
The EXPECTED argument is passed to show_progress() unchanged, but
otherwise ignored.
The function exits and returns codes of 0, -1 and -2 if the
connection was closed, there was a read error, or if it could not
- write to the output stream, respectively.
+ write to the output stream, respectively. */
- IMPORTANT: The function flushes the contents of the buffer in
- rbuf_flush() before actually reading from fd. If you wish to read
- from fd immediately, flush or discard the buffer. */
int
-get_contents (int fd, FILE *fp, long *len, long restval, long expected,
- struct rbuf *rbuf, int use_expected, long *elapsed)
+fd_read_body (int fd, FILE *out, long *len, long restval, long expected,
+ int use_expected, double *elapsed)
{
int res = 0;
- static char c[8192];
- void *progress = NULL;
+
+ static char dlbuf[16384];
+ int dlbufsize = sizeof (dlbuf);
+
struct wget_timer *timer = wtimer_allocate ();
- long dltime = 0, last_dltime = 0;
+ double last_successful_read_tm;
+
+ /* The progress gauge, set according to the user preferences. */
+ void *progress = NULL;
+
+ /* Non-zero if the progress gauge is interactive, i.e. if it can
+ continually update the display. When true, smaller timeout
+ values are used so that the gauge can update the display when
+ data arrives slowly. */
+ int progress_interactive = 0;
*len = restval;
if (opt.verbose)
- progress = progress_create (restval, expected);
-
- if (rbuf && RBUF_FD (rbuf) == fd)
{
- int sz = 0;
- while ((res = rbuf_flush (rbuf, c, sizeof (c))) != 0)
- {
- fwrite (c, sizeof (char), res, fp);
- *len += res;
- sz += res;
- }
- if (sz)
- fflush (fp);
- if (ferror (fp))
- {
- res = -2;
- goto out;
- }
- if (opt.verbose)
- progress_update (progress, sz, 0);
+ progress = progress_create (restval, expected);
+ progress_interactive = progress_interactive_p (progress);
}
if (opt.limit_rate)
limit_bandwidth_reset ();
wtimer_reset (timer);
+ last_successful_read_tm = 0;
+
+ /* Use a smaller buffer for low requested bandwidths. For example,
+ with --limit-rate=2k, it doesn't make sense to slurp in 16K of
+ data and then sleep for 8s. With buffer size equal to the limit,
+ we never have to sleep for more than one second. */
+ if (opt.limit_rate && opt.limit_rate < dlbufsize)
+ dlbufsize = opt.limit_rate;
/* Read from fd while there is available data.
while (!use_expected || (*len < expected))
{
int amount_to_read = (use_expected
- ? MIN (expected - *len, sizeof (c))
- : sizeof (c));
-#ifdef HAVE_SSL
- if (rbuf->ssl!=NULL)
- res = ssl_iread (rbuf->ssl, c, amount_to_read);
- else
-#endif /* HAVE_SSL */
- res = iread (fd, c, amount_to_read);
+ ? MIN (expected - *len, dlbufsize) : dlbufsize);
+ double tmout = opt.read_timeout;
+ if (progress_interactive)
+ {
+ double waittm;
+ /* For interactive progress gauges, always specify a ~1s
+ timeout, so that the gauge can be updated regularly even
+ when the data arrives very slowly or stalls. */
+ tmout = 0.95;
+ waittm = (wtimer_read (timer) - last_successful_read_tm) / 1000;
+ if (waittm + tmout > opt.read_timeout)
+ {
+ /* Don't allow waiting time to exceed read timeout. */
+ tmout = opt.read_timeout - waittm;
+ if (tmout < 0)
+ {
+ /* We've already exceeded the timeout. */
+ res = -1, errno = ETIMEDOUT;
+ break;
+ }
+ }
+ }
+ res = fd_read (fd, dlbuf, amount_to_read, tmout);
+
+ if (res == 0 || (res < 0 && errno != ETIMEDOUT))
+ break;
+ else if (res < 0)
+ res = 0; /* timeout */
+ wtimer_update (timer);
if (res > 0)
{
- fwrite (c, sizeof (char), res, fp);
+ fwrite (dlbuf, 1, res, out);
/* Always flush the contents of the network packet. This
- should not be adverse to performance, as the network
- packets typically won't be too tiny anyway. */
- fflush (fp);
- if (ferror (fp))
+ should not hinder performance: fast downloads will be
+ received in 16K chunks (which stdio would write out
+ anyway), and slow downloads won't be limited by disk
+ performance. */
+ fflush (out);
+ if (ferror (out))
{
res = -2;
goto out;
}
+ last_successful_read_tm = wtimer_read (timer);
+ }
- /* If bandwidth is not limited, one call to wtimer_elapsed
- is sufficient. */
- dltime = wtimer_elapsed (timer);
- if (opt.limit_rate)
- {
- limit_bandwidth (res, dltime - last_dltime);
- dltime = wtimer_elapsed (timer);
- last_dltime = dltime;
- }
+ if (opt.limit_rate)
+ limit_bandwidth (res, timer);
- if (opt.verbose)
- progress_update (progress, res, dltime);
- *len += res;
- }
- else
- break;
+ *len += res;
+ if (progress)
+ progress_update (progress, res, wtimer_read (timer));
+#ifdef WINDOWS
+ if (use_expected && expected > 0)
+ ws_percenttitle (100.0 * (double)(*len) / (double)expected);
+#endif
}
if (res < -1)
res = -1;
out:
- if (opt.verbose)
- progress_finish (progress, dltime);
+ if (progress)
+ progress_finish (progress, wtimer_read (timer));
if (elapsed)
- *elapsed = dltime;
+ *elapsed = wtimer_read (timer);
wtimer_delete (timer);
return res;
}
\f
+typedef const char *(*finder_t) PARAMS ((const char *, int, int));
+
+/* Driver for fd_read_line and fd_read_head: keeps reading data until
+ a terminator (as decided by FINDER) occurs in the data. The trick
+ is that the data is first peeked at, and only then actually read.
+ That way the data after the terminator is never read. */
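+
+/* A FINDER callback receives the whole buffer, the number of bytes
+ already read into it, and the number of bytes just peeked; it
+ returns a pointer one past the terminator it finds, or NULL if the
+ peeked data contains no terminator. */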
+
+static char *
+fd_read_until (int fd, finder_t finder, int bufsize)
+{
+ int size = bufsize, tail = 0;
+ char *buf = xmalloc (size);
+
+ while (1)
+ {
+ const char *end;
+ int pklen, rdlen, remain;
+
+ /* First, peek at the available data. */
+
+ pklen = fd_peek (fd, buf + tail, size - tail, -1);
+ if (pklen < 0)
+ {
+ xfree (buf);
+ return NULL;
+ }
+ end = finder (buf, tail, pklen);
+ if (end)
+ {
+ /* The data contains the terminator: we'll read the data up
+ to the end of the terminator. */
+ remain = end - (buf + tail);
+ /* Note +1 for trailing \0. */
+ if (size < tail + remain + 1)
+ {
+ size = tail + remain + 1;
+ buf = xrealloc (buf, size);
+ }
+ }
+ else
+ /* No terminator: simply read the data we know is (or should
+ be) available. */
+ remain = pklen;
+
+ /* Now, read the data. Note that we make no assumptions about
+ how much data we'll get. (Some TCP stacks are notorious for
+ read returning less data than the previous MSG_PEEK.) */
+
+ rdlen = fd_read (fd, buf + tail, remain, 0);
+ if (rdlen < 0)
+ {
+ xfree_null (buf);
+ return NULL;
+ }
+ if (rdlen == 0)
+ {
+ if (tail == 0)
+ {
+ /* EOF without anything having been read */
+ xfree (buf);
+ errno = 0;
+ return NULL;
+ }
+ /* Return what we received so far. */
+ if (size < tail + 1)
+ {
+ size = tail + 1; /* expand the buffer to receive the
+ terminating \0 */
+ buf = xrealloc (buf, size);
+ }
+ buf[tail] = '\0';
+ return buf;
+ }
+ tail += rdlen;
+ if (end && rdlen == remain)
+ {
+ /* The end was seen and the data read -- we got what we came
+ for. */
+ buf[tail] = '\0';
+ return buf;
+ }
+
+ /* Keep looping until all the data arrives. */
+
+ if (tail == size)
+ {
+ size <<= 1;
+ buf = xrealloc (buf, size);
+ }
+ }
+}
+
+static const char *
+line_terminator (const char *buf, int tail, int peeklen)
+{
+ const char *p = memchr (buf + tail, '\n', peeklen);
+ if (p)
+ /* p+1 because we want the line to include '\n' */
+ return p + 1;
+ return NULL;
+}
+
+/* Read one line from FD and return it. The line is allocated using
+ malloc.
+
+ If an error occurs, or if no data can be read, NULL is returned.
+ In the former case errno indicates the error condition, and in the
+ latter case, errno is zero. */
+
+char *
+fd_read_line (int fd)
+{
+ return fd_read_until (fd, line_terminator, 128);
+}
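+
+/* A typical (hypothetical) caller would look something like:
+
+ char *line;
+ while ((line = fd_read_line (fd)) != NULL)
+ {
+ ... use LINE, which normally includes the terminating '\n' ...
+ xfree (line);
+ }
+
+ after which errno distinguishes end-of-file (errno == 0) from a
+ read error. */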
+
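+/* Terminator for fd_read_head: an HTTP head ends with an empty line,
+ i.e. with a "\n\r\n" or "\n\n" sequence. The scan starts up to
+ four bytes before the newly peeked data so that a terminator split
+ across two peeks is still recognized. */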
+static const char *
+head_terminator (const char *buf, int tail, int peeklen)
+{
+ const char *start, *end;
+ if (tail < 4)
+ start = buf;
+ else
+ start = buf + tail - 4;
+ end = buf + tail + peeklen;
+
+ for (; start < end - 1; start++)
+ if (*start == '\n')
+ {
+ if (start < end - 2
+ && start[1] == '\r'
+ && start[2] == '\n')
+ return start + 3;
+ if (start[1] == '\n')
+ return start + 2;
+ }
+ return NULL;
+}
+
+/* Read the head of an HTTP response from FD and return it. The
+ chunk of data is allocated using malloc.
+
+ If an error occurs, or if no data can be read, NULL is returned.
+ In the former case errno indicates the error condition, and in the
+ latter case, errno is zero. */
+
+char *
+fd_read_head (int fd)
+{
+ return fd_read_until (fd, head_terminator, 512);
+}
+\f
/* Return a printed representation of the download rate, as
appropriate for the speed. If PAD is non-zero, strings will be
padded to the width of 7 characters (xxxx.xx). */
char *
-retr_rate (long bytes, long msecs, int pad)
+retr_rate (long bytes, double msecs, int pad)
{
static char res[20];
static char *rate_names[] = {"B/s", "KB/s", "MB/s", "GB/s" };
UNITS is zero for B/s, one for KB/s, two for MB/s, and three for
GB/s. */
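+/* For example, 1048576 bytes received in 2000 ms amounts to
+ 524288 B/s, which is reported as 512 with *UNITS set to 1 (KB/s). */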
double
-calc_rate (long bytes, long msecs, int *units)
+calc_rate (long bytes, double msecs, int *units)
{
double dlrate;
assert (bytes >= 0);
if (msecs == 0)
- /* If elapsed time is 0, it means we're under the granularity of
- the timer. This often happens on systems that use time() for
- the timer. */
+ /* If elapsed time is exactly zero, it means we're under the
+ granularity of the timer. This often happens on systems that
+ use time() for the timer. */
msecs = wtimer_granularity ();
dlrate = (double)1000 * bytes / msecs;
else if (dlrate < 1024.0 * 1024.0 * 1024.0)
*units = 2, dlrate /= (1024.0 * 1024.0);
else
- /* Maybe someone will need this one day. More realistically, it
- will get tickled by buggy timers. */
+ /* Maybe someone will need this, one day. */
*units = 3, dlrate /= (1024.0 * 1024.0 * 1024.0);
return dlrate;
}
\f
-static int
-register_redirections_mapper (void *key, void *value, void *arg)
-{
- const char *redirected_from = (const char *)key;
- const char *redirected_to = (const char *)arg;
- if (0 != strcmp (redirected_from, redirected_to))
- register_redirection (redirected_from, redirected_to);
- return 0;
-}
-
-/* Register the redirections that lead to the successful download of
- this URL. This is necessary so that the link converter can convert
- redirected URLs to the local file. */
-
-static void
-register_all_redirections (struct hash_table *redirections, const char *final)
-{
- hash_table_map (redirections, register_redirections_mapper, (void *)final);
-}
-
-#define USE_PROXY_P(u) (opt.use_proxy && getproxy((u)->scheme) \
- && no_proxy_match((u)->host, \
- (const char **)opt.no_proxy))
-
/* Maximum number of allowed redirections. 20 was chosen as a
"reasonable" value, which is low enough to not cause havoc, yet
high enough to guarantee that normal retrievals will not be hurt by
#define MAX_REDIRECTIONS 20
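+
+/* When a POST request is answered with a redirection, the new
+ location is requested with GET; these macros temporarily clear
+ opt.post_data and opt.post_file_name for the duration of the
+ redirections and restore them afterwards. See the comment in
+ retrieve_url for details. */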
+#define SUSPEND_POST_DATA do { \
+ post_data_suspended = 1; \
+ saved_post_data = opt.post_data; \
+ saved_post_file_name = opt.post_file_name; \
+ opt.post_data = NULL; \
+ opt.post_file_name = NULL; \
+} while (0)
+
+#define RESTORE_POST_DATA do { \
+ if (post_data_suspended) \
+ { \
+ opt.post_data = saved_post_data; \
+ opt.post_file_name = saved_post_file_name; \
+ post_data_suspended = 0; \
+ } \
+} while (0)
+
+static char *getproxy PARAMS ((struct url *));
+
/* Retrieve the given URL. Decides which loop to call -- HTTP, FTP,
   proxy, etc. */
+/* #### This function should be rewritten so it doesn't return from
+ multiple points. */
+
uerr_t
retrieve_url (const char *origurl, char **file, char **newloc,
const char *refurl, int *dt)
uerr_t result;
char *url;
int location_changed, dummy;
- int use_proxy;
char *mynewloc, *proxy;
- struct url *u;
+ struct url *u, *proxy_url;
int up_error_code; /* url parse error code */
char *local_file;
- struct hash_table *redirections = NULL;
int redirection_count = 0;
- /* If dt is NULL, just ignore it. */
+ int post_data_suspended = 0;
+ char *saved_post_data = NULL;
+ char *saved_post_file_name = NULL;
+
+ /* If dt is NULL, use local storage. */
if (!dt)
- dt = &dummy;
+ {
+ dt = &dummy;
+ dummy = 0;
+ }
url = xstrdup (origurl);
if (newloc)
*newloc = NULL;
if (!u)
{
logprintf (LOG_NOTQUIET, "%s: %s.\n", url, url_error (up_error_code));
- if (redirections)
- string_set_free (redirections);
xfree (url);
return URLERROR;
}
result = NOCONERROR;
mynewloc = NULL;
local_file = NULL;
+ proxy_url = NULL;
- use_proxy = USE_PROXY_P (u);
- if (use_proxy)
+ proxy = getproxy (u);
+ if (proxy)
{
- struct url *proxy_url;
-
- /* Get the proxy server for the current scheme. */
- proxy = getproxy (u->scheme);
- if (!proxy)
- {
- logputs (LOG_NOTQUIET, _("Could not find proxy host.\n"));
- url_free (u);
- if (redirections)
- string_set_free (redirections);
- xfree (url);
- return PROXERR;
- }
-
/* Parse the proxy URL. */
proxy_url = url_parse (proxy, &up_error_code);
if (!proxy_url)
{
logprintf (LOG_NOTQUIET, _("Error parsing proxy URL %s: %s.\n"),
proxy, url_error (up_error_code));
- if (redirections)
- string_set_free (redirections);
xfree (url);
+ RESTORE_POST_DATA;
return PROXERR;
}
- if (proxy_url->scheme != SCHEME_HTTP)
+ if (proxy_url->scheme != SCHEME_HTTP && proxy_url->scheme != u->scheme)
{
logprintf (LOG_NOTQUIET, _("Error in proxy URL %s: Must be HTTP.\n"), proxy);
url_free (proxy_url);
- if (redirections)
- string_set_free (redirections);
xfree (url);
+ RESTORE_POST_DATA;
return PROXERR;
}
-
- result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url);
- url_free (proxy_url);
}
- else if (u->scheme == SCHEME_HTTP
+
+ if (u->scheme == SCHEME_HTTP
#ifdef HAVE_SSL
|| u->scheme == SCHEME_HTTPS
#endif
- )
+ || (proxy_url && proxy_url->scheme == SCHEME_HTTP))
{
- result = http_loop (u, &mynewloc, &local_file, refurl, dt, NULL);
+ result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url);
}
else if (u->scheme == SCHEME_FTP)
{
retrieval, so we save recursion to oldrec, and restore it
later. */
int oldrec = opt.recursive;
- if (redirections)
+ if (redirection_count)
opt.recursive = 0;
- result = ftp_loop (u, dt);
+ result = ftp_loop (u, dt, proxy_url);
opt.recursive = oldrec;
/* There is a possibility of having HTTP being redirected to
FTP. In these cases we must decide whether the text is HTML
- according to the suffix. The HTML suffixes are `.html' and
- `.htm', case-insensitive. */
- if (redirections && local_file && u->scheme == SCHEME_FTP)
+ according to the suffix. The HTML suffixes are `.html',
+ `.htm' and a few others, case-insensitive. */
+ if (redirection_count && local_file && u->scheme == SCHEME_FTP)
{
- char *suf = suffix (local_file);
- if (suf && (!strcasecmp (suf, "html") || !strcasecmp (suf, "htm")))
+ if (has_html_suffix_p (local_file))
*dt |= TEXTHTML;
}
}
+
+ if (proxy_url)
+ {
+ url_free (proxy_url);
+ proxy_url = NULL;
+ }
+
location_changed = (result == NEWLOCATION);
if (location_changed)
{
logprintf (LOG_NOTQUIET, "%s: %s.\n", mynewloc,
url_error (up_error_code));
url_free (u);
- if (redirections)
- string_set_free (redirections);
xfree (url);
xfree (mynewloc);
+ RESTORE_POST_DATA;
return result;
}
xfree (mynewloc);
mynewloc = xstrdup (newloc_parsed->url);
- if (!redirections)
- {
- redirections = make_string_hash_table (0);
- /* Add current URL immediately so we can detect it as soon
- as possible in case of a cycle. */
- string_set_add (redirections, u->url);
- }
-
- /* The new location is OK. Check for max. number of
- redirections. */
+ /* Check for max. number of redirections. */
if (++redirection_count > MAX_REDIRECTIONS)
{
logprintf (LOG_NOTQUIET, _("%d redirections exceeded.\n"),
MAX_REDIRECTIONS);
url_free (newloc_parsed);
url_free (u);
- if (redirections)
- string_set_free (redirections);
- xfree (url);
- xfree (mynewloc);
- return WRONGCODE;
- }
-
- /*Check for redirection cycle by
- peeking through the history of redirections. */
- if (string_set_contains (redirections, newloc_parsed->url))
- {
- logprintf (LOG_NOTQUIET, _("%s: Redirection cycle detected.\n"),
- mynewloc);
- url_free (newloc_parsed);
- url_free (u);
- if (redirections)
- string_set_free (redirections);
xfree (url);
xfree (mynewloc);
+ RESTORE_POST_DATA;
return WRONGCODE;
}
- string_set_add (redirections, newloc_parsed->url);
xfree (url);
url = mynewloc;
url_free (u);
u = newloc_parsed;
+
+ /* If we're being redirected from POST, we don't want to POST
+ again. Many requests answer POST with a redirection to an
+ index page; that redirection is clearly a GET. We "suspend"
+ POST data for the duration of the redirections, and restore
+ it when we're done. */
+ if (!post_data_suspended)
+ SUSPEND_POST_DATA;
+
goto redirected;
}
{
if (*dt & RETROKF)
{
- register_download (url, local_file);
- if (redirections)
- register_all_redirections (redirections, url);
+ register_download (u->url, local_file);
+ if (redirection_count && 0 != strcmp (origurl, u->url))
+ register_redirection (origurl, u->url);
if (*dt & TEXTHTML)
- register_html (url, local_file);
+ register_html (u->url, local_file);
}
}
if (file)
*file = local_file ? local_file : NULL;
else
- FREE_MAYBE (local_file);
+ xfree_null (local_file);
url_free (u);
- if (redirections)
+ if (redirection_count)
{
- string_set_free (redirections);
if (newloc)
*newloc = url;
else
}
++global_download_count;
+ RESTORE_POST_DATA;
return result;
}
them. If HTML is non-zero, treat the file as HTML, and construct
the URLs accordingly.
- If opt.recursive is set, call recursive_retrieve() for each file. */
+ If opt.recursive is set, call retrieve_tree() for each file. */
+
uerr_t
retrieve_from_file (const char *file, int html, int *count)
{
if (cur_url->ignore_when_downloading)
continue;
- if (downloaded_exceeds_quota ())
+ if (opt.quota && total_downloaded_bytes > opt.quota)
{
status = QUOTEXC;
break;
}
- if (opt.recursive && cur_url->url->scheme != SCHEME_FTP)
+ if ((opt.recursive || opt.page_requisites)
+ && cur_url->url->scheme != SCHEME_FTP)
status = retrieve_tree (cur_url->url->url);
else
status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, &dt);
dt &= ~RETROKF;
}
- FREE_MAYBE (new_file);
- FREE_MAYBE (filename);
+ xfree_null (new_file);
+ xfree_null (filename);
}
/* Free the linked list of URL-s. */
logputs (LOG_VERBOSE, (n1 == n2) ? _("Giving up.\n\n") : _("Retrying.\n\n"));
}
-/* Increment opt.downloaded by BY_HOW_MUCH. If an overflow occurs,
- set opt.downloaded_overflow to 1. */
-void
-downloaded_increase (unsigned long by_how_much)
-{
- VERY_LONG_TYPE old;
- if (opt.downloaded_overflow)
- return;
- old = opt.downloaded;
- opt.downloaded += by_how_much;
- if (opt.downloaded < old) /* carry flag, where are you when I
- need you? */
- {
- /* Overflow. */
- opt.downloaded_overflow = 1;
- opt.downloaded = ~((VERY_LONG_TYPE)0);
- }
-}
-
-/* Return non-zero if the downloaded amount of bytes exceeds the
- desired quota. If quota is not set or if the amount overflowed, 0
- is returned. */
-int
-downloaded_exceeds_quota (void)
-{
- if (!opt.quota)
- return 0;
- if (opt.downloaded_overflow)
- /* We don't really know. (Wildly) assume not. */
- return 0;
-
- return opt.downloaded > opt.quota;
-}
-
/* If opt.wait or opt.waitretry are specified, and if certain
conditions are met, sleep the appropriate number of seconds. See
the documentation of --wait and --waitretry for more information.
{
static int first_retrieval = 1;
- if (first_retrieval && opt.random_wait)
- /* --random-wait uses the RNG, so seed it. */
- srand (time (NULL));
+ if (first_retrieval)
+ {
+ /* Don't sleep before the very first retrieval. */
+ first_retrieval = 0;
+ return;
+ }
- if (!first_retrieval && (opt.wait || opt.waitretry))
+ if (opt.waitretry && count > 1)
+ {
+ /* If opt.waitretry is specified and this is a retry, wait
+ COUNT-1 seconds, but no more than opt.waitretry seconds. */
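+ /* For instance, with --waitretry=10 the second attempt is
+ preceded by a 1-second wait, the third by 2 seconds, and so
+ on, up to a maximum of 10 seconds. */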
+ if (count <= opt.waitretry)
+ xsleep (count - 1);
+ else
+ xsleep (opt.waitretry);
+ }
+ else if (opt.wait)
{
- if (opt.waitretry && count > 1)
+ if (!opt.random_wait || count > 1)
+ /* If random-wait is not specified, or if we are sleeping
+ between retries of the same download, sleep the fixed
+ interval. */
+ xsleep (opt.wait);
+ else
{
- /* If opt.waitretry is specified and this is a retry, wait
- for COUNT-1 number of seconds, or for opt.waitretry
- seconds. */
- if (count <= opt.waitretry)
- sleep (count - 1);
- else
- sleep (opt.waitretry);
+ /* Sleep a random amount of time that averages opt.wait
+ seconds. The sleeping amount ranges from 0 to
+ opt.wait*2, inclusive. */
+ double waitsecs = 2 * opt.wait * random_float ();
+ DEBUGP (("sleep_between_retrievals: avg=%f,sleep=%f\n",
+ opt.wait, waitsecs));
+ xsleep (waitsecs);
}
- else if (opt.wait)
- {
- /* Otherwise, check if opt.wait is specified. If so, sleep. */
- if (count > 1 || !opt.random_wait)
- sleep (opt.wait);
- else
- {
- int waitmax = 2 * opt.wait;
- /* This is equivalent to rand() % waitmax, but uses the
- high-order bits for better randomness. */
- int waitsecs = (double)waitmax * rand () / (RAND_MAX + 1.0);
+ }
+}
- DEBUGP (("sleep_between_retrievals: norm=%ld,fuzz=%ld,sleep=%d\n",
- opt.wait, waitsecs - opt.wait, waitsecs));
+/* Free the linked list of urlpos. */
+void
+free_urlpos (struct urlpos *l)
+{
+ while (l)
+ {
+ struct urlpos *next = l->next;
+ if (l->url)
+ url_free (l->url);
+ xfree_null (l->local_name);
+ xfree (l);
+ l = next;
+ }
+}
- if (waitsecs)
- sleep (waitsecs);
- }
- }
+/* Rotate FNAME opt.backups times */
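+/* For example, with opt.backups set to 3, FNAME.2 is renamed to
+ FNAME.3, FNAME.1 to FNAME.2, and FNAME itself to FNAME.1. */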
+void
+rotate_backups (const char *fname)
+{
+ int maxlen = strlen (fname) + 1 + numdigit (opt.backups) + 1;
+ char *from = (char *)alloca (maxlen);
+ char *to = (char *)alloca (maxlen);
+ struct stat sb;
+ int i;
+
+ if (stat (fname, &sb) == 0)
+ if (S_ISREG (sb.st_mode) == 0)
+ return;
+
+ for (i = opt.backups; i > 1; i--)
+ {
+ sprintf (from, "%s.%d", fname, i - 1);
+ sprintf (to, "%s.%d", fname, i);
+ rename (from, to);
}
- if (first_retrieval)
- first_retrieval = 0;
+
+ sprintf (to, "%s.%d", fname, 1);
+ rename (fname, to);
+}
+
+static int no_proxy_match PARAMS ((const char *, const char **));
+
+/* Return the URL of the proxy appropriate for url U. */
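+/* A proxy configured through opt.http_proxy, opt.https_proxy or
+ opt.ftp_proxy takes precedence over the corresponding environment
+ variable. */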
+
+static char *
+getproxy (struct url *u)
+{
+ char *proxy = NULL;
+ char *rewritten_url;
+ static char rewritten_storage[1024];
+
+ if (!opt.use_proxy)
+ return NULL;
+ if (!no_proxy_match (u->host, (const char **)opt.no_proxy))
+ return NULL;
+
+ switch (u->scheme)
+ {
+ case SCHEME_HTTP:
+ proxy = opt.http_proxy ? opt.http_proxy : getenv ("http_proxy");
+ break;
+#ifdef HAVE_SSL
+ case SCHEME_HTTPS:
+ proxy = opt.https_proxy ? opt.https_proxy : getenv ("https_proxy");
+ break;
+#endif
+ case SCHEME_FTP:
+ proxy = opt.ftp_proxy ? opt.ftp_proxy : getenv ("ftp_proxy");
+ break;
+ case SCHEME_INVALID:
+ break;
+ }
+ if (!proxy || !*proxy)
+ return NULL;
+
+ /* Handle shorthands. `rewritten_storage' is a kludge to allow
+ getproxy() to return static storage. */
+ rewritten_url = rewrite_shorthand_url (proxy);
+ if (rewritten_url)
+ {
+ strncpy (rewritten_storage, rewritten_url, sizeof(rewritten_storage));
+ rewritten_storage[sizeof (rewritten_storage) - 1] = '\0';
+ proxy = rewritten_storage;
+ }
+
+ return proxy;
+}
+
+/* Return non-zero if HOST should be accessed through a proxy, i.e.
+ if it is not matched by the no_proxy list. */
+int
+no_proxy_match (const char *host, const char **no_proxy)
+{
+ if (!no_proxy)
+ return 1;
+ else
+ return !sufmatch (no_proxy, host);
}