#include "connect.h"
#include "hash.h"
+#ifdef HAVE_SSL
+# include "gen_sslfunc.h" /* for ssl_iread */
+#endif
+
#ifndef errno
extern int errno;
#endif
int global_download_count;
\f
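+/* Bookkeeping for --limit-rate: the number of bytes transferred and
+ the time it took, in milliseconds, since the last bandwidth
+ adjustment.  */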
+static struct {
+ long bytes;
+ long dltime;
+} limit_data;
+
+static void
+limit_bandwidth_reset (void)
+{
+ limit_data.bytes = 0;
+ limit_data.dltime = 0;
+}
+
+/* Limit the bandwidth by pausing the download for an amount of time.
+ BYTES is the number of bytes received from the network, and DELTA
+ is the time, in milliseconds, it took to receive them.  */
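+
+/* For example (illustrative numbers only): with opt.limit_rate set
+ to 10000 bytes/sec, receiving 8192 bytes in 500 ms gives an
+ expected time of 1000 * 8192 / 10000 = 819 ms, so the download is
+ paused for the remaining 319 ms.  */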
+
+static void
+limit_bandwidth (long bytes, long delta)
+{
+ long expected;
+
+ limit_data.bytes += bytes;
+ limit_data.dltime += delta;
+
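+ /* The time downloading limit_data.bytes should have taken at the
+ requested rate, in milliseconds.  */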
+ expected = (long)(1000.0 * limit_data.bytes / opt.limit_rate);
+
+ if (expected > limit_data.dltime)
+ {
+ long slp = expected - limit_data.dltime;
+ if (slp < 200)
+ {
+ DEBUGP (("deferring a %ld ms sleep (%ld/%ld) until later.\n",
+ slp, limit_data.bytes, limit_data.dltime));
+ return;
+ }
+ DEBUGP (("sleeping %ld ms\n", slp));
+ usleep (1000 * slp);
+ }
+
+ limit_data.bytes = 0;
+ limit_data.dltime = 0;
+}
+
#define MIN(i, j) ((i) <= (j) ? (i) : (j))
/* Reads the contents of file descriptor FD, until it is closed, or a
from fd immediately, flush or discard the buffer. */
int
get_contents (int fd, FILE *fp, long *len, long restval, long expected,
- struct rbuf *rbuf, int use_expected)
+ struct rbuf *rbuf, int use_expected, long *elapsed)
{
int res = 0;
static char c[8192];
void *progress = NULL;
+ struct wget_timer *timer = wtimer_allocate ();
+ long dltime = 0, last_dltime = 0;
*len = restval;
+
if (opt.verbose)
progress = progress_create (restval, expected);
if (rbuf && RBUF_FD (rbuf) == fd)
{
- int need_flush = 0;
+ int sz = 0;
while ((res = rbuf_flush (rbuf, c, sizeof (c))) != 0)
{
- if (fwrite (c, sizeof (char), res, fp) < res)
- return -2;
- if (opt.verbose)
- progress_update (progress, res);
+ fwrite (c, sizeof (char), res, fp);
*len += res;
- need_flush = 1;
+ sz += res;
}
- if (need_flush)
+ if (sz)
fflush (fp);
if (ferror (fp))
- return -2;
+ {
+ res = -2;
+ goto out;
+ }
+ if (opt.verbose)
+ progress_update (progress, sz, 0);
}
+
+ if (opt.limit_rate)
+ limit_bandwidth_reset ();
+ wtimer_reset (timer);
+
/* Read from fd while there is available data.
Normally, if expected is 0, it means that it is not known how
? MIN (expected - *len, sizeof (c))
: sizeof (c));
#ifdef HAVE_SSL
- if (rbuf->ssl!=NULL) {
- res = ssl_iread (rbuf->ssl, c, amount_to_read);
- } else {
-#endif /* HAVE_SSL */
- res = iread (fd, c, amount_to_read);
-#ifdef HAVE_SSL
- }
+ if (rbuf->ssl != NULL)
+ res = ssl_iread (rbuf->ssl, c, amount_to_read);
+ else
#endif /* HAVE_SSL */
+ res = iread (fd, c, amount_to_read);
+
if (res > 0)
{
fwrite (c, sizeof (char), res, fp);
packets typically won't be too tiny anyway. */
fflush (fp);
if (ferror (fp))
- return -2;
+ {
+ res = -2;
+ goto out;
+ }
+
+ /* If bandwidth is not limited, one call to wtimer_elapsed
+ is sufficient. */
+ dltime = wtimer_elapsed (timer);
+ if (opt.limit_rate)
+ {
+ limit_bandwidth (res, dltime - last_dltime);
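+ /* Re-read the timer: limit_bandwidth may have slept, and the
+ sleep counts toward the total download time.  */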
+ dltime = wtimer_elapsed (timer);
+ last_dltime = dltime;
+ }
+
if (opt.verbose)
- progress_update (progress, res);
+ progress_update (progress, res, dltime);
*len += res;
}
else
}
if (res < -1)
res = -1;
+
+ out:
if (opt.verbose)
- progress_finish (progress);
+ progress_finish (progress, dltime);
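+ /* Pass the total download time back to the caller, who may use
+ it to compute the average transfer rate.  */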
+ if (elapsed)
+ *elapsed = dltime;
+ wtimer_delete (timer);
+
return res;
}
\f
&& no_proxy_match((u)->host, \
(const char **)opt.no_proxy))
-/* Retrieve the given URL. Decides which loop to call -- HTTP(S), FTP,
- or simply copy it with file:// (#### the latter not yet
- implemented!). */
+/* Maximum number of allowed redirections. 20 was chosen as a
+ "reasonable" value, which is low enough to not cause havoc, yet
+ high enough to guarantee that normal retrievals will not be hurt by
+ the check. */
+
+#define MAX_REDIRECTIONS 20
+
+/* Retrieve the given URL. Decides which loop to call -- HTTP, FTP,
+ proxy, etc. */
+
uerr_t
retrieve_url (const char *origurl, char **file, char **newloc,
const char *refurl, int *dt)
int up_error_code; /* url parse error code */
char *local_file;
struct hash_table *redirections = NULL;
+ int redirection_count = 0;
/* If dt is NULL, just ignore it. */
if (!dt)
opt.recursive = 0;
result = ftp_loop (u, dt);
opt.recursive = oldrec;
-#if 0
+
/* There is a possibility of having HTTP being redirected to
FTP. In these cases we must decide whether the text is HTML
according to the suffix. The HTML suffixes are `.html' and
`.htm', case-insensitive. */
- if (redirections && u->local && (u->scheme == SCHEME_FTP))
+ if (redirections && local_file && u->scheme == SCHEME_FTP)
{
- char *suf = suffix (u->local);
+ char *suf = suffix (local_file);
if (suf && (!strcasecmp (suf, "html") || !strcasecmp (suf, "htm")))
*dt |= TEXTHTML;
}
-#endif
}
location_changed = (result == NEWLOCATION);
if (location_changed)
string_set_add (redirections, u->url);
}
- /* The new location is OK. Check for redirection cycle by
+ /* The new location is OK. Check for max. number of
+ redirections. */
+ if (++redirection_count > MAX_REDIRECTIONS)
+ {
+ logprintf (LOG_NOTQUIET, _("%d redirections exceeded.\n"),
+ MAX_REDIRECTIONS);
+ url_free (newloc_parsed);
+ url_free (u);
+ if (redirections)
+ string_set_free (redirections);
+ xfree (url);
+ xfree (mynewloc);
+ return WRONGCODE;
+ }
+
+ /* Check for redirection cycle by
peeking through the history of redirections. */
if (string_set_contains (redirections, newloc_parsed->url))
{
{
if (*dt & RETROKF)
{
- register_download (url, local_file);
+ register_download (u->url, local_file);
if (redirections)
- register_all_redirections (redirections, url);
+ register_all_redirections (redirections, u->url);
if (*dt & TEXTHTML)
- register_html (url, local_file);
+ register_html (u->url, local_file);
}
}
FREE_MAYBE (local_file);
url_free (u);
- if (redirections)
- string_set_free (redirections);
- if (newloc)
- *newloc = url;
+ if (redirections)
+ {
+ string_set_free (redirections);
+ if (newloc)
+ *newloc = url;
+ else
+ xfree (url);
+ }
else
- xfree (url);
+ {
+ if (newloc)
+ *newloc = NULL;
+ xfree (url);
+ }
++global_download_count;
uerr_t status;
struct urlpos *url_list, *cur_url;
- url_list = (html ? get_urls_html (file, NULL, FALSE, NULL)
+ url_list = (html ? get_urls_html (file, NULL, NULL)
: get_urls_file (file));
status = RETROK; /* Suppose everything is OK. */
*count = 0; /* Reset the URL count. */
for (cur_url = url_list; cur_url; cur_url = cur_url->next, ++*count)
{
- char *filename = NULL, *new_file;
+ char *filename = NULL, *new_file = NULL;
int dt;
if (cur_url->ignore_when_downloading)
{
static int first_retrieval = 1;
+ if (first_retrieval && opt.random_wait)
+ /* --random-wait uses the RNG, so seed it. */
+ srand (time (NULL));
+
if (!first_retrieval && (opt.wait || opt.waitretry))
{
if (opt.waitretry && count > 1)
sleep (opt.waitretry);
}
else if (opt.wait)
- /* Otherwise, check if opt.wait is specified. If so, sleep. */
- sleep (opt.wait);
+ {
+ /* Otherwise, check if opt.wait is specified. If so, sleep. */
+ if (count > 1 || !opt.random_wait)
+ sleep (opt.wait);
+ else
+ {
+ int waitmax = 2 * opt.wait;
+ /* This is equivalent to rand() % waitmax, but uses the
+ high-order bits for better randomness. */
+ int waitsecs = (double)waitmax * rand () / (RAND_MAX + 1.0);
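+ /* E.g. (illustrative): with opt.wait of 5 sec, waitmax is 10
+ and waitsecs is uniform over 0..9, averaging out near
+ opt.wait.  */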
+
+ DEBUGP (("sleep_between_retrievals: norm=%ld,fuzz=%ld,sleep=%d\n",
+ opt.wait, waitsecs - opt.wait, waitsecs));
+
+ if (waitsecs)
+ sleep (waitsecs);
+ }
+ }
}
if (first_retrieval)
first_retrieval = 0;