#include <stdio.h>
#include <stdlib.h>
-#include <sys/types.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
-#ifdef HAVE_STRING_H
-# include <string.h>
-#else
-# include <strings.h>
-#endif /* HAVE_STRING_H */
+#include <string.h>
#include <assert.h>
#include "wget.h"
#include "url.h"
#include "recur.h"
#include "ftp.h"
+#include "http.h"
#include "host.h"
#include "connect.h"
#include "hash.h"
#include "convert.h"
#include "ptimer.h"
-#ifdef HAVE_SSL
-# include "gen_sslfunc.h" /* for ssl_iread */
-#endif
-
-#ifndef errno
-extern int errno;
-#endif
-
/* Total size of downloaded files. Used to enforce quota. */
-LARGE_INT total_downloaded_bytes;
+SUM_SIZE_INT total_downloaded_bytes;
+
+/* Total download time in milliseconds. */
+double total_download_time;
/* If non-NULL, the stream to which output should be written. This
stream is initialized when `-O' is used. */
/* Whether output_document is a regular file we can manipulate,
i.e. not `-' or a device file. */
-int output_stream_regular;
+bool output_stream_regular;
\f
static struct {
wgint chunk_bytes;
{
limit_data.chunk_bytes = 0;
limit_data.chunk_start = 0;
+ limit_data.sleep_adjust = 0;
}
/* Limit the bandwidth by pausing the download for an amount of time.
desired and the actual sleep, and adjust the next sleep by
that amount. */
limit_data.sleep_adjust = slp - (t1 - t0);
+ /* If sleep_adjust is very large, it's likely due to suspension
+ and not clock inaccuracy. Don't enforce those. */
+ if (limit_data.sleep_adjust > 500)
+ limit_data.sleep_adjust = 500;
+ else if (limit_data.sleep_adjust < -500)
+ limit_data.sleep_adjust = -500;
}
limit_data.chunk_bytes = 0;
continually update the display. When true, smaller timeout
values are used so that the gauge can update the display when
data arrives slowly. */
- int progress_interactive = 0;
+ bool progress_interactive = false;
- int exact = flags & rb_read_exactly;
+ bool exact = !!(flags & rb_read_exactly);
wgint skip = 0;
/* How much data we've read/written. */
}
ret = fd_read (fd, dlbuf, rdsize, tmout);
- if (ret == 0 || (ret < 0 && errno != ETIMEDOUT))
- break; /* read error */
- else if (ret < 0)
- ret = 0; /* read timeout */
+ if (progress_interactive && ret < 0 && errno == ETIMEDOUT)
+ ret = 0; /* interactive timeout, handled above */
+ else if (ret <= 0)
+ break; /* EOF or read error */
if (progress || opt.limit_rate)
{
return fd_read_hunk (fd, line_terminator, 128, FD_READ_LINE_MAX);
}
\f
-/* Return a printed representation of the download rate, as
- appropriate for the speed. If PAD is non-zero, strings will be
- padded to the width of 7 characters (xxxx.xx). */
-char *
-retr_rate (wgint bytes, double msecs, int pad)
+/* Return a printed representation of the download rate, along with
+ the units appropriate for the download speed. */
+
+const char *
+retr_rate (wgint bytes, double msecs)
{
static char res[20];
static const char *rate_names[] = {"B/s", "KB/s", "MB/s", "GB/s" };
int units = 0;
double dlrate = calc_rate (bytes, msecs, &units);
- sprintf (res, pad ? "%7.2f %s" : "%.2f %s", dlrate, rate_names[units]);
+ sprintf (res, "%.2f %s", dlrate, rate_names[units]);
return res;
}
if (msecs == 0)
/* If elapsed time is exactly zero, it means we're under the
- granularity of the timer. This can easily happen on systems
+ resolution of the timer. This can easily happen on systems
that use time() for the timer. Since the interval lies between
- 0 and the timer's granularity, assume half the granularity. */
- msecs = ptimer_granularity () / 2.0;
+ 0 and the timer's resolution, assume half the resolution. */
+ msecs = ptimer_resolution () / 2.0;
dlrate = 1000.0 * bytes / msecs;
if (dlrate < 1024.0)
#define MAX_REDIRECTIONS 20
#define SUSPEND_POST_DATA do { \
- post_data_suspended = 1; \
+ post_data_suspended = true; \
saved_post_data = opt.post_data; \
saved_post_file_name = opt.post_file_name; \
opt.post_data = NULL; \
{ \
opt.post_data = saved_post_data; \
opt.post_file_name = saved_post_file_name; \
- post_data_suspended = 0; \
+ post_data_suspended = false; \
} \
} while (0)
-static char *getproxy PARAMS ((struct url *));
+static char *getproxy (struct url *);
/* Retrieve the given URL.  Decides which loop to call -- HTTP, FTP,
   FTP proxy, etc. */
{
uerr_t result;
char *url;
- int location_changed, dummy;
+ bool location_changed;
+ int dummy;
char *mynewloc, *proxy;
struct url *u, *proxy_url;
int up_error_code; /* url parse error code */
char *local_file;
int redirection_count = 0;
- int post_data_suspended = 0;
+ bool post_data_suspended = false;
char *saved_post_data = NULL;
char *saved_post_file_name = NULL;
}
else if (u->scheme == SCHEME_FTP)
{
- /* If this is a redirection, we must not allow recursive FTP
- retrieval, so we save recursion to oldrec, and restore it
- later. */
- int oldrec = opt.recursive;
+ /* If this is a redirection, temporarily turn off opt.ftp_glob
+ and opt.recursive, both being undesirable when following
+ redirects. */
+ bool oldrec = opt.recursive, oldglob = opt.ftp_glob;
if (redirection_count)
- opt.recursive = 0;
+ opt.recursive = opt.ftp_glob = false;
+
result = ftp_loop (u, dt, proxy_url);
opt.recursive = oldrec;
+ opt.ftp_glob = oldglob;
/* There is a possibility of having HTTP being redirected to
FTP. In these cases we must decide whether the text is HTML
return result;
}
-/* Find the URLs in the file and call retrieve_url() for each of
- them. If HTML is non-zero, treat the file as HTML, and construct
- the URLs accordingly.
+/* Find the URLs in the file and call retrieve_url() for each of them.
+ If HTML is true, treat the file as HTML, and construct the URLs
+ accordingly.
If opt.recursive is set, call retrieve_tree() for each file. */
uerr_t
-retrieve_from_file (const char *file, int html, int *count)
+retrieve_from_file (const char *file, bool html, int *count)
{
uerr_t status;
struct urlpos *url_list, *cur_url;
if (filename && opt.delete_after && file_exists_p (filename))
{
- DEBUGP (("Removing file due to --delete-after in"
- " retrieve_from_file():\n"));
+ DEBUGP (("\
+Removing file due to --delete-after in retrieve_from_file():\n"));
logprintf (LOG_VERBOSE, _("Removing %s.\n"), filename);
if (unlink (filename))
logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
void
sleep_between_retrievals (int count)
{
- static int first_retrieval = 1;
+ static bool first_retrieval = true;
if (first_retrieval)
{
/* Don't sleep before the very first retrieval. */
- first_retrieval = 0;
+ first_retrieval = false;
return;
}
rename(fname, to);
}
-static int no_proxy_match PARAMS ((const char *, const char **));
+static bool no_proxy_match (const char *, const char **);
/* Return the URL of the proxy appropriate for url U. */
}
/* Should a host be accessed through proxy, concerning no_proxy?
   With no exclusion list every host goes through the proxy; with a
   list, a host is proxied unless one of the listed suffixes matches
   it.  */
static bool
no_proxy_match (const char *host, const char **no_proxy)
{
  return no_proxy == NULL ? true : !sufmatch (no_proxy, host);
}