2 Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001 Free Software Foundation, Inc.
4 This file is part of GNU Wget.
6 GNU Wget is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 GNU Wget is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with Wget; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
24 #include <sys/types.h>
27 #endif /* HAVE_UNISTD_H */
33 #endif /* HAVE_STRING_H */
51 /* See the comment in gethttp() why this is needed. */
52 int global_download_count;
/* NOTE(review): MIN evaluates both arguments twice; the only call site
   visible in this listing passes side-effect-free expressions
   (`expected - *len' and `sizeof (c)'), so that is safe here, but new
   callers must not pass expressions with side effects. */
55 #define MIN(i, j) ((i) <= (j) ? (i) : (j))
57 /* Reads the contents of file descriptor FD, until it is closed, or a
58 read error occurs. The data is read in 8K chunks, and stored to
59 stream fp, which should have been open for writing. If BUF is
60 non-NULL and its file descriptor is equal to FD, flush RBUF first.
61 This function will *not* use the rbuf_* functions!
63 The EXPECTED argument is passed to show_progress() unchanged, but
66 If opt.verbose is set, the progress is also shown. RESTVAL
67 represents a value from which to start downloading (which will be
68 shown accordingly). If RESTVAL is non-zero, the stream should have
69 been open for appending.
71 The function exits and returns codes of 0, -1 and -2 if the
72 connection was closed, there was a read error, or if it could not
73 write to the output stream, respectively.
75 IMPORTANT: The function flushes the contents of the buffer in
76 rbuf_flush() before actually reading from fd. If you wish to read
77 from fd immediately, flush or discard the buffer. */
/* NOTE(review): this listing is fragmentary -- the return-type line,
   braces, and the declarations of `res' and the chunk buffer `c' are
   not visible here; confirm against the full source. */
79 get_contents (int fd, FILE *fp, long *len, long restval, long expected,
80 struct rbuf *rbuf, int use_expected)
84 void *progress = NULL;
88 progress = progress_create (restval, expected);
/* First drain any data already buffered in RBUF for this fd, writing
   it to FP before touching the network again. */
90 if (rbuf && RBUF_FD (rbuf) == fd)
93 while ((res = rbuf_flush (rbuf, c, sizeof (c))) != 0)
/* A short fwrite here is treated as a write error (the -2 return
   documented above -- the error path itself is not visible in this
   fragment). */
95 if (fwrite (c, sizeof (char), res, fp) < res)
98 progress_update (progress, res);
107 /* Read from fd while there is available data.
109 Normally, if expected is 0, it means that it is not known how
110 much data is expected. However, if use_expected is specified,
111 then expected being zero means exactly that. */
112 while (!use_expected || (*len < expected))
/* When a byte count is expected, never read past it; MIN caps the
   request at the remaining bytes or one buffer, whichever is less. */
114 int amount_to_read = (use_expected
115 ? MIN (expected - *len, sizeof (c))
/* SSL and plain-socket reads are selected at compile time/run time;
   the matching #if lines are missing from this fragment. */
118 if (rbuf->ssl!=NULL) {
119 res = ssl_iread (rbuf->ssl, c, amount_to_read);
121 #endif /* HAVE_SSL */
122 res = iread (fd, c, amount_to_read);
125 #endif /* HAVE_SSL */
/* NOTE(review): unlike the rbuf_flush path above, this fwrite's
   return value is not checked on this line; the check may be on a
   line not visible in this fragment -- confirm. */
128 fwrite (c, sizeof (char), res, fp);
129 /* Always flush the contents of the network packet. This
130 should not be adverse to performance, as the network
131 packets typically won't be too tiny anyway. */
136 progress_update (progress, res);
145 progress_finish (progress);
149 /* Return a printed representation of the download rate, as
150 appropriate for the speed. If PAD is non-zero, strings will be
151 padded to the width of 7 characters (xxxx.xx). */
/* NOTE(review): the declaration of `res' is on a line missing from
   this fragment; presumably a static buffer, meaning the returned
   string is overwritten by the next call and the function is not
   reentrant -- confirm against the full source. */
153 retr_rate (long bytes, long msecs, int pad)
156 static char *rate_names[] = {"B/s", "KB/s", "MB/s", "GB/s" };
/* calc_rate scales the rate and reports the unit index used for
   indexing rate_names (0=B/s .. 3=GB/s, per its own comment below). */
159 double dlrate = calc_rate (bytes, msecs, &units);
160 sprintf (res, pad ? "%7.2f %s" : "%.2f %s", dlrate, rate_names[units]);
165 /* Calculate the download rate and trim it as appropriate for the
166 speed. Appropriate means that if rate is greater than 1K/s,
167 kilobytes are used, and if rate is greater than 1MB/s, megabytes
170 UNITS is zero for B/s, one for KB/s, two for MB/s, and three for
173 calc_rate (long bytes, long msecs, int *units)
181 /* If elapsed time is 0, it means we're under the granularity of
182 the timer. This often happens on systems that use time() for
/* Substitute the timer's granularity for zero to avoid dividing by
   zero below. */
184 msecs = wtimer_granularity ();
/* Bytes per second: msecs is milliseconds, hence the factor of 1000. */
186 dlrate = (double)1000 * bytes / msecs;
189 else if (dlrate < 1024.0 * 1024.0)
190 *units = 1, dlrate /= 1024.0;
191 else if (dlrate < 1024.0 * 1024.0 * 1024.0)
192 *units = 2, dlrate /= (1024.0 * 1024.0);
194 /* Maybe someone will need this one day. More realistically, it
195 will get tickled by buggy timers. */
196 *units = 3, dlrate /= (1024.0 * 1024.0 * 1024.0);
/* Mapper callback for hash_table_map() in register_all_redirections()
   below: register each source URL as a redirection to the final URL
   (passed through ARG), skipping the identity mapping.  VALUE is not
   referenced in the lines visible here. */
202 register_redirections_mapper (void *key, void *value, void *arg)
204 const char *redirected_from = (const char *)key;
205 const char *redirected_to = (const char *)arg;
/* Don't register a URL as redirecting to itself. */
206 if (0 != strcmp (redirected_from, redirected_to))
207 register_redirection (redirected_from, redirected_to);
211 /* Register the redirections that lead to the successful download of
212 this URL. This is necessary so that the link converter can convert
213 redirected URLs to the local file. */
216 register_all_redirections (struct hash_table *redirections, const char *final)
/* FINAL is smuggled to the mapper through the opaque arg pointer; the
   cast away from const matches hash_table_map's void* signature. */
218 hash_table_map (redirections, register_redirections_mapper, (void *)final);
/* True when URL *U should go through a proxy: proxying is enabled,
   a proxy is configured for U's scheme, and U's host is not excluded
   by the no_proxy list. */
221 #define USE_PROXY_P(u) (opt.use_proxy && getproxy((u)->scheme) \
222 && no_proxy_match((u)->host, \
223 (const char **)opt.no_proxy))
225 /* Retrieve the given URL. Decides which loop to call -- HTTP(S), FTP,
226 or simply copy it with file:// (#### the latter not yet
/* NOTE(review): this listing is fragmentary -- the return type,
   several declarations (url, u, result, local_file, use_proxy) and
   many braces/error-return lines are not visible; confirm against the
   full source.  FILE and NEWLOC are out-parameters receiving the
   local file name and the final (post-redirect) URL. */
229 retrieve_url (const char *origurl, char **file, char **newloc,
230 const char *refurl, int *dt)
234 int location_changed, dummy;
236 char *mynewloc, *proxy;
238 int up_error_code; /* url parse error code */
/* Lazily allocated; stays NULL until the first redirect is seen. */
240 struct hash_table *redirections = NULL;
242 /* If dt is NULL, just ignore it. */
/* Work on a private copy so redirects can rewrite `url' freely. */
245 url = xstrdup (origurl);
251 u = url_parse (url, &up_error_code);
254 logprintf (LOG_NOTQUIET, "%s: %s.\n", url, url_error (up_error_code));
256 string_set_free (redirections);
262 refurl = opt.referer;
270 use_proxy = USE_PROXY_P (u);
273 struct url *proxy_url;
275 /* Get the proxy server for the current scheme. */
276 proxy = getproxy (u->scheme);
279 logputs (LOG_NOTQUIET, _("Could not find proxy host.\n"));
282 string_set_free (redirections);
287 /* Parse the proxy URL. */
288 proxy_url = url_parse (proxy, &up_error_code);
291 logprintf (LOG_NOTQUIET, _("Error parsing proxy URL %s: %s.\n"),
292 proxy, url_error (up_error_code));
294 string_set_free (redirections);
/* Only HTTP proxies are supported here. */
298 if (proxy_url->scheme != SCHEME_HTTP)
300 logprintf (LOG_NOTQUIET, _("Error in proxy URL %s: Must be HTTP.\n"), proxy);
301 url_free (proxy_url);
303 string_set_free (redirections);
308 result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url);
309 url_free (proxy_url);
311 else if (u->scheme == SCHEME_HTTP
313 || u->scheme == SCHEME_HTTPS
317 result = http_loop (u, &mynewloc, &local_file, refurl, dt, NULL);
319 else if (u->scheme == SCHEME_FTP)
321 /* If this is a redirection, we must not allow recursive FTP
322 retrieval, so we save recursion to oldrec, and restore it
324 int oldrec = opt.recursive;
327 result = ftp_loop (u, dt);
328 opt.recursive = oldrec;
330 /* There is a possibility of having HTTP being redirected to
331 FTP. In these cases we must decide whether the text is HTML
332 according to the suffix. The HTML suffixes are `.html' and
333 `.htm', case-insensitive. */
334 if (redirections && u->local && (u->scheme == SCHEME_FTP))
336 char *suf = suffix (u->local);
337 if (suf && (!strcasecmp (suf, "html") || !strcasecmp (suf, "htm")))
/* --- Redirect handling: the scheme loop above reported NEWLOCATION
   and left the Location value in mynewloc.  NOTE(review): the
   surrounding loop construct is not visible in this fragment, but the
   cycle-detection logic below implies this path is re-entered once
   per redirect hop. */
343 location_changed = (result == NEWLOCATION);
344 if (location_changed)
/* NOTE(review): `construced_newloc' is a long-standing typo for
   "constructed"; it is local and harmless, left as-is here. */
346 char *construced_newloc;
347 struct url *newloc_parsed;
349 assert (mynewloc != NULL);
354 /* The HTTP specs only allow absolute URLs to appear in
355 redirects, but a ton of boneheaded webservers and CGIs out
356 there break the rules and use relative URLs, and popular
357 browsers are lenient about this, so wget should be too. */
358 construced_newloc = uri_merge (url, mynewloc);
360 mynewloc = construced_newloc;
362 /* Now, see if this new location makes sense. */
363 newloc_parsed = url_parse (mynewloc, &up_error_code);
366 logprintf (LOG_NOTQUIET, "%s: %s.\n", mynewloc,
367 url_error (up_error_code));
370 string_set_free (redirections);
376 /* Now mynewloc will become newloc_parsed->url, because if the
377 Location contained relative paths like .././something, we
378 don't want that propagating as url. */
380 mynewloc = xstrdup (newloc_parsed->url);
/* First redirect seen: create the history set on demand. */
384 redirections = make_string_hash_table (0);
385 /* Add current URL immediately so we can detect it as soon
386 as possible in case of a cycle. */
387 string_set_add (redirections, u->url);
390 /* The new location is OK. Check for redirection cycle by
391 peeking through the history of redirections. */
392 if (string_set_contains (redirections, newloc_parsed->url))
394 logprintf (LOG_NOTQUIET, _("%s: Redirection cycle detected.\n"),
396 url_free (newloc_parsed);
399 string_set_free (redirections);
404 string_set_add (redirections, newloc_parsed->url);
/* Success: record the download, its redirect chain, and (when the
   result is HTML) the file for later link conversion. */
417 register_download (url, local_file);
419 register_all_redirections (redirections, url);
421 register_html (url, local_file);
426 *file = local_file ? local_file : NULL;
428 FREE_MAYBE (local_file);
432 string_set_free (redirections);
439 ++global_download_count;
444 /* Find the URLs in the file and call retrieve_url() for each of
445 them. If HTML is non-zero, treat the file as HTML, and construct
446 the URLs accordingly.
448 If opt.recursive is set, call recursive_retrieve() for each file. */
/* NOTE(review): the return-type line and the declarations of `status'
   and `dt' are not visible in this fragment; confirm against the full
   source.  COUNT receives the number of URLs processed. */
450 retrieve_from_file (const char *file, int html, int *count)
453 struct urlpos *url_list, *cur_url;
/* Extract the URL list either by parsing FILE as HTML or by reading
   it as a plain one-URL-per-line file. */
455 url_list = (html ? get_urls_html (file, NULL, FALSE, NULL)
456 : get_urls_file (file));
457 status = RETROK; /* Suppose everything is OK. */
458 *count = 0; /* Reset the URL count. */
460 for (cur_url = url_list; cur_url; cur_url = cur_url->next, ++*count)
462 char *filename = NULL, *new_file;
/* Stop early once the --quota limit has been reached. */
465 if (downloaded_exceeds_quota ())
/* Recursive retrieval is handled by retrieve_tree() for non-FTP
   schemes; everything else goes through plain retrieve_url(). */
470 if (opt.recursive && cur_url->url->scheme != SCHEME_FTP)
471 status = retrieve_tree (cur_url->url->url);
473 status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, &dt);
475 if (filename && opt.delete_after && file_exists_p (filename))
477 DEBUGP (("Removing file due to --delete-after in"
478 " retrieve_from_file():\n"));
479 logprintf (LOG_VERBOSE, _("Removing %s.\n"), filename);
480 if (unlink (filename))
481 logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
485 FREE_MAYBE (new_file);
486 FREE_MAYBE (filename);
489 /* Free the linked list of URL-s. */
490 free_urlpos (url_list);
495 /* Print `giving up', or `retrying', depending on the impending
496 action. N1 and N2 are the attempt number and the attempt limit. */
498 printwhat (int n1, int n2)
/* On the final allowed attempt (n1 == n2) announce giving up;
   otherwise announce a retry. */
500 logputs (LOG_VERBOSE, (n1 == n2) ? _("Giving up.\n\n") : _("Retrying.\n\n"));
503 /* Increment opt.downloaded by BY_HOW_MUCH. If an overflow occurs,
504 set opt.downloaded_overflow to 1. */
/* NOTE(review): the return-type line and the declaration of `old' are
   not visible in this fragment. */
506 downloaded_increase (unsigned long by_how_much)
/* Once the counter has overflowed, further accounting is pointless. */
509 if (opt.downloaded_overflow)
511 old = opt.downloaded;
512 opt.downloaded += by_how_much;
/* Unsigned wraparound: the sum being smaller than the old value means
   the addition overflowed. */
513 if (opt.downloaded < old) /* carry flag, where are you when I
517 opt.downloaded_overflow = 1;
/* Clamp the counter at the maximum representable value. */
518 opt.downloaded = ~((VERY_LONG_TYPE)0);
522 /* Return non-zero if the downloaded amount of bytes exceeds the
523 desired quota. If quota is not set or if the amount overflowed, 0
526 downloaded_exceeds_quota (void)
530 if (opt.downloaded_overflow)
531 /* We don't really know. (Wildly) assume not. */
/* Quota reached only when the running total strictly exceeds it. */
534 return opt.downloaded > opt.quota;
537 /* If opt.wait or opt.waitretry are specified, and if certain
538 conditions are met, sleep the appropriate number of seconds. See
539 the documentation of --wait and --waitretry for more information.
541 COUNT is the count of current retrieval, beginning with 1. */
544 sleep_between_retrievals (int count)
546 static int first_retrieval = 1;
548 if (!first_retrieval && (opt.wait || opt.waitretry))
550 if (opt.waitretry && count > 1)
552 /* If opt.waitretry is specified and this is a retry, wait
553 for COUNT-1 number of seconds, or for opt.waitretry
555 if (count <= opt.waitretry)
558 sleep (opt.waitretry);
561 /* Otherwise, check if opt.wait is specified. If so, sleep. */