/* Conversion of links to local files.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free
   Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */
#endif /* HAVE_UNISTD_H */

static struct hash_table *dl_file_url_map;
struct hash_table *dl_url_file_map;

/* Set of HTML/CSS files downloaded in this Wget run, used for link
   conversion after Wget is done.  */
struct hash_table *downloaded_html_set;
struct hash_table *downloaded_css_set;

static void convert_links (const char *, struct urlpos *);
convert_links_in_hashtable (struct hash_table *downloaded_set,
  cnt = hash_table_count (downloaded_set);
  file_array = alloca_array (char *, cnt);
  string_set_to_array (downloaded_set, file_array);

  for (i = 0; i < cnt; i++)
      struct urlpos *urls, *cur_url;
      char *file = file_array[i];

      /* Determine the URL of the file.  get_urls_{html,css} will need
      url = hash_table_get (dl_file_url_map, file);
          DEBUGP (("Apparently %s has been removed.\n", file));
      DEBUGP (("Scanning %s (from %s)\n", file, url));

      /* Parse the file...  */
      urls = is_css ? get_urls_css_file (file, url) :
                      get_urls_html (file, url, NULL, NULL);

      /* We don't respect meta_disallow_follow here because, even if
         the file is not followed, we might still want to convert the
         links that have been followed from other files.  */

      for (cur_url = urls; cur_url; cur_url = cur_url->next)
          if (cur_url->link_base_p)
              /* Base references have been resolved by our parser, so
                 we turn the base URL into an empty string.  (Perhaps
                 we should remove the tag entirely?)  */
              cur_url->convert = CO_NULLIFY_BASE;

          /* We decide the direction of conversion according to whether
             a URL was downloaded.  Downloaded URLs will be converted
             ABS2REL, whereas non-downloaded will be converted REL2ABS.  */

          set_uri_encoding (pi, opt.locale, true);
          u = url_parse (cur_url->url->url, NULL, pi, true);
          local_name = hash_table_get (dl_url_file_map, u->url);

          /* Decide on the conversion type.  */
              /* We've downloaded this URL.  Convert it to relative
                 form.  We do this even if the URL already is in
                 relative form, because our directory structure may
                 not be identical to that on the server (think `-nd',
                 `--cut-dirs', etc.)  */
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (local_name);
              DEBUGP (("will convert url %s to local %s\n", u->url, local_name));

              /* We haven't downloaded this URL.  If it's not already
                 complete (including a full host name), convert it to
                 that form, so it can be reached while browsing this
              if (!cur_url->link_complete_p)
                cur_url->convert = CO_CONVERT_TO_COMPLETE;
              cur_url->local_name = NULL;
              DEBUGP (("will convert url %s to complete\n", u->url));

      /* Convert the links in the file.  */
      convert_links (file, urls);
/* This function is called when the retrieval is done to convert the
   links that have been downloaded.  It has to be called at the end of
   the retrieval, because only then does Wget know conclusively which
   URLs have been downloaded, and which not, so it can tell which
   direction to convert to.

   The "direction" means that the URLs to the files that have been
   downloaded get converted to the relative URL which will point to
   that file.  And the other URLs get converted to the remote URL to
   which they point.

   All the downloaded HTML and CSS files are kept in downloaded_html_set
   and downloaded_css_set, and the downloaded URLs in dl_url_file_map.
   All the information is extracted from those structures.  */
convert_all_links (void)
  struct ptimer *timer = ptimer_new ();

  convert_links_in_hashtable (downloaded_html_set, 0, &file_count);
  convert_links_in_hashtable (downloaded_css_set, 1, &file_count);

  secs = ptimer_measure (timer);
  logprintf (LOG_VERBOSE, _("Converted %d files in %s seconds.\n"),
             file_count, print_decimal (secs));

  ptimer_destroy (timer);
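
/* A minimal sketch of the intended call sequence (illustrative only and
   therefore kept under #if 0; the URL and the local file name are made
   up).  The register_* functions below are called while files are being
   retrieved; convert_all_links() runs once, from main(), after the
   retrieval is finished.  */
#if 0
  register_download ("http://www.example.com/", "www.example.com/index.html");
  register_html ("http://www.example.com/", "www.example.com/index.html");
  /* ... further downloads ... */
  convert_all_links ();   /* downloaded links -> relative, others -> absolute */
  convert_cleanup ();
#endif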
static void write_backup_file (const char *, downloaded_file_t);
static const char *replace_plain (const char *, int, FILE *, const char *);
static const char *replace_attr (const char *, int, FILE *, const char *);
static const char *replace_attr_refresh_hack (const char *, int, FILE *,
static char *local_quote_string (const char *);
static char *construct_relative (const char *, const char *);
/* Change the links in one file.  LINKS is a list of links in the
   document, along with their positions and the desired direction of
   the conversion.  */
convert_links (const char *file, struct urlpos *links)
  struct file_memory *fm;

  downloaded_file_t downloaded_file_return;

  int to_url_count = 0, to_file_count = 0;

  logprintf (LOG_VERBOSE, _("Converting %s... "), file);

  /* First we do a "dry run": go through the list LINKS and see whether
     any URL needs to be converted in the first place.  If not, just
     leave the file alone.  */

  for (dry = links; dry; dry = dry->next)
    if (dry->convert != CO_NOCONVERT)

      logputs (LOG_VERBOSE, _("nothing to do.\n"));

  fm = wget_read_file (file);
      logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
                 file, strerror (errno));

  downloaded_file_return = downloaded_file (CHECK_FOR_FILE, file);
  if (opt.backup_converted && downloaded_file_return)
    write_backup_file (file, downloaded_file_return);

  /* Before opening the file for writing, unlink the file.  This is
     important if the data in FM is mmaped.  In such a case, nulling the
     file, which is what fopen() below does, would make us read all
     zeroes from the mmaped region.  */
  if (unlink (file) < 0 && errno != ENOENT)
      logprintf (LOG_NOTQUIET, _("Unable to delete %s: %s\n"),
                 quote (file), strerror (errno));
      wget_read_file_free (fm);

  /* Now open the file for writing.  */
  fp = fopen (file, "wb");
      logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
                 file, strerror (errno));
      wget_read_file_free (fm);
  /* Here we loop through all the URLs in file, replacing those of
     them that are downloaded with relative references.  */

  for (link = links; link; link = link->next)
      char *url_start = fm->content + link->pos;

      if (link->pos >= fm->length)
          DEBUGP (("Something strange is going on.  Please investigate."));

      /* If the URL is not to be converted, skip it.  */
      if (link->convert == CO_NOCONVERT)
          DEBUGP (("Skipping %s at position %d.\n", link->url->url, link->pos));

      /* Echo the file contents, up to the offending URL's opening
         quote, to the outfile.  */
      fwrite (p, 1, url_start - p, fp);

      switch (link->convert)
        case CO_CONVERT_TO_RELATIVE:
          /* Convert absolute URL to relative.  */
            char *newname = construct_relative (file, link->local_name);
            char *quoted_newname = local_quote_string (newname);

            if (link->link_css_p)
              p = replace_plain (p, link->size, fp, quoted_newname);
            else if (!link->link_refresh_p)
              p = replace_attr (p, link->size, fp, quoted_newname);
              p = replace_attr_refresh_hack (p, link->size, fp, quoted_newname,
                                             link->refresh_timeout);

            DEBUGP (("TO_RELATIVE: %s to %s at position %d in %s.\n",
                     link->url->url, newname, link->pos, file));

            xfree (quoted_newname);

        case CO_CONVERT_TO_COMPLETE:
          /* Convert the link to absolute URL.  */
            char *newlink = link->url->url;
            char *quoted_newlink = html_quote_string (newlink);

            if (link->link_css_p)
              p = replace_plain (p, link->size, fp, quoted_newlink);
            else if (!link->link_refresh_p)
              p = replace_attr (p, link->size, fp, quoted_newlink);
              p = replace_attr_refresh_hack (p, link->size, fp, quoted_newlink,
                                             link->refresh_timeout);

            DEBUGP (("TO_COMPLETE: <something> to %s at position %d in %s.\n",
                     newlink, link->pos, file));
            xfree (quoted_newlink);

        case CO_NULLIFY_BASE:
          /* Change the base href to "".  */
          p = replace_attr (p, link->size, fp, "");

  /* Output the rest of the file.  */
  if (p - fm->content < fm->length)
    fwrite (p, 1, fm->length - (p - fm->content), fp);

  wget_read_file_free (fm);

  logprintf (LOG_VERBOSE, "%d-%d\n", to_file_count, to_url_count);
/* Construct and return a link that points from BASEFILE to LINKFILE.
   Both files should be local file names, BASEFILE of the referring
   file, and LINKFILE of the referred file.

   cr("foo", "bar")         -> "bar"
   cr("A/foo", "A/bar")     -> "bar"
   cr("A/foo", "A/B/bar")   -> "B/bar"
   cr("A/X/foo", "A/Y/bar") -> "../Y/bar"
   cr("X/", "Y/bar")        -> "../Y/bar" (trailing slash does matter in BASE)

   Both files should be absolute or relative, otherwise strange
   results might ensue.  The function makes no special efforts to
   handle "." and ".." in links, so make sure they're not there
   (e.g. using path_simplify).  */
construct_relative (const char *basefile, const char *linkfile)

  /* First, skip the initial directory components common to both
  for (b = basefile, l = linkfile; *b == *l && *b != '\0'; ++b, ++l)
        start = (b - basefile) + 1;

  /* With common directories out of the way, the situation we have is

         b - b1/b2/[...]/bfile
         l - l1/l2/[...]/lfile

     The link we're constructing needs to be:

         lnk - ../../l1/l2/[...]/lfile

     Where the number of ".."'s equals the number of bN directory

  /* Count the directory components in B.  */
  for (b = basefile; *b; b++)

  /* Construct LINK as explained above.  */
  link = xmalloc (3 * basedirs + strlen (linkfile) + 1);
  for (i = 0; i < basedirs; i++)
    memcpy (link + 3 * i, "../", 3);
  strcpy (link + 3 * i, linkfile);
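
/* A usage sketch matching the examples in the comment above (illustrative
   only, hence #if 0); the result is a freshly xmalloc'ed string that the
   caller frees.  */
#if 0
  char *rel = construct_relative ("A/X/foo", "A/Y/bar");
  /* rel == "../Y/bar" */
  xfree (rel);
#endif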
/* Used by write_backup_file to remember which files have been
static struct hash_table *converted_files;

write_backup_file (const char *file, downloaded_file_t downloaded_file_return)
  /* Rather than just writing over the original .html file with the
     converted version, save the former to *.orig.  Note we only do
     this for files we've _successfully_ downloaded, so we don't
     clobber .orig files sitting around from previous invocations.
     On VMS, use "_orig" instead of ".orig".  See "wget.h".  */
  /* Construct the backup filename as the original name plus ".orig".  */
  size_t filename_len = strlen (file);
  char *filename_plus_orig_suffix;

  /* TODO: hack this to work with css files */
  if (downloaded_file_return == FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED)
      /* Just write "orig" over "html".  We need to do it this way
         because when we're checking to see if we've downloaded the
         file before (to see if we can skip downloading it), we don't
         know if it's a text/html file.  Therefore we don't know yet
         at that stage that -E is going to cause us to tack on
         ".html", so we need to compare vs. the original URL plus
         ".orig", not the original URL plus ".html.orig".  */
      filename_plus_orig_suffix = alloca (filename_len + 1);
      strcpy (filename_plus_orig_suffix, file);
      strcpy ((filename_plus_orig_suffix + filename_len) - 4, "orig");
  else /* downloaded_file_return == FILE_DOWNLOADED_NORMALLY */
      /* Append ".orig" to the name.  */
      filename_plus_orig_suffix = alloca (filename_len + sizeof (ORIG_SFX));
      strcpy (filename_plus_orig_suffix, file);
      strcpy (filename_plus_orig_suffix + filename_len, ORIG_SFX);
  if (!converted_files)
    converted_files = make_string_hash_table (0);

  /* We can get called twice on the same URL thanks to the
     convert_all_links() call in main().  If we write the .orig file
     each time in such a case, it'll end up containing the first-pass
     conversion, not the original file.  So, see if we've already been
     called on this file.  */
  if (!string_set_contains (converted_files, file))
      /* Rename <file> to <file>.orig before the former gets written over.  */
      if (rename (file, filename_plus_orig_suffix) != 0)
        logprintf (LOG_NOTQUIET, _("Cannot back up %s as %s: %s\n"),
                   file, filename_plus_orig_suffix, strerror (errno));

      /* Remember that we've already written a .orig backup for this file.
         Note that we never free this memory since we need it till the
         convert_all_links() call, which is one of the last things the
         program does before terminating.  BTW, I'm not sure if it would be
         safe to just set 'converted_file_ptr->string' to 'file' below,
         rather than making a copy of the string...  Another note is that I
         thought I could just add a field to the urlpos structure saying
         that we'd written a .orig file for this URL, but that didn't work,
         so I had to make this separate list.
         -- Dan Harkless <wget@harkless.org>

         This [adding a field to the urlpos structure] didn't work
         because convert_file() is called from convert_all_links at
         the end of the retrieval with a freshly built new urlpos
         -- Hrvoje Niksic <hniksic@xemacs.org>
      string_set_add (converted_files, file);
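
/* A naming sketch for the two branches above (illustrative only, hence
   #if 0; the file names are made up and ORIG_SFX is assumed to be ".orig",
   the non-VMS case).  */
#if 0
  write_backup_file ("www.example.com/page.html", FILE_DOWNLOADED_NORMALLY);
  /* backs it up as "www.example.com/page.html.orig" */
  write_backup_file ("www.example.com/doc.html",
                     FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED);
  /* backs it up as "www.example.com/doc.orig" -- the "-E"-added "html"
     is overwritten with "orig" */
#endif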
static bool find_fragment (const char *, int, const char **, const char **);

/* Replace a string with NEW_TEXT.  Ignore quoting.  */

replace_plain (const char *p, int size, FILE *fp, const char *new_text)
  fputs (new_text, fp);

/* Replace an attribute's original text with NEW_TEXT.  */

replace_attr (const char *p, int size, FILE *fp, const char *new_text)
  bool quote_flag = false;
  char quote_char = '\"';       /* use "..." for quoting, unless the
                                   original value is quoted, in which
                                   case reuse its quoting char.  */
  const char *frag_beg, *frag_end;

  /* Structure of our string is:

       "...old-contents..."
       <---    size    --->  (with quotes)

     or:

       ...old-contents...
       <---  size  --->      (no quotes)  */

  if (*p == '\"' || *p == '\'')
      size -= 2;                /* disregard opening and closing quote */

  putc (quote_char, fp);
  fputs (new_text, fp);

  /* Look for fragment identifier, if any.  */
  if (find_fragment (p, size, &frag_beg, &frag_end))
    fwrite (frag_beg, 1, frag_end - frag_beg, fp);

  putc (quote_char, fp);
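
/* Behavior sketch (illustrative): if the original attribute value carries
   a fragment, the fragment is preserved across the replacement, e.g.
   rewriting href="old.html#sec2" with NEW_TEXT "local/new.html" emits
   href="local/new.html#sec2", reusing the original quoting character.  */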
/* The same as REPLACE_ATTR, but used when replacing
   <meta http-equiv=refresh content="new_text"> because we need to
   append "timeout_value; URL=" before the new_text.  */

replace_attr_refresh_hack (const char *p, int size, FILE *fp,
                           const char *new_text, int timeout)
  char *new_with_timeout = (char *)alloca (numdigit (timeout)

  sprintf (new_with_timeout, "%d; URL=%s", timeout, new_text);

  return replace_attr (p, size, fp, new_with_timeout);
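
/* A small sketch of the hack above (illustrative only, hence #if 0;
   P, SIZE and FP stand for the caller's variables as in convert_links()):
   with a refresh timeout of 3 and NEW_TEXT "index.html", the attribute
   value written out is "3; URL=index.html".  */
#if 0
  replace_attr_refresh_hack (p, size, fp, "index.html", 3);
  /* emits: content="3; URL=index.html" */
#endif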
/* Find the first occurrence of '#' in [BEG, BEG+SIZE) that is not
   preceded by '&'.  If the character is not found, return false.  If
   the character is found, return true and set BP and EP to point to
   the beginning and end of the region.

   This is used for finding the fragment identifiers in URLs.  */

find_fragment (const char *beg, int size, const char **bp, const char **ep)
  const char *end = beg + size;
  bool saw_amp = false;
  for (; beg < end; beg++)
/* Quote FILE for use as local reference to an HTML file.

   We quote ? as %3F to avoid passing part of the file name as the
   parameter when browsing the converted file through HTTP.  However,
   it is safe to do this only when `--adjust-extension' is turned on.
   This is because converting "index.html?foo=bar" to
   "index.html%3Ffoo=bar" would break local browsing, as the latter
   isn't even recognized as an HTML file!  However, converting
   "index.html?foo=bar.html" to "index.html%3Ffoo=bar.html" should be
   safe for both local and HTTP-served browsing.

   We always quote "#" as "%23", "%" as "%25" and ";" as "%3B"
   because those characters have special meanings in URLs.  */

local_quote_string (const char *file)
  char *any = strpbrk (file, "?#%;");
    return html_quote_string (file);

  /* Allocate space assuming the worst-case scenario, each character
     having to be quoted.  */
  to = newname = (char *)alloca (3 * strlen (file) + 1);
  for (from = file; *from; from++)
      if (opt.adjust_extension)

  return html_quote_string (newname);
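
/* A usage sketch (illustrative only, hence #if 0; the file name is made
   up).  With --adjust-extension in effect, '?' is quoted along with the
   characters that are always quoted, so the query and fragment do not
   confuse a browser viewing the local copy.  */
#if 0
  char *ref = local_quote_string ("index.html?foo=bar#top");
  /* ref == "index.html%3Ffoo=bar%23top" (when opt.adjust_extension is set) */
#endif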
/* Book-keeping code for dl_file_url_map, dl_url_file_map,
   downloaded_html_set, and downloaded_css_set.  Other code calls
   these functions to let us know that a file has been downloaded.  */

#define ENSURE_TABLES_EXIST do {                        \
  if (!dl_file_url_map)                                 \
    dl_file_url_map = make_string_hash_table (0);       \
  if (!dl_url_file_map)                                 \
    dl_url_file_map = make_string_hash_table (0);       \
/* Return true if S1 and S2 are the same, except for "/index.html".
   The five cases in which it returns one are (substitute any
   substring for "foo"):

   m("foo/index.html", "foo/")  ==> 1
   m("foo/", "foo/index.html")  ==> 1
   m("foo", "foo/index.html")   ==> 1
   m("foo", "foo/")             ==> 1
   m("foo", "foo")              ==> 1  */

match_except_index (const char *s1, const char *s2)

  /* Skip common substring.  */
  for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)

  /* Strings differ at the very beginning -- bail out.  We need to
     check this explicitly to avoid `lng - 1' reading outside the

  /* Both strings hit EOF -- strings are equal.  */

  /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux".  */

  /* S1 is the longer one.  */

  /* S2 is the longer one.  */

  /* foo/index.html */  /* or */  /* foo/index.html */

  /* The right-hand case.  */
  if (*lng == '/' && *(lng + 1) == '\0')

  return 0 == strcmp (lng, "/index.html");
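
/* A quick illustration (illustrative only, hence #if 0): these pairs
   differ only by an "index.html" component and therefore match, while
   the last pair does not.  */
#if 0
  match_except_index ("http://www.server.com/", "http://www.server.com/index.html");  /* -> 1 */
  match_except_index ("foo", "foo/index.html");                                       /* -> 1 */
  match_except_index ("foo/bar", "foo/qux");                                          /* -> 0 */
#endif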
dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
  char *mapping_url = (char *)key;
  char *mapping_file = (char *)value;
  char *file = (char *)arg;

  if (0 == strcmp (mapping_file, file))
      hash_table_remove (dl_url_file_map, mapping_url);
      xfree (mapping_file);

  /* Continue mapping.  */

/* Remove all associations from various URLs to FILE from dl_url_file_map.  */

dissociate_urls_from_file (const char *file)
  /* Can't use hash_table_iter_* because the table mutates while mapping.  */
  hash_table_for_each (dl_url_file_map, dissociate_urls_from_file_mapper,
/* Register that URL has been successfully downloaded to FILE.  This
   is used by the link conversion code to convert references to URLs
   to references to local files.  It is also being used to check if a
   URL has already been downloaded.  */

register_download (const char *url, const char *file)
  char *old_file, *old_url;

  /* With some forms of retrieval the same file name can end up being
     reused for a different URL, although this is neither likely nor
     particularly desirable.  If both are downloaded, the second
     download will override the first one.  When that happens,
     dissociate the old file name from the URL.  */

  if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
      if (0 == strcmp (url, old_url))
        /* We have somehow managed to download the same URL twice.

      if (match_except_index (url, old_url)
          && !hash_table_contains (dl_url_file_map, url))
        /* The two URLs differ only in the "index.html" ending.  For
           example, one is "http://www.server.com/", and the other is
           "http://www.server.com/index.html".  Don't remove the old
           one, just add the new one as a non-canonical entry.  */

      hash_table_remove (dl_file_url_map, file);

      /* Remove all the URLs that point to this file.  Yes, there can
         be more than one such URL, because we store redirections as
         multiple entries in dl_url_file_map.  For example, if URL1
         redirects to URL2 which gets downloaded to FILE, we map both
         URL1 and URL2 to FILE in dl_url_file_map.  (dl_file_url_map
         only points to URL2.)  When another URL gets loaded to FILE,
         we want both URL1 and URL2 dissociated from it.

         This is a relatively expensive operation because it performs
         a linear search of the whole hash table, but it should be
         called very rarely, only when two URLs resolve to the same
         file name, *and* the "<file>.1" extensions are turned off.
         In other words, almost never.  */
      dissociate_urls_from_file (file);

  hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));

  /* A URL->FILE mapping is not possible without a FILE->URL mapping.
     If the latter were present, it should have been removed by the
     above `if'.  So we could write:

         assert (!hash_table_contains (dl_url_file_map, url));

     The above is correct when running in recursive mode where the
     same URL always resolves to the same file.  But if you do
     something like:

         wget URL URL

     then the first URL will resolve to "FILE", and the other to
     "FILE.1".  In that case, FILE.1 will not be found in
     dl_file_url_map, but URL will still point to FILE in
     dl_url_file_map.  */
  if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
      hash_table_remove (dl_url_file_map, url);

  hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
/* Register that FROM has been redirected to TO.  This assumes that TO
   is successfully downloaded and already registered using
   register_download() above.  */

register_redirection (const char *from, const char *to)

  file = hash_table_get (dl_url_file_map, to);
  assert (file != NULL);
  if (!hash_table_contains (dl_url_file_map, from))
    hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
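
/* A minimal sketch of how the two tables interact (illustrative only,
   hence #if 0; the URLs and the local file name are made up): after a
   download reached through a redirection, both URLs map to the same
   local file in dl_url_file_map, while dl_file_url_map records only the
   URL the file was finally fetched from.  */
#if 0
  register_download ("http://www.example.com/new", "www.example.com/new.html");
  register_redirection ("http://www.example.com/old",
                        "http://www.example.com/new");
  /* dl_url_file_map: both URLs -> "www.example.com/new.html"
     dl_file_url_map: "www.example.com/new.html" -> "http://www.example.com/new" */
#endif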
/* Register that the file has been deleted.  */

register_delete_file (const char *file)
  char *old_url, *old_file;

  if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))

  hash_table_remove (dl_file_url_map, file);

  dissociate_urls_from_file (file);

/* Register that FILE is an HTML file that has been downloaded.  */

register_html (const char *url, const char *file)
  if (!downloaded_html_set)
    downloaded_html_set = make_string_hash_table (0);
  string_set_add (downloaded_html_set, file);

/* Register that FILE is a CSS file that has been downloaded.  */

register_css (const char *url, const char *file)
  if (!downloaded_css_set)
    downloaded_css_set = make_string_hash_table (0);
  string_set_add (downloaded_css_set, file);
static void downloaded_files_free (void);

/* Clean up the data structures associated with this file.  */

convert_cleanup (void)
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;

      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;

  if (downloaded_html_set)
    string_set_free (downloaded_html_set);
  downloaded_files_free ();
    string_set_free (converted_files);
/* Book-keeping code for downloaded files that enables extension

/* This table should really be merged with dl_file_url_map and
   downloaded_html_set.  This was originally a list, but I changed
   it to a hash table because it was actually taking a lot of time to
   find things in it.  */

static struct hash_table *downloaded_files_hash;

/* We're storing "modes" of type downloaded_file_t in the hash table.
   However, our hash tables only accept pointers for keys and values.
   So when we need a pointer, we use the address of a
   downloaded_file_t variable of static storage.  */

static downloaded_file_t *
downloaded_mode_to_ptr (downloaded_file_t mode)
  static downloaded_file_t
    v1 = FILE_NOT_ALREADY_DOWNLOADED,
    v2 = FILE_DOWNLOADED_NORMALLY,
    v3 = FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED,

    case FILE_NOT_ALREADY_DOWNLOADED:
    case FILE_DOWNLOADED_NORMALLY:
    case FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED:
/* Remembers which files have been downloaded.  In the standard case,
   should be called with mode == FILE_DOWNLOADED_NORMALLY for each
   file we actually download successfully (i.e. not for ones we have
   failures on or that we skip due to -N).

   When we've downloaded a file and tacked on a ".html" extension due
   to -E, call this function with
   FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED rather than
   FILE_DOWNLOADED_NORMALLY.

   If you just want to check if a file has been previously added
   without adding it, call with mode == CHECK_FOR_FILE.  Please be
   sure to call this function with local filenames, not remote
   URLs.  */

downloaded_file (downloaded_file_t mode, const char *file)
  downloaded_file_t *ptr;

  if (mode == CHECK_FOR_FILE)
      if (!downloaded_files_hash)
        return FILE_NOT_ALREADY_DOWNLOADED;
      ptr = hash_table_get (downloaded_files_hash, file);
        return FILE_NOT_ALREADY_DOWNLOADED;

  if (!downloaded_files_hash)
    downloaded_files_hash = make_string_hash_table (0);

  ptr = hash_table_get (downloaded_files_hash, file);

  ptr = downloaded_mode_to_ptr (mode);
  hash_table_put (downloaded_files_hash, xstrdup (file), ptr);

  return FILE_NOT_ALREADY_DOWNLOADED;
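
/* A usage sketch (illustrative only, hence #if 0; the file name is made
   up): check first with CHECK_FOR_FILE, then record the download.  */
#if 0
  if (downloaded_file (CHECK_FOR_FILE, "www.example.com/index.html")
      == FILE_NOT_ALREADY_DOWNLOADED)
    {
      /* ... fetch the file ... */
      downloaded_file (FILE_DOWNLOADED_NORMALLY, "www.example.com/index.html");
    }
#endif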
downloaded_files_free (void)
  if (downloaded_files_hash)
      hash_table_iterator iter;
      for (hash_table_iterate (downloaded_files_hash, &iter);
           hash_table_iter_next (&iter);
      hash_table_destroy (downloaded_files_hash);
      downloaded_files_hash = NULL;
/* The function returns the pointer to the malloc-ed quoted version of
   string s.  It will recognize and quote numeric and special graphic
   entities, as per RFC1866:

   No other entities are recognized or replaced.  */

html_quote_string (const char *s)

  /* Pass through the string, and count the new size.  */
  for (i = 0; *s; s++, i++)
        i += 4;                 /* `amp;' */
      else if (*s == '<' || *s == '>')
        i += 3;                 /* `lt;' and `gt;' */
      else if (*s == '\"')
        i += 5;                 /* `quot;' */

  res = xmalloc (i + 1);

  for (p = res; *s; s++)
        *p++ = (*s == '<' ? 'l' : 'g');
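
/* A usage sketch (illustrative only, hence #if 0): the rules above turn
   '&', '<', '>' and '"' into their named entities; the result is
   malloc'ed and owned by the caller.  */
#if 0
  char *q = html_quote_string ("a&b<c>\"d\"");
  /* q == "a&amp;b&lt;c&gt;&quot;d&quot;" */
  xfree (q);
#endif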