X-Git-Url: http://sjero.net/git/?a=blobdiff_plain;f=src%2Fhtml-url.c;h=218659d454d4a23b34c0288f1b848585a14b1e4b;hb=da2ac85f4063a8990f68dfadcfd47e53c0b63bf1;hp=c9cf28f6df39fd470966b236b13d16c7e0f184b0;hpb=caae3b70f46bd519857b595f7f06ea0179551336;p=wget

diff --git a/src/html-url.c b/src/html-url.c
index c9cf28f6..218659d4 100644
--- a/src/html-url.c
+++ b/src/html-url.c
@@ -174,6 +174,10 @@ static const char *additional_attributes[] = {
 static struct hash_table *interesting_tags;
 static struct hash_table *interesting_attributes;
 
+/* Will contain the (last) charset found in 'http-equiv=content-type'
+   meta tags */
+static char *meta_charset;
+
 static void
 init_interesting (void)
 {
@@ -186,7 +190,7 @@ init_interesting (void)
      matches the user's preferences as specified through --ignore-tags
      and --follow-tags.  */
 
-  int i;
+  size_t i;
   interesting_tags = make_nocase_string_hash_table (countof (known_tags));
 
   /* First, add all the tags we know how to handle, mapped to their
@@ -284,7 +288,7 @@ append_url (const char *link_uri, int position, int size,
       return NULL;
     }
 
-  url = url_parse (link_uri, NULL);
+  url = url_parse (link_uri, NULL, NULL, false);
   if (!url)
     {
       DEBUGP (("%s: link \"%s\" doesn't parse.\n",
@@ -300,10 +304,13 @@ append_url (const char *link_uri, int position, int size,
 
       char *complete_uri = uri_merge (base, link_uri);
 
-      DEBUGP (("%s: merge(\"%s\", \"%s\") -> %s\n",
-               ctx->document_file, base, link_uri, complete_uri));
+      DEBUGP (("%s: merge(%s, %s) -> %s\n",
+               quotearg_n_style (0, escape_quoting_style, ctx->document_file),
+               quote_n (1, base),
+               quote_n (2, link_uri),
+               quotearg_n_style (3, escape_quoting_style, complete_uri)));
 
-      url = url_parse (complete_uri, NULL);
+      url = url_parse (complete_uri, NULL, NULL, false);
       if (!url)
         {
           DEBUGP (("%s: merged link \"%s\" doesn't parse.\n",
@@ -314,7 +321,7 @@ append_url (const char *link_uri, int position, int size,
       xfree (complete_uri);
     }
 
-  DEBUGP (("appending \"%s\" to urlpos.\n", url->url));
+  DEBUGP (("appending %s to urlpos.\n", quote (url->url)));
 
   newel = xnew0 (struct urlpos);
   newel->url = url;
@@ -360,7 +367,8 @@ check_style_attr (struct taginfo *tag, struct map_context *ctx)
 static void
 tag_find_urls (int tagid, struct taginfo *tag, struct map_context *ctx)
 {
-  int i, attrind;
+  size_t i;
+  int attrind;
   int first = -1;
 
   for (i = 0; i < countof (tag_url_attributes); i++)
@@ -387,7 +395,7 @@ tag_find_urls (int tagid, struct taginfo *tag, struct map_context *ctx)
       /* Find whether TAG/ATTRIND is a combination that contains a
          URL. */
       char *link = tag->attrs[attrind].value;
-      const int size = countof (tag_url_attributes);
+      const size_t size = countof (tag_url_attributes);
 
       /* If you're cringing at the inefficiency of the nested loops,
          remember that they both iterate over a very small number of
@@ -552,6 +560,23 @@ tag_handle_meta (int tagid, struct taginfo *tag, struct map_context *ctx)
           entry->link_expect_html = 1;
         }
     }
+  else if (http_equiv && 0 == strcasecmp (http_equiv, "content-type"))
+    {
+      /* Handle stuff like:
+         <meta http-equiv="Content-Type" content="text/html; charset=CHARSET"> */
+
+      char *mcharset;
+      char *content = find_attr (tag, "content", NULL);
+      if (!content)
+        return;
+
+      mcharset = parse_charset (content);
+      if (!mcharset)
+        return;
+
+      xfree_null (meta_charset);
+      meta_charset = mcharset;
+    }
   else if (name && 0 == strcasecmp (name, "robots"))
     {
       /* Handle stuff like:
@@ -565,15 +590,25 @@ tag_handle_meta (int tagid, struct taginfo *tag, struct map_context *ctx)
     {
       while (*content)
         {
-          /* Find the next occurrence of ',' or the end of
-             the string.  */
-          char *end = strchr (content, ',');
-          if (end)
-            ++end;
-          else
-            end = content + strlen (content);
+          char *end;
+          /* Skip any initial whitespace. */
+          content += strspn (content, " \f\n\r\t\v");
+          /* Find the next occurrence of ',' or whitespace,
+           * or the end of the string. */
+          end = content + strcspn (content, ", \f\n\r\t\v");
           if (!strncasecmp (content, "nofollow", end - content))
             ctx->nofollow = true;
+          /* Skip past the next comma, if any. */
+          if (*end == ',')
+            ++end;
+          else
+            {
+              end = strchr (end, ',');
+              if (end)
+                ++end;
+              else
+                end = content + strlen (content);
+            }
           content = end;
         }
     }
@@ -616,7 +651,8 @@ collect_tags_mapper (struct taginfo *tag, void *arg)
    <base href=...> and does the right thing.  */
 
 struct urlpos *
-get_urls_html (const char *file, const char *url, bool *meta_disallow_follow)
+get_urls_html (const char *file, const char *url, bool *meta_disallow_follow,
+               struct iri *iri)
 {
   struct file_memory *fm;
   struct map_context ctx;
@@ -656,6 +692,10 @@ get_urls_html (const char *file, const char *url, bool *meta_disallow_follow)
   map_html_tags (fm->content, fm->length, collect_tags_mapper, &ctx,
                  flags, NULL, interesting_attributes);
 
+  /* If meta charset isn't null, override content encoding */
+  if (iri && meta_charset)
+    set_content_encoding (iri, meta_charset);
+
   DEBUGP (("no-follow in %s: %d\n", file, ctx.nofollow));
   if (meta_disallow_follow)
     *meta_disallow_follow = ctx.nofollow;
@@ -725,12 +765,14 @@ get_urls_file (const char *file)
           url_text = merged;
         }
 
-      url = url_parse (url_text, &up_error_code);
+      url = url_parse (url_text, &up_error_code, NULL, false);
       if (!url)
         {
+          char *error = url_error (url_text, up_error_code);
           logprintf (LOG_NOTQUIET, _("%s: Invalid URL %s: %s\n"),
-                     file, url_text, url_error (up_error_code));
+                     file, url_text, error);
          xfree (url_text);
+          xfree (error);
           continue;
         }
       xfree (url_text);
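
For reference, below is a minimal standalone sketch (not part of the patch) of the comma/whitespace-tolerant tokenization that the reworked robots-meta loop above performs. The function name and test strings are illustrative only, and the token check here is tightened to an exact-length match rather than the patch's length-limited strncasecmp.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>            /* strncasecmp */

/* Return true if CONTENT (the value of a robots meta tag's "content"
   attribute) lists the "nofollow" token, accepting commas and/or
   whitespace as separators. */
static bool
robots_has_nofollow (const char *content)
{
  while (*content)
    {
      const char *end;

      /* Skip whitespace preceding the token. */
      content += strspn (content, " \f\n\r\t\v");

      /* The token ends at the next comma, whitespace, or NUL. */
      end = content + strcspn (content, ", \f\n\r\t\v");

      if ((size_t) (end - content) == strlen ("nofollow")
          && 0 == strncasecmp (content, "nofollow", end - content))
        return true;

      /* Resume after the comma separating this token from the next. */
      end = strchr (end, ',');
      if (!end)
        break;
      content = end + 1;
    }
  return false;
}

int
main (void)
{
  printf ("%d\n", robots_has_nofollow ("index , NOFOLLOW"));  /* prints 1 */
  printf ("%d\n", robots_has_nofollow ("index, follow"));     /* prints 0 */
  return 0;
}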