/* Collect URLs from HTML source.
- Copyright (C) 1998, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1998, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GNU Wget.
GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
+ (at your option) any later version.
GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+In addition, as a special exception, the Free Software Foundation
+gives permission to link the code of its release of Wget with the
+OpenSSL project's "OpenSSL" library (or with modified versions of it
+that use the same license as the "OpenSSL" library), and distribute
+the linked executables. You must obey the GNU General Public License
+in all respects for all of the code used other than "OpenSSL". If you
+modify this file, you may extend this exception to your version of the
+file, but you are not obligated to do so. If you do not wish to do
+so, delete this exception statement from your version. */
#include <config.h>
#include "html-parse.h"
#include "url.h"
#include "utils.h"
+#include "hash.h"
+#include "convert.h"
#ifndef errno
extern int errno;
#endif
-enum tag_category { TC_LINK, TC_SPEC };
+struct map_context;
+
+/* A tag handler receives the tag's numeric id (the TAG_* enum below),
+   the parsed tag from html-parse, and the per-document context in
+   which URLs are being collected.  */
+
+typedef void (*tag_handler_t) PARAMS ((int, struct taginfo *,
+ struct map_context *));
+
+#define DECLARE_TAG_HANDLER(fun) \
+ static void fun PARAMS ((int, struct taginfo *, struct map_context *))
+
+DECLARE_TAG_HANDLER (tag_find_urls);
+DECLARE_TAG_HANDLER (tag_handle_base);
+DECLARE_TAG_HANDLER (tag_handle_form);
+DECLARE_TAG_HANDLER (tag_handle_link);
+DECLARE_TAG_HANDLER (tag_handle_meta);
+
+/* Numeric identifiers for the known tags; used as the `tagid' member
+   of known_tags and tag_url_attributes below.  */
+enum {
+ TAG_A,
+ TAG_APPLET,
+ TAG_AREA,
+ TAG_BASE,
+ TAG_BGSOUND,
+ TAG_BODY,
+ TAG_EMBED,
+ TAG_FIG,
+ TAG_FORM,
+ TAG_FRAME,
+ TAG_IFRAME,
+ TAG_IMG,
+ TAG_INPUT,
+ TAG_LAYER,
+ TAG_LINK,
+ TAG_META,
+ TAG_OVERLAY,
+ TAG_SCRIPT,
+ TAG_TABLE,
+ TAG_TD,
+ TAG_TH
+};
-/* Here we try to categorize the known tags. Each tag has its ID and
- cetegory. Category TC_LINK means that one or more of its
- attributes contain links that should be retrieved. TC_SPEC means
- that the tag is specific in some way, and has to be handled
- specially. */
-static struct {
+/* The list of known tags and functions used for handling them. Most
+ tags are simply harvested for URLs. */
+static struct known_tag {
+ int tagid;
const char *name;
- enum tag_category category;
+ tag_handler_t handler;
} known_tags[] = {
-#define TAG_A 0
- { "a", TC_LINK },
-#define TAG_APPLET 1
- { "applet", TC_LINK },
-#define TAG_AREA 2
- { "area", TC_LINK },
-#define TAG_BASE 3
- { "base", TC_SPEC },
-#define TAG_BGSOUND 4
- { "bgsound", TC_LINK },
-#define TAG_BODY 5
- { "body", TC_LINK },
-#define TAG_EMBED 6
- { "embed", TC_LINK },
-#define TAG_FIG 7
- { "fig", TC_LINK },
-#define TAG_FRAME 8
- { "frame", TC_LINK },
-#define TAG_IFRAME 9
- { "iframe", TC_LINK },
-#define TAG_IMG 10
- { "img", TC_LINK },
-#define TAG_INPUT 11
- { "input", TC_LINK },
-#define TAG_LAYER 12
- { "layer", TC_LINK },
-#define TAG_LINK 13
- { "link", TC_SPEC },
-#define TAG_META 14
- { "meta", TC_SPEC },
-#define TAG_OVERLAY 15
- { "overlay", TC_LINK },
-#define TAG_SCRIPT 16
- { "script", TC_LINK },
-#define TAG_TABLE 17
- { "table", TC_LINK },
-#define TAG_TD 18
- { "td", TC_LINK },
-#define TAG_TH 19
- { "th", TC_LINK }
+ /* Each entry maps a tag name to the handler that collect_tags_mapper
+    dispatches to for that tag.  */
+ { TAG_A, "a", tag_find_urls },
+ { TAG_APPLET, "applet", tag_find_urls },
+ { TAG_AREA, "area", tag_find_urls },
+ { TAG_BASE, "base", tag_handle_base },
+ { TAG_BGSOUND, "bgsound", tag_find_urls },
+ { TAG_BODY, "body", tag_find_urls },
+ { TAG_EMBED, "embed", tag_find_urls },
+ { TAG_FIG, "fig", tag_find_urls },
+ { TAG_FORM, "form", tag_handle_form },
+ { TAG_FRAME, "frame", tag_find_urls },
+ { TAG_IFRAME, "iframe", tag_find_urls },
+ { TAG_IMG, "img", tag_find_urls },
+ { TAG_INPUT, "input", tag_find_urls },
+ { TAG_LAYER, "layer", tag_find_urls },
+ { TAG_LINK, "link", tag_handle_link },
+ { TAG_META, "meta", tag_handle_meta },
+ { TAG_OVERLAY, "overlay", tag_find_urls },
+ { TAG_SCRIPT, "script", tag_find_urls },
+ { TAG_TABLE, "table", tag_find_urls },
+ { TAG_TD, "td", tag_find_urls },
+ { TAG_TH, "th", tag_find_urls }
};
+/* tag_url_attributes documents which attributes of which tags contain
+ URLs to harvest. It is used by tag_find_urls. */
-/* Flags for specific url-attr pairs handled through TC_LINK: */
+/* Defines for the FLAGS field; currently only one flag is defined. */
/* This tag points to an external document not necessary for rendering this
document (i.e. it's not an inlined image, stylesheet, etc.). */
-#define AF_EXTERNAL 1
-
+#define TUA_EXTERNAL 1
-/* For tags handled by TC_LINK: attributes that contain URLs to
+/* For tags handled by tag_find_urls: attributes that contain URLs to
download. */
static struct {
int tagid;
const char *attr_name;
int flags;
-} url_tag_attr_map[] = {
- { TAG_A, "href", AF_EXTERNAL },
+} tag_url_attributes[] = {
+ { TAG_A, "href", TUA_EXTERNAL },
{ TAG_APPLET, "code", 0 },
- { TAG_AREA, "href", AF_EXTERNAL },
+ { TAG_AREA, "href", TUA_EXTERNAL },
{ TAG_BGSOUND, "src", 0 },
{ TAG_BODY, "background", 0 },
+ { TAG_EMBED, "href", TUA_EXTERNAL },
{ TAG_EMBED, "src", 0 },
{ TAG_FIG, "src", 0 },
{ TAG_FRAME, "src", 0 },
from the information above. However, some places in the code refer
to the attributes not mentioned here. We add them manually. */
static const char *additional_attributes[] = {
- "rel", /* for TAG_LINK */
- "http-equiv", /* for TAG_META */
- "name", /* for TAG_META */
- "content" /* for TAG_META */
+ "rel", /* used by tag_handle_link */
+ "http-equiv", /* used by tag_handle_meta */
+ "name", /* used by tag_handle_meta */
+ "content", /* used by tag_handle_meta */
+ "action" /* used by tag_handle_form */
+ /* Every attribute a tag_handle_* function passes to find_attr must
+    be listed here so init_interesting registers it as interesting.  */
};
-static const char **interesting_tags;
-static const char **interesting_attributes;
+struct hash_table *interesting_tags;
+struct hash_table *interesting_attributes;
static void
init_interesting (void)
matches the user's preferences as specified through --ignore-tags
and --follow-tags. */
- {
- int i, ind = 0;
- int size = ARRAY_SIZE (known_tags);
- interesting_tags = (const char **)xmalloc ((size + 1) * sizeof (char *));
-
- for (i = 0; i < size; i++)
- {
- const char *name = known_tags[i].name;
-
- /* Normally here we could say:
- interesting_tags[i] = name;
- But we need to respect the settings of --ignore-tags and
- --follow-tags, so the code gets a bit hairier. */
-
- if (opt.ignore_tags)
- {
- /* --ignore-tags was specified. Do not match these
- specific tags. --ignore-tags takes precedence over
- --follow-tags, so we process --ignore first and fall
- through if there's no match. */
- int j, lose = 0;
- for (j = 0; opt.ignore_tags[j] != NULL; j++)
- /* Loop through all the tags this user doesn't care about. */
- if (strcasecmp(opt.ignore_tags[j], name) == EQ)
- {
- lose = 1;
- break;
- }
- if (lose)
- continue;
- }
-
- if (opt.follow_tags)
- {
- /* --follow-tags was specified. Only match these specific tags, so
- continue back to top of for if we don't match one of them. */
- int j, win = 0;
- for (j = 0; opt.follow_tags[j] != NULL; j++)
- /* Loop through all the tags this user cares about. */
- if (strcasecmp(opt.follow_tags[j], name) == EQ)
- {
- win = 1;
- break;
- }
- if (!win)
- continue; /* wasn't one of the explicitly desired tags */
- }
-
- /* If we get to here, --follow-tags isn't being used or the
- tag is among the ones that are followed, and --ignore-tags,
- if specified, didn't include this tag, so it's an
- "interesting" one. */
- interesting_tags[ind++] = name;
- }
- interesting_tags[ind] = NULL;
- }
-
- /* The same for attributes, except we loop through url_tag_attr_map.
- Here we also need to make sure that the list of attributes is
- unique, and to include the attributes from additional_attributes. */
- {
- int i, ind;
- const char **att = xmalloc ((ARRAY_SIZE (additional_attributes) + 1)
- * sizeof (char *));
- /* First copy the "additional" attributes. */
- for (i = 0; i < ARRAY_SIZE (additional_attributes); i++)
- att[i] = additional_attributes[i];
- ind = i;
- att[ind] = NULL;
- for (i = 0; i < ARRAY_SIZE (url_tag_attr_map); i++)
- {
- int j, seen = 0;
- const char *look_for = url_tag_attr_map[i].attr_name;
- for (j = 0; j < ind - 1; j++)
- if (!strcmp (att[j], look_for))
- {
- seen = 1;
- break;
- }
- if (!seen)
- {
- att = xrealloc (att, (ind + 2) * sizeof (*att));
- att[ind++] = look_for;
- att[ind] = NULL;
- }
- }
- interesting_attributes = att;
- }
-}
-
-static int
-find_tag (const char *tag_name)
-{
int i;
+ interesting_tags = make_nocase_string_hash_table (countof (known_tags));
- /* This is linear search; if the number of tags grow, we can switch
- to binary search. */
+ /* First, add all the tags we know how to handle, mapped to their
+ respective entries in known_tags. */
+ for (i = 0; i < countof (known_tags); i++)
+ hash_table_put (interesting_tags, known_tags[i].name, known_tags + i);
- for (i = 0; i < ARRAY_SIZE (known_tags); i++)
+ /* Then remove the tags ignored through --ignore-tags. */
+ if (opt.ignore_tags)
{
- int cmp = strcasecmp (known_tags[i].name, tag_name);
- /* known_tags are sorted alphabetically, so we can
- micro-optimize. */
- if (cmp > 0)
- break;
- else if (cmp == 0)
- return i;
+ char **ignored;
+ for (ignored = opt.ignore_tags; *ignored; ignored++)
+ hash_table_remove (interesting_tags, *ignored);
}
- return -1;
+
+ /* If --follow-tags is specified, use only those tags. */
+ if (opt.follow_tags)
+ {
+ /* Create a new table intersecting --follow-tags and known_tags,
+ and use it as interesting_tags. */
+ struct hash_table *intersect = make_nocase_string_hash_table (0);
+ char **followed;
+ for (followed = opt.follow_tags; *followed; followed++)
+ {
+ struct known_tag *t = hash_table_get (interesting_tags, *followed);
+ if (!t)
+ continue; /* ignore unknown --follow-tags entries. */
+ hash_table_put (intersect, *followed, t);
+ }
+ hash_table_destroy (interesting_tags);
+ interesting_tags = intersect;
+ }
+
+ /* Add the attributes we care about. */
+ interesting_attributes = make_nocase_string_hash_table (10);
+ for (i = 0; i < countof (additional_attributes); i++)
+ string_set_add (interesting_attributes, additional_attributes[i]);
+ for (i = 0; i < countof (tag_url_attributes); i++)
+ string_set_add (interesting_attributes, tag_url_attributes[i].attr_name);
}
/* Find the value of attribute named NAME in the taginfo TAG. If the
- attribute is not present, return NULL. If ATTRID is non-NULL, the
- exact identity of the attribute will be returned. */
+ attribute is not present, return NULL. If ATTRIND is non-NULL, the
+ index of the attribute in TAG will be stored there. */
+
static char *
-find_attr (struct taginfo *tag, const char *name, int *attrid)
+find_attr (struct taginfo *tag, const char *name, int *attrind)
{
int i;
+ /* Attribute names are compared case-insensitively, as is usual in
+    HTML.  Returns the attribute's value (may be NULL).  */
for (i = 0; i < tag->nattrs; i++)
if (!strcasecmp (tag->attrs[i].name, name))
{
- if (attrid)
- *attrid = i;
+ if (attrind)
+ *attrind = i;
return tag->attrs[i].value;
}
return NULL;
}
-struct collect_urls_closure {
+/* Per-document state threaded through collect_tags_mapper and the
+   tag_* handlers while mapping over one HTML file.  */
+struct map_context {
char *text; /* HTML text. */
char *base; /* Base URI of the document, possibly
changed through <base href=...>. */
- struct urlpos *head, *tail; /* List of URLs */
const char *parent_base; /* Base of the current document. */
const char *document_file; /* File name of this document. */
int nofollow; /* whether NOFOLLOW was specified in a
<meta name=robots> tag. */
+
+ struct urlpos *head, *tail; /* List of URLs that is being
+ built. */
};
-/* Resolve LINK_URI and append it to closure->tail. TAG and ATTRID
- are the necessary context to store the position and size. */
+/* Append LINK_URI to the urlpos structure that is being built.
+
+ LINK_URI will be merged with the current document base. TAG and
+ ATTRIND are the necessary context to store the position and
+ size. */
static struct urlpos *
-handle_link (struct collect_urls_closure *closure, const char *link_uri,
- struct taginfo *tag, int attrid)
+append_one_url (const char *link_uri, int inlinep,
+ struct taginfo *tag, int attrind, struct map_context *ctx)
{
int link_has_scheme = url_has_scheme (link_uri);
struct urlpos *newel;
- const char *base = closure->base ? closure->base : closure->parent_base;
+ const char *base = ctx->base ? ctx->base : ctx->parent_base;
struct url *url;
if (!base)
{
DEBUGP (("%s: no base, merge will use \"%s\".\n",
- closure->document_file, link_uri));
+ ctx->document_file, link_uri));
if (!link_has_scheme)
{
- /* We have no base, and the link does not have a host
- attached to it. Nothing we can do. */
- /* #### Should we print a warning here? Wget 1.5.x used to. */
+ /* Base URL is unavailable, and the link does not have a
+ location attached to it -- we have to give up. Since
+ this can only happen when using `--force-html -i', print
+ a warning. */
+ logprintf (LOG_NOTQUIET,
+ _("%s: Cannot resolve incomplete link %s.\n"),
+ ctx->document_file, link_uri);
return NULL;
}
if (!url)
{
DEBUGP (("%s: link \"%s\" doesn't parse.\n",
- closure->document_file, link_uri));
+ ctx->document_file, link_uri));
return NULL;
}
}
char *complete_uri = uri_merge (base, link_uri);
DEBUGP (("%s: merge(\"%s\", \"%s\") -> %s\n",
- closure->document_file, base, link_uri, complete_uri));
+ ctx->document_file, base, link_uri, complete_uri));
url = url_parse (complete_uri, NULL);
if (!url)
{
DEBUGP (("%s: merged link \"%s\" doesn't parse.\n",
- closure->document_file, complete_uri));
+ ctx->document_file, complete_uri));
xfree (complete_uri);
return NULL;
}
xfree (complete_uri);
}
- newel = (struct urlpos *)xmalloc (sizeof (struct urlpos));
+ DEBUGP (("appending \"%s\" to urlpos.\n", url->url));
+ newel = (struct urlpos *)xmalloc (sizeof (struct urlpos));
memset (newel, 0, sizeof (*newel));
+
newel->next = NULL;
newel->url = url;
- newel->pos = tag->attrs[attrid].value_raw_beginning - closure->text;
- newel->size = tag->attrs[attrid].value_raw_size;
+ newel->pos = tag->attrs[attrind].value_raw_beginning - ctx->text;
+ newel->size = tag->attrs[attrind].value_raw_size;
+ newel->link_inline_p = inlinep;
/* A URL is relative if the host is not named, and the name does not
start with `/'. */
else if (link_has_scheme)
newel->link_complete_p = 1;
- if (closure->tail)
+ if (ctx->tail)
{
- closure->tail->next = newel;
- closure->tail = newel;
+ ctx->tail->next = newel;
+ ctx->tail = newel;
}
else
- closure->tail = closure->head = newel;
+ ctx->tail = ctx->head = newel;
return newel;
}
+\f
+/* All the tag_* functions are called from collect_tags_mapper, as
+ specified by KNOWN_TAGS. */
-/* Examine name and attributes of TAG and take appropriate action.
- What will be done depends on TAG's category and attribute values.
- Tags of TC_LINK category have attributes that contain links to
- follow; tags of TC_SPEC category need to be handled specially.
-
- #### It would be nice to split this into several functions. */
+/* Default tag handler: collect URLs from attributes specified for
+ this tag by tag_url_attributes. */
static void
-collect_tags_mapper (struct taginfo *tag, void *arg)
+tag_find_urls (int tagid, struct taginfo *tag, struct map_context *ctx)
{
- struct collect_urls_closure *closure = (struct collect_urls_closure *)arg;
- int tagid = find_tag (tag->name);
- assert (tagid != -1);
+ int i, attrind;
+ int first = -1;
- switch (known_tags[tagid].category)
- {
- case TC_LINK:
+ /* tag_url_attributes is grouped by tagid, so the first matching
+    entry marks the start of this tag's attribute group.  */
+ for (i = 0; i < countof (tag_url_attributes); i++)
+ if (tag_url_attributes[i].tagid == tagid)
{
- int i, id, first;
- int size = ARRAY_SIZE (url_tag_attr_map);
- for (i = 0; i < size; i++)
- if (url_tag_attr_map[i].tagid == tagid)
- break;
- /* We've found the index of url_tag_attr_map where the
- attributes of our tags begin. Now, look for every one of
- them, and handle it. */
- /* Need to process the attributes in the order they appear in
- the tag, as this is required if we convert links. */
+ /* We've found the index of tag_url_attributes where the
+ attributes of our tag begin. */
first = i;
- for (id = 0; id < tag->nattrs; id++)
- {
- /* This nested loop may seem inefficient (O(n^2)), but it's
- not, since the number of attributes (n) we loop over is
- extremely small. In the worst case of IMG with all its
- possible attributes, n^2 will be only 9. */
- for (i = first; (i < size && url_tag_attr_map[i].tagid == tagid);
- i++)
- {
- if (0 == strcasecmp (tag->attrs[id].name,
- url_tag_attr_map[i].attr_name))
- {
- char *attr_value = tag->attrs[id].value;
- if (attr_value)
- {
- struct urlpos *entry;
- entry = handle_link (closure, attr_value, tag, id);
- if (entry != NULL
- && !(url_tag_attr_map[i].flags & AF_EXTERNAL))
- entry->link_inline_p = 1;
- }
- }
- }
- }
+ break;
}
- break;
- case TC_SPEC:
- switch (tagid)
+ assert (first != -1);
+
+ /* Loop over the "interesting" attributes of this tag. In this
+ example, it will loop over "src" and "lowsrc".
+
+ <img src="foo.png" lowsrc="bar.png">
+
+ This has to be done in the outer loop so that the attributes are
+ processed in the same order in which they appear in the page.
+ This is required when converting links. */
+
+ for (attrind = 0; attrind < tag->nattrs; attrind++)
+ {
+ /* Find whether TAG/ATTRIND is a combination that contains a
+ URL. */
+ char *link = tag->attrs[attrind].value;
+ /* NOTE(review): LINK may be NULL for a valueless attribute --
+    presumably append_one_url tolerates that; confirm against
+    html-parse.  */
+ const int size = countof (tag_url_attributes);
+
+ /* If you're cringing at the inefficiency of the nested loops,
+ remember that they both iterate over a very small number of
+ items. The worst-case inner loop is for the IMG tag, which
+ has three attributes. */
+ for (i = first; i < size && tag_url_attributes[i].tagid == tagid; i++)
+ {
+ if (0 == strcasecmp (tag->attrs[attrind].name,
+ tag_url_attributes[i].attr_name))
+ {
+ int flags = tag_url_attributes[i].flags;
+ append_one_url (link, !(flags & TUA_EXTERNAL), tag, attrind, ctx);
+ }
+ }
+ }
+}
+
+/* Handle the BASE tag, for <base href=...>. */
+
+static void
+tag_handle_base (int tagid, struct taginfo *tag, struct map_context *ctx)
+{
+ struct urlpos *base_urlpos;
+ int attrind;
+ char *newbase = find_attr (tag, "href", &attrind);
+ if (!newbase)
+ return;
+
+ /* The BASE URL is recorded in the urlpos list (so link conversion
+    can rewrite it), but flagged so it is never downloaded.  */
+ base_urlpos = append_one_url (newbase, 0, tag, attrind, ctx);
+ if (!base_urlpos)
+ return;
+ base_urlpos->ignore_when_downloading = 1;
+ base_urlpos->link_base_p = 1;
+
+ /* Replace any previous base; subsequent links in this document will
+    be merged against the new one.  */
+ if (ctx->base)
+ xfree (ctx->base);
+ if (ctx->parent_base)
+ ctx->base = uri_merge (ctx->parent_base, newbase);
+ else
+ ctx->base = xstrdup (newbase);
+}
+
+/* Mark the URL found in <form action=...> for conversion. */
+
+static void
+tag_handle_form (int tagid, struct taginfo *tag, struct map_context *ctx)
+{
+ int attrind;
+ char *action = find_attr (tag, "action", &attrind);
+ if (action)
+ {
+ struct urlpos *action_urlpos = append_one_url (action, 0, tag,
+ attrind, ctx);
+ /* The form target is registered for link conversion only, never
+    queued for download.  */
+ if (action_urlpos)
+ action_urlpos->ignore_when_downloading = 1;
+ }
+}
+
+/* Handle the LINK tag. It requires special handling because how its
+ links will be followed in -p mode depends on the REL attribute. */
+
+static void
+tag_handle_link (int tagid, struct taginfo *tag, struct map_context *ctx)
+{
+ int attrind;
+ char *href = find_attr (tag, "href", &attrind);
+
+ /* All <link href="..."> link references are external, except those
+ known not to be, such as style sheet and shortcut icon:
+
+ <link rel="stylesheet" href="...">
+ <link rel="shortcut icon" href="...">
+ */
+ if (href)
+ {
+ char *rel = find_attr (tag, "rel", NULL);
+ int inlinep = (rel
+ && (0 == strcasecmp (rel, "stylesheet")
+ || 0 == strcasecmp (rel, "shortcut icon")));
+ /* A nonzero INLINEP marks the link as inlined (a page requisite)
+    rather than an external document.  */
+ append_one_url (href, inlinep, tag, attrind, ctx);
+ }
+}
+
+/* Handle the META tag. This requires special handling because of the
+ refresh feature and because of robot exclusion. */
+
+static void
+tag_handle_meta (int tagid, struct taginfo *tag, struct map_context *ctx)
+{
+ char *name = find_attr (tag, "name", NULL);
+ char *http_equiv = find_attr (tag, "http-equiv", NULL);
+
+ if (http_equiv && 0 == strcasecmp (http_equiv, "refresh"))
+ {
+ /* Some pages use a META tag to specify that the page be
+ refreshed by a new page after a given number of seconds. The
+ general format for this is:
+
+ <meta http-equiv=Refresh content="NUMBER; URL=index2.html">
+
+ So we just need to skip past the "NUMBER; URL=" garbage to
+ get to the URL. */
+
+ struct urlpos *entry;
+ int attrind;
+ int timeout = 0;
+ char *p;
+
+ char *refresh = find_attr (tag, "content", &attrind);
+ if (!refresh)
+ return;
+
+ /* Parse the leading decimal timeout; give up if it is not
+    followed by a semicolon.  */
+ for (p = refresh; ISDIGIT (*p); p++)
+ timeout = 10 * timeout + *p - '0';
+ if (*p++ != ';')
+ return;
+
+ while (ISSPACE (*p))
+ ++p;
+ if (!( TOUPPER (*p) == 'U'
+ && TOUPPER (*(p + 1)) == 'R'
+ && TOUPPER (*(p + 2)) == 'L'
+ && *(p + 3) == '='))
+ return;
+ p += 4;
+ while (ISSPACE (*p))
+ ++p;
+
+ /* P now points at the URL proper.  */
+ entry = append_one_url (p, 0, tag, attrind, ctx);
+ if (entry)
{
- case TAG_BASE:
- {
- struct urlpos *base_urlpos;
- int id;
- char *newbase = find_attr (tag, "href", &id);
- if (!newbase)
- break;
-
- base_urlpos = handle_link (closure, newbase, tag, id);
- if (!base_urlpos)
- break;
- base_urlpos->ignore_when_downloading = 1;
- base_urlpos->link_base_p = 1;
-
- if (closure->base)
- xfree (closure->base);
- if (closure->parent_base)
- closure->base = uri_merge (closure->parent_base, newbase);
- else
- closure->base = xstrdup (newbase);
- }
- break;
- case TAG_LINK:
- {
- int id;
- char *href = find_attr (tag, "href", &id);
-
- /* All <link href="..."> link references are external,
- except for <link rel="stylesheet" href="...">. */
- if (href)
- {
- struct urlpos *entry;
- entry = handle_link (closure, href, tag, id);
- if (entry != NULL)
- {
- char *rel = find_attr (tag, "rel", NULL);
- if (rel && 0 == strcasecmp (rel, "stylesheet"))
- entry->link_inline_p = 1;
- }
- }
- }
- break;
- case TAG_META:
- /* Some pages use a META tag to specify that the page be
- refreshed by a new page after a given number of seconds.
- The general format for this is:
-
- <meta http-equiv=Refresh content="NUMBER; URL=index2.html">
-
- So we just need to skip past the "NUMBER; URL=" garbage
- to get to the URL. */
- {
- char *name = find_attr (tag, "name", NULL);
- char *http_equiv = find_attr (tag, "http-equiv", NULL);
- if (http_equiv && !strcasecmp (http_equiv, "refresh"))
- {
- struct urlpos *entry;
-
- int id;
- char *p, *refresh = find_attr (tag, "content", &id);
- int timeout = 0;
-
- for (p = refresh; ISDIGIT (*p); p++)
- timeout = 10 * timeout + *p - '0';
- if (*p++ != ';')
- return;
-
- while (ISSPACE (*p))
- ++p;
- if (!(TOUPPER (*p) == 'U'
- && TOUPPER (*(p + 1)) == 'R'
- && TOUPPER (*(p + 2)) == 'L'
- && *(p + 3) == '='))
- return;
- p += 4;
- while (ISSPACE (*p))
- ++p;
-
- entry = handle_link (closure, p, tag, id);
- if (entry)
- {
- entry->link_refresh_p = 1;
- entry->refresh_timeout = timeout;
- }
- }
- else if (name && !strcasecmp (name, "robots"))
- {
- /* Handle stuff like:
- <meta name="robots" content="index,nofollow"> */
- char *content = find_attr (tag, "content", NULL);
- if (!content)
- return;
- if (!strcasecmp (content, "none"))
- closure->nofollow = 1;
- else
- {
- while (*content)
- {
- /* Find the next occurrence of ',' or the end of
- the string. */
- char *end = strchr (content, ',');
- if (end)
- ++end;
- else
- end = content + strlen (content);
- if (!strncasecmp (content, "nofollow", end - content))
- closure->nofollow = 1;
- content = end;
- }
- }
- }
- }
- break;
- default:
- /* Category is TC_SPEC, but tag name is unhandled. This
- must not be. */
- abort ();
+ entry->link_refresh_p = 1;
+ entry->refresh_timeout = timeout;
+ }
+ }
+ else if (name && 0 == strcasecmp (name, "robots"))
+ {
+ /* Handle stuff like:
+ <meta name="robots" content="index,nofollow"> */
+ char *content = find_attr (tag, "content", NULL);
+ if (!content)
+ return;
+ if (!strcasecmp (content, "none"))
+ ctx->nofollow = 1;
+ else
+ {
+ /* CONTENT is a comma-separated list of directives; set
+    nofollow if any of them is "nofollow".  */
+ while (*content)
+ {
+ /* Find the next occurrence of ',' or the end of
+ the string. */
+ char *end = strchr (content, ',');
+ if (end)
+ ++end;
+ else
+ end = content + strlen (content);
+ if (!strncasecmp (content, "nofollow", end - content))
+ ctx->nofollow = 1;
+ content = end;
+ }
}
- break;
}
}
+/* Dispatch the tag handler appropriate for the tag we're mapping
+ over.  ARG is the map_context passed to map_html_tags.  See
+ known_tags[] for the mapping of tags to handlers. */
+
+static void
+collect_tags_mapper (struct taginfo *tag, void *arg)
+{
+ struct map_context *ctx = (struct map_context *)arg;
+
+ /* Find the tag in our table of tags. This must not fail because
+ map_html_tags only returns tags found in interesting_tags. */
+ struct known_tag *t = hash_table_get (interesting_tags, tag->name);
+ assert (t != NULL);
+
+ t->handler (t->tagid, tag, ctx);
+}
+\f
/* Analyze HTML tags FILE and construct a list of URLs referenced from
it. It merges relative links in FILE with URL. It is aware of
<base href=...> and does the right thing. */
+
struct urlpos *
get_urls_html (const char *file, const char *url, int *meta_disallow_follow)
{
struct file_memory *fm;
- struct collect_urls_closure closure;
+ struct map_context ctx;
+ int flags;
/* Load the file. */
fm = read_file (file);
}
DEBUGP (("Loaded %s (size %ld).\n", file, fm->length));
- closure.text = fm->content;
- closure.head = closure.tail = NULL;
- closure.base = NULL;
- closure.parent_base = url ? url : opt.base_href;
- closure.document_file = file;
- closure.nofollow = 0;
+ ctx.text = fm->content;
+ ctx.head = ctx.tail = NULL;
+ ctx.base = NULL;
+ ctx.parent_base = url ? url : opt.base_href;
+ ctx.document_file = file;
+ ctx.nofollow = 0;
if (!interesting_tags)
init_interesting ();
- map_html_tags (fm->content, fm->length, interesting_tags,
- interesting_attributes, collect_tags_mapper, &closure);
+ /* Specify MHT_TRIM_VALUES because of buggy HTML generators that
+ generate <a href=" foo"> instead of <a href="foo"> (Netscape
+ ignores spaces as well.) If you really mean space, use &#32; or
+ %20. */
+ flags = MHT_TRIM_VALUES;
+ if (opt.strict_comments)
+ flags |= MHT_STRICT_COMMENTS;
- DEBUGP (("no-follow in %s: %d\n", file, closure.nofollow));
+ map_html_tags (fm->content, fm->length, collect_tags_mapper, &ctx, flags,
+ interesting_tags, interesting_attributes);
+
+ DEBUGP (("no-follow in %s: %d\n", file, ctx.nofollow));
if (meta_disallow_follow)
- *meta_disallow_follow = closure.nofollow;
+ *meta_disallow_follow = ctx.nofollow;
+
+ FREE_MAYBE (ctx.base);
+ read_file_free (fm);
+ return ctx.head;
+}
+
+/* This doesn't really have anything to do with HTML, but it's similar
+ to get_urls_html, so we put it here. */
- FREE_MAYBE (closure.base);
+/* Read URLs from FILE, one per line.  Leading and trailing whitespace
+   is stripped and empty lines are skipped; each remaining line is
+   merged with opt.base_href (if set) and parsed as a URL.  Returns
+   the head of a newly allocated urlpos list, or NULL on error or
+   empty input.  */
+struct urlpos *
+get_urls_file (const char *file)
+{
+ struct file_memory *fm;
+ struct urlpos *head, *tail;
+ const char *text, *text_end;
+
+ /* Load the file. */
+ fm = read_file (file);
+ if (!fm)
+ {
+ logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
+ return NULL;
+ }
+ DEBUGP (("Loaded %s (size %ld).\n", file, fm->length));
+
+ head = tail = NULL;
+ text = fm->content;
+ text_end = fm->content + fm->length;
+ while (text < text_end)
+ {
+ int up_error_code;
+ char *url_text;
+ struct urlpos *entry;
+ struct url *url;
+
+ const char *line_beg = text;
+ const char *line_end = memchr (text, '\n', text_end - text);
+ if (!line_end)
+ line_end = text_end;
+ else
+ ++line_end;
+ text = line_end;
+
+ /* Strip whitespace from the beginning and end of line. */
+ while (line_beg < line_end && ISSPACE (*line_beg))
+ ++line_beg;
+ while (line_end > line_beg && ISSPACE (*(line_end - 1)))
+ --line_end;
+
+ if (line_beg == line_end)
+ continue;
+
+ /* The URL is in the [line_beg, line_end) region. */
+
+ /* We must copy the URL to a zero-terminated string, and we
+ can't use alloca because we're in a loop. *sigh*. */
+ url_text = strdupdelim (line_beg, line_end);
+
+ if (opt.base_href)
+ {
+ /* Merge opt.base_href with URL. */
+ char *merged = uri_merge (opt.base_href, url_text);
+ xfree (url_text);
+ url_text = merged;
+ }
+
+ url = url_parse (url_text, &up_error_code);
+ if (!url)
+ {
+ logprintf (LOG_NOTQUIET, "%s: Invalid URL %s: %s\n",
+ file, url_text, url_error (up_error_code));
+ xfree (url_text);
+ continue;
+ }
+ xfree (url_text);
+
+ entry = (struct urlpos *)xmalloc (sizeof (struct urlpos));
+ memset (entry, 0, sizeof (*entry));
+ entry->next = NULL;
+ entry->url = url;
+
+ /* Append to the tail of the list being built.  */
+ if (!head)
+ head = entry;
+ else
+ tail->next = entry;
+ tail = entry;
+ }
read_file_free (fm);
- return closure.head;
+ return head;
}
void