/* Collect URLs from HTML source.
- Copyright (C) 1998, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1998, 2000, 2001, 2002 Free Software Foundation, Inc.
This file is part of GNU Wget.
You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+In addition, as a special exception, the Free Software Foundation
+gives permission to link the code of its release of Wget with the
+OpenSSL project's "OpenSSL" library (or with modified versions of it
+that use the same license as the "OpenSSL" library), and distribute
+the linked executables. You must obey the GNU General Public License
+in all respects for all of the code used other than "OpenSSL". If you
+modify this file, you may extend this exception to your version of the
+file, but you are not obligated to do so. If you do not wish to do
+so, delete this exception statement from your version. */
#include <config.h>
#include "html-parse.h"
#include "url.h"
#include "utils.h"
+#include "convert.h"
#ifndef errno
extern int errno;
DECLARE_TAG_HANDLER (tag_find_urls);
DECLARE_TAG_HANDLER (tag_handle_base);
+DECLARE_TAG_HANDLER (tag_handle_form);
DECLARE_TAG_HANDLER (tag_handle_link);
DECLARE_TAG_HANDLER (tag_handle_meta);
{ "embed", tag_find_urls },
#define TAG_FIG 7
{ "fig", tag_find_urls },
-#define TAG_FRAME 8
+#define TAG_FORM 8
+ { "form", tag_handle_form },
+#define TAG_FRAME 9
{ "frame", tag_find_urls },
-#define TAG_IFRAME 9
+#define TAG_IFRAME 10
{ "iframe", tag_find_urls },
-#define TAG_IMG 10
+#define TAG_IMG 11
{ "img", tag_find_urls },
-#define TAG_INPUT 11
+#define TAG_INPUT 12
{ "input", tag_find_urls },
-#define TAG_LAYER 12
+#define TAG_LAYER 13
{ "layer", tag_find_urls },
-#define TAG_LINK 13
+#define TAG_LINK 14
{ "link", tag_handle_link },
-#define TAG_META 14
+#define TAG_META 15
{ "meta", tag_handle_meta },
-#define TAG_OVERLAY 15
+#define TAG_OVERLAY 16
{ "overlay", tag_find_urls },
-#define TAG_SCRIPT 16
+#define TAG_SCRIPT 17
{ "script", tag_find_urls },
-#define TAG_TABLE 17
+#define TAG_TABLE 18
{ "table", tag_find_urls },
-#define TAG_TD 18
+#define TAG_TD 19
{ "td", tag_find_urls },
-#define TAG_TH 19
+#define TAG_TH 20
{ "th", tag_find_urls }
};
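/* Editor's aside, not part of the patch: every new tag forces the
   manual TAG_* renumbering seen above.  A hypothetical X-macro layout
   -- not what Wget uses -- would derive the indices and the name table
   from a single list, so insertions renumber automatically.  Handler
   pointers are omitted for brevity; all names here are illustrative. */

#include <stdio.h>

#define TAG_LIST(X)  \
  X (FIG,   "fig")   \
  X (FORM,  "form")  \
  X (FRAME, "frame")
  /* ...remaining tags elided... */

enum {
#define AS_INDEX(id, name) TAG_##id,
  TAG_LIST (AS_INDEX)
#undef AS_INDEX
  N_KNOWN_TAGS
};

static const char *tag_names[] = {
#define AS_NAME(id, name) name,
  TAG_LIST (AS_NAME)
#undef AS_NAME
};

int
main (void)
{
  printf ("%d %s\n", TAG_FORM, tag_names[TAG_FORM]); /* prints "1 form" */
  return 0;
}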
{ TAG_AREA, "href", TUA_EXTERNAL },
{ TAG_BGSOUND, "src", 0 },
{ TAG_BODY, "background", 0 },
- { TAG_EMBED, "href", 0 },
+ { TAG_EMBED, "href", TUA_EXTERNAL },
{ TAG_EMBED, "src", 0 },
{ TAG_FIG, "src", 0 },
{ TAG_FRAME, "src", 0 },
from the information above. However, some places in the code refer
to the attributes not mentioned here. We add them manually. */
static const char *additional_attributes[] = {
- "rel", /* for TAG_LINK */
- "http-equiv", /* for TAG_META */
- "name", /* for TAG_META */
- "content" /* for TAG_META */
+ "rel", /* used by tag_handle_link */
+ "http-equiv", /* used by tag_handle_meta */
+ "name", /* used by tag_handle_meta */
+ "content", /* used by tag_handle_meta */
+ "action" /* used by tag_handle_form */
};
static const char **interesting_tags;
{
int i, ind = 0;
- int size = ARRAY_SIZE (known_tags);
+ int size = countof (known_tags);
interesting_tags = (const char **)xmalloc ((size + 1) * sizeof (char *));
for (i = 0; i < size; i++)
unique, and to include the attributes from additional_attributes. */
{
int i, ind;
- const char **att = xmalloc ((ARRAY_SIZE (additional_attributes) + 1)
+ const char **att = xmalloc ((countof (additional_attributes) + 1)
* sizeof (char *));
/* First copy the "additional" attributes. */
- for (i = 0; i < ARRAY_SIZE (additional_attributes); i++)
+ for (i = 0; i < countof (additional_attributes); i++)
att[i] = additional_attributes[i];
ind = i;
att[ind] = NULL;
- for (i = 0; i < ARRAY_SIZE (tag_url_attributes); i++)
+ for (i = 0; i < countof (tag_url_attributes); i++)
{
int j, seen = 0;
const char *look_for = tag_url_attributes[i].attr_name;
}
}
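/* Editor's sketch, not part of the patch: the surrounding hunk merges
   attribute names from tag_url_attributes into the "additional" list
   while skipping duplicates.  A standalone rendition of that idea,
   simplified to fixed-size arrays (no xrealloc); names are
   illustrative, not Wget's. */

#include <stdio.h>
#include <string.h>

#define MAX_ATTRS 32

static void
merge_unique (const char **dest, int *ndest, const char **src, int nsrc)
{
  int i, j;
  for (i = 0; i < nsrc; i++)
    {
      int seen = 0;
      for (j = 0; j < *ndest; j++)
        if (0 == strcmp (dest[j], src[i]))
          {
            seen = 1;
            break;
          }
      if (!seen && *ndest < MAX_ATTRS)
        dest[(*ndest)++] = src[i];
    }
}

int
main (void)
{
  const char *att[MAX_ATTRS] = { "rel", "http-equiv", "name", "content",
                                 "action" };
  int natt = 5;
  const char *from_table[] = { "href", "src", "href", "background", "src" };
  int i;

  merge_unique (att, &natt, from_table, 5);
  for (i = 0; i < natt; i++)
    printf ("%s ", att[i]);     /* rel http-equiv name content action
                                   href src background */
  putchar ('\n');
  return 0;
}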
+/* Find tag with name TAG_NAME in KNOWN_TAGS and return its index. */
+
static int
find_tag (const char *tag_name)
{
- int i;
+ /* Originally implemented as linear search. In Wget 1.9 known_tags
+ contains 21 elements, for which binary search requires at most 5
+ comparisons, whereas linear search performs about 10 on average. */
- /* This is linear search; if the number of tags grow, we can switch
- to binary search. */
+ int lo = 0, hi = countof (known_tags) - 1;
- for (i = 0; i < ARRAY_SIZE (known_tags); i++)
+ while (lo <= hi)
{
- int cmp = strcasecmp (known_tags[i].name, tag_name);
- /* known_tags are sorted alphabetically, so we can
- micro-optimize. */
- if (cmp > 0)
- break;
- else if (cmp == 0)
- return i;
+ int mid = (lo + hi) >> 1;
+ int cmp = strcasecmp (tag_name, known_tags[mid].name);
+ if (cmp < 0)
+ hi = mid - 1;
+ else if (cmp > 0)
+ lo = mid + 1;
+ else
+ return mid;
}
+
return -1;
}
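/* Editor's sketch, not part of the patch: a standalone rendition of
   the binary search above.  Correctness depends on the table staying
   sorted by name; the table here is illustrative, not Wget's. */

#include <stdio.h>
#include <string.h>
#include <strings.h>            /* strcasecmp (POSIX); _stricmp on Windows */

static const char *tags[] = { "a", "form", "img", "link", "meta", "td" };

static int
find_tag_sketch (const char *name)
{
  int lo = 0, hi = (int) (sizeof tags / sizeof tags[0]) - 1;

  while (lo <= hi)
    {
      int mid = (lo + hi) >> 1;
      int cmp = strcasecmp (name, tags[mid]);
      if (cmp < 0)
        hi = mid - 1;           /* NAME sorts before tags[mid] */
      else if (cmp > 0)
        lo = mid + 1;           /* NAME sorts after tags[mid] */
      else
        return mid;             /* case-insensitive match */
    }
  return -1;                    /* not a known tag */
}

int
main (void)
{
  printf ("%d %d %d\n",
          find_tag_sketch ("IMG"),    /* 2 */
          find_tag_sketch ("form"),   /* 1 */
          find_tag_sketch ("blink")); /* -1 */
  return 0;
}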
/* Find the value of attribute named NAME in the taginfo TAG. If the
attribute is not present, return NULL. If ATTRIND is non-NULL, the
index of the attribute in TAG will be stored there. */
+
static char *
find_attr (struct taginfo *tag, const char *name, int *attrind)
{
if (!link_has_scheme)
{
- /* We have no base, and the link does not have a host
- attached to it. Nothing we can do. */
- /* #### Should we print a warning here? Wget 1.5.x used to. */
+ /* Base URL is unavailable, and the link does not have a
+ location attached to it -- we have to give up. Since
+ this can only happen when using `--force-html -i', print
+ a warning. */
+ logprintf (LOG_NOTQUIET,
+ _("%s: Cannot resolve incomplete link %s.\n"),
+ ctx->document_file, link_uri);
return NULL;
}
xfree (complete_uri);
}
+ DEBUGP (("appending \"%s\" to urlpos.\n", url->url));
+
newel = (struct urlpos *)xmalloc (sizeof (struct urlpos));
memset (newel, 0, sizeof (*newel));
/* All the tag_* functions are called from collect_tags_mapper, as
specified by KNOWN_TAGS. */
-/* For most tags, all we want to do is harvest URLs from their
- attributes. */
+/* Default tag handler: collect URLs from attributes specified for
+ this tag by tag_url_attributes. */
static void
tag_find_urls (int tagid, struct taginfo *tag, struct map_context *ctx)
{
int i, attrind, first = -1;
- int size = ARRAY_SIZE (tag_url_attributes);
+ int size = countof (tag_url_attributes);
for (i = 0; i < size; i++)
if (tag_url_attributes[i].tagid == tagid)
{
/* We've found the index of tag_url_attributes where the
- attributes of our tags begin. */
+ attributes of our tag begin. */
first = i;
break;
}
{
/* Find whether TAG/ATTRIND is a combination that contains a
URL. */
- char *attrvalue = tag->attrs[attrind].value;
+ char *link = tag->attrs[attrind].value;
/* If you're cringing at the inefficiency of the nested loops,
- remember that the number of attributes the inner loop
- iterates over is laughably small -- three in the worst case
- (IMG). */
+ remember that they both iterate over a laughably small
+ quantity of items. The worst-case inner loop is for the IMG
+ tag, which has three attributes. */
for (i = first; i < size && tag_url_attributes[i].tagid == tagid; i++)
{
if (0 == strcasecmp (tag->attrs[attrind].name,
tag_url_attributes[i].attr_name))
{
int flags = tag_url_attributes[i].flags;
- append_one_url (attrvalue, !(flags & TUA_EXTERNAL),
- tag, attrind, ctx);
+ append_one_url (link, !(flags & TUA_EXTERNAL), tag, attrind, ctx);
}
}
}
}
+/* Handle the BASE tag, for <base href=...>. */
+
static void
tag_handle_base (int tagid, struct taginfo *tag, struct map_context *ctx)
{
ctx->base = xstrdup (newbase);
}
+/* Mark the URL found in <form action=...> for conversion. */
+
+static void
+tag_handle_form (int tagid, struct taginfo *tag, struct map_context *ctx)
+{
+ int attrind;
+ char *action = find_attr (tag, "action", &attrind);
+ if (action)
+ {
+ struct urlpos *action_urlpos = append_one_url (action, 0, tag,
+ attrind, ctx);
+ if (action_urlpos)
+ action_urlpos->ignore_when_downloading = 1;
+ }
+}
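/* Editor's note: as I read the flag, ignore_when_downloading records
   the action URL for link conversion -- so `-k' can still rewrite
   <form action=...> relative to the local copy -- without queuing the
   URL for retrieval. */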
+
+/* Handle the LINK tag. It requires special handling because whether
+ its links are followed in -p mode depends on the REL attribute. */
+
static void
tag_handle_link (int tagid, struct taginfo *tag, struct map_context *ctx)
{
int attrind;
char *href = find_attr (tag, "href", &attrind);
- /* All <link href="..."> link references are external,
- except for <link rel="stylesheet" href="...">. */
+ /* All <link href="..."> link references are external, except those
+ known not to be, such as style sheet and shortcut icon:
+
+ <link rel="stylesheet" href="...">
+ <link rel="shortcut icon" href="...">
+ */
if (href)
{
char *rel = find_attr (tag, "rel", NULL);
- int inlinep = (rel && 0 == strcasecmp (rel, "stylesheet"));
+ int inlinep = (rel
+ && (0 == strcasecmp (rel, "stylesheet")
+ || 0 == strcasecmp (rel, "shortcut icon")));
append_one_url (href, inlinep, tag, attrind, ctx);
}
}
-/* Some pages use a META tag to specify that the page be refreshed by
- a new page after a given number of seconds. The general format for
- this is:
-
- <meta http-equiv=Refresh content="NUMBER; URL=index2.html">
-
- So we just need to skip past the "NUMBER; URL=" garbage to get to
- the URL. */
+/* Handle the META tag. This requires special handling because of the
+ refresh feature and because of robot exclusion. */
static void
tag_handle_meta (int tagid, struct taginfo *tag, struct map_context *ctx)
if (http_equiv && 0 == strcasecmp (http_equiv, "refresh"))
{
- struct urlpos *entry;
+ /* Some pages use a META tag to specify that the page be
+ refreshed by a new page after a given number of seconds. The
+ general format for this is:
+
+ <meta http-equiv=Refresh content="NUMBER; URL=index2.html">
+
+ So we just need to skip past the "NUMBER; URL=" garbage to
+ get to the URL. */
+
+ struct urlpos *entry;
int attrind;
- char *p, *refresh = find_attr (tag, "content", &attrind);
int timeout = 0;
+ char *p;
+
+ char *refresh = find_attr (tag, "content", &attrind);
+ if (!refresh)
+ return;
for (p = refresh; ISDIGIT (*p); p++)
timeout = 10 * timeout + *p - '0';
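/* Editor's sketch, not part of the patch: a standalone rendition of
   the Refresh parsing this hunk begins.  Given CONTENT such as
   "5; URL=index2.html", extract the timeout and a pointer to the URL.
   The tolerant scanning mirrors the code above in spirit; the function
   name is illustrative. */

#include <ctype.h>
#include <stdio.h>

static const char *
parse_refresh (const char *content, int *timeout)
{
  const char *p = content;

  *timeout = 0;
  while (isdigit ((unsigned char) *p))
    *timeout = 10 * *timeout + (*p++ - '0');

  if (*p++ != ';')
    return NULL;                /* refresh without a URL part */
  while (isspace ((unsigned char) *p))
    ++p;
  if (toupper ((unsigned char) p[0]) != 'U'
      || toupper ((unsigned char) p[1]) != 'R'
      || toupper ((unsigned char) p[2]) != 'L'
      || p[3] != '=')
    return NULL;                /* malformed "URL=" marker */
  return p + 4;                 /* points at the URL proper */
}

int
main (void)
{
  int t;
  const char *url = parse_refresh ("5; URL=index2.html", &t);
  if (url)
    printf ("timeout=%d url=%s\n", t, url); /* timeout=5 url=index2.html */
  return 0;
}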
/* Analyze HTML file FILE and construct a list of URLs referenced from
it. It merges relative links in FILE with URL. It is aware of
<base href=...> and does the right thing. */
+
struct urlpos *
get_urls_html (const char *file, const char *url, int *meta_disallow_follow)
{
struct file_memory *fm;
struct map_context ctx;
+ int flags;
/* Load the file. */
fm = read_file (file);
if (!interesting_tags)
init_interesting ();
- map_html_tags (fm->content, fm->length, interesting_tags,
- interesting_attributes, collect_tags_mapper, &ctx);
+ /* Specify MHT_TRIM_VALUES because of buggy HTML generators that
+ generate <a href=" foo"> instead of <a href="foo"> (Netscape
+ ignores the spaces as well). If you really mean a space, use
+ &#32; or %20. */
+ flags = MHT_TRIM_VALUES;
+ if (opt.strict_comments)
+ flags |= MHT_STRICT_COMMENTS;
+
+ map_html_tags (fm->content, fm->length, collect_tags_mapper, &ctx, flags,
+ interesting_tags, interesting_attributes);
DEBUGP (("no-follow in %s: %d\n", file, ctx.nofollow));
if (meta_disallow_follow)
return ctx.head;
}
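/* Editor's sketch, not part of the patch: what MHT_TRIM_VALUES asks
   the parser to do, as a standalone helper that strips leading and
   trailing whitespace from an attribute value in place.  Illustrative
   only -- the real trimming lives in html-parse.c. */

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static char *
trim_value (char *val)
{
  char *end;

  while (isspace ((unsigned char) *val))
    ++val;                      /* skip leading whitespace */
  end = val + strlen (val);
  while (end > val && isspace ((unsigned char) end[-1]))
    --end;                      /* back over trailing whitespace */
  *end = '\0';
  return val;
}

int
main (void)
{
  char href[] = "  foo.html \n";
  printf ("\"%s\"\n", trim_value (href)); /* "foo.html" */
  return 0;
}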
+/* This doesn't really have anything to do with HTML, but it's similar
+ to get_urls_html, so we put it here. */
+
+struct urlpos *
+get_urls_file (const char *file)
+{
+ struct file_memory *fm;
+ struct urlpos *head, *tail;
+ const char *text, *text_end;
+
+ /* Load the file. */
+ fm = read_file (file);
+ if (!fm)
+ {
+ logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
+ return NULL;
+ }
+ DEBUGP (("Loaded %s (size %ld).\n", file, fm->length));
+
+ head = tail = NULL;
+ text = fm->content;
+ text_end = fm->content + fm->length;
+ while (text < text_end)
+ {
+ int up_error_code;
+ char *url_text;
+ struct urlpos *entry;
+ struct url *url;
+
+ const char *line_beg = text;
+ const char *line_end = memchr (text, '\n', text_end - text);
+ if (!line_end)
+ line_end = text_end;
+ else
+ ++line_end;
+ text = line_end;
+
+ /* Strip whitespace from the beginning and end of line. */
+ while (line_beg < line_end && ISSPACE (*line_beg))
+ ++line_beg;
+ while (line_end > line_beg && ISSPACE (*(line_end - 1)))
+ --line_end;
+
+ if (line_beg == line_end)
+ continue;
+
+ /* The URL is in the [line_beg, line_end) region. */
+
+ /* We must copy the URL to a zero-terminated string, and we
+ can't use alloca because we're in a loop. *sigh*. */
+ url_text = strdupdelim (line_beg, line_end);
+
+ if (opt.base_href)
+ {
+ /* Merge opt.base_href with URL. */
+ char *merged = uri_merge (opt.base_href, url_text);
+ xfree (url_text);
+ url_text = merged;
+ }
+
+ url = url_parse (url_text, &up_error_code);
+ if (!url)
+ {
+ logprintf (LOG_NOTQUIET, "%s: Invalid URL %s: %s\n",
+ file, url_text, url_error (up_error_code));
+ xfree (url_text);
+ continue;
+ }
+ xfree (url_text);
+
+ entry = (struct urlpos *)xmalloc (sizeof (struct urlpos));
+ memset (entry, 0, sizeof (*entry));
+ entry->url = url;
+
+ if (!head)
+ head = entry;
+ else
+ tail->next = entry;
+ tail = entry;
+ }
+ read_file_free (fm);
+ return head;
+}
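/* Editor's sketch of a hypothetical caller, e.g. behind `-i urls.txt':
   walk the returned chain and print each parsed URL.  free_urlpos is
   assumed here to be the helper that releases the chain; printf needs
   <stdio.h>. */

static void
print_input_urls (void)
{
  struct urlpos *list = get_urls_file ("urls.txt");
  struct urlpos *cur;

  for (cur = list; cur; cur = cur->next)
    printf ("%s\n", cur->url->url);

  free_urlpos (list);
}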
+
void
cleanup_html_url (void)
{