/* Iterate through STRLIST, and return the first element that matches
   S, through wildcards or front comparison (as appropriate).  */
static char *
proclist (char **strlist, const char *s, enum accd flags)
{
  char **x;

  for (x = strlist; *x; x++)
    if (has_wildcards_p (*x))
      {
        if (fnmatch (*x, s, FNM_PATHNAME) == 0)
          break;
      }
    else
      {
        char *p = *x + ((flags & ALLABS) && (**x == '/')); /* Remove '/' */
        if (frontcmp (p, s))
          break;
      }
  return *x;
}
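/* Illustrative sketch (not part of Wget): a self-contained mirror of the
   matching rule above -- list entries containing shell wildcards are matched
   with fnmatch(), plain entries are treated as directory prefixes.  The
   demo_* names below are hypothetical and exist only for this example;
   Wget's real helpers are has_wildcards_p() and frontcmp().  */
#include <stdio.h>
#include <string.h>
#include <fnmatch.h>

static int
demo_has_wildcards (const char *s)
{
  /* Any of the usual globbing characters counts as a wildcard.  */
  return strpbrk (s, "*?[]") != NULL;
}

static int
demo_frontcmp (const char *prefix, const char *s)
{
  /* Plain entries act as directory-prefix matches.  */
  return strncmp (prefix, s, strlen (prefix)) == 0;
}

static int
demo_match (const char *entry, const char *dir)
{
  if (demo_has_wildcards (entry))
    return fnmatch (entry, dir, FNM_PATHNAME) == 0;
  return demo_frontcmp (entry, dir);
}

int
main (void)
{
  /* "pub/*" matches "pub/gnu" as a wildcard; "pub" matches it as a prefix;
     "tmp" matches neither way.  */
  printf ("%d %d %d\n",
          demo_match ("pub/*", "pub/gnu"),   /* 1 */
          demo_match ("pub", "pub/gnu"),     /* 1 */
          demo_match ("tmp", "pub/gnu"));    /* 0 */
  return 0;
}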
/* The core of recursive retrieving.  Endless recursion is avoided by
   having all URLs stored to a linked list of URLs, which is checked
   before loading any URL.  That way no URL can get loaded twice.

   The function also supports specification of maximum recursion depth
   and a number of other goodies.  */
uerr_t
recursive_retrieve (const char *file, const char *this_url)
{
  char *constr, *filename, *newloc;
  char *canon_this_url = NULL;
  int dt, inl, dash_p_leaf_HTML = FALSE;
  int meta_disallow_follow;
  int this_url_ftp;            /* See below the explanation */
  uerr_t err;
  struct urlinfo *rurl;
  urlpos *url_list, *cur_url;
  char *rfile;                 /* For robots */
  struct urlinfo *u;

  assert (this_url != NULL);
  assert (file != NULL);
  /* If quota was exceeded earlier, bail out.  */
  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  /* Cache the current URL in the list.  */
  if (first_time)
    {
      /* These three operations need to be done only once per Wget
         run.  They should probably be at a different location.  */
      if (!undesirable_urls)
        undesirable_urls = make_string_hash_table (0);

      hash_table_clear (undesirable_urls);
      string_set_add (undesirable_urls, this_url);
      /* Enter this_url to the hash table, in original and "enhanced"
         form.  */
      u = newurl ();
      err = parseurl (this_url, u, 0);
      if (err == URLOK)
        {
          string_set_add (undesirable_urls, u->url);
          if (opt.no_parent)
            base_dir = xstrdup (u->dir); /* Set the base dir.  */
          /* Set the canonical this_url to be sent as referer.  This
             problem exists only when running the first time.  */
          canon_this_url = xstrdup (u->url);
        }
      else
        {
          DEBUGP (("Double yuck!  The *base* URL is broken.\n"));
          base_dir = NULL;
        }
      freeurl (u, 1);
      depth = 1;
      robots_host = NULL;
      forbidden = NULL;
      first_time = 0;
    }
  else
    ++depth;

  if (opt.reclevel != INFINITE_RECURSION && depth > opt.reclevel)
    /* We've exceeded the maximum recursion depth specified by the
       user. */
    {
      if (opt.page_requisites && depth <= opt.reclevel + 1)
        /* When -p is specified, we can do one more partial recursion
           from the "leaf nodes" on the HTML document tree.  The
           recursion is partial in that we won't traverse any <A> or
           <AREA> tags, nor any <LINK> tags except for <LINK
           REL="stylesheet">. */
        dash_p_leaf_HTML = TRUE;
      else
        /* Either -p wasn't specified or it was and we've already gone
           the one extra (pseudo-)level that it affords us, so we need
           to bail out. */
        {
          DEBUGP (("Recursion depth %d exceeded max. depth %d.\n",
                   depth, opt.reclevel));
          --depth;
          return RECLEVELEXC;
        }
    }

  /* Determine whether this_url is an FTP URL.  If it is, it means
     that the retrieval is done through proxy.  In that case, FTP
     links will be followed by default and recursion will not be
     turned off when following them.  */
  this_url_ftp = (urlproto (this_url) == URLFTP);

  /* Get the URL-s from an HTML file: */
  url_list = get_urls_html (file, canon_this_url ? canon_this_url : this_url,
                            dash_p_leaf_HTML, &meta_disallow_follow);

  if (opt.use_robots && meta_disallow_follow)
    {
      /* The META tag says we are not to follow this file.  Respect
         that.  */
      free_urlpos (url_list);
      url_list = NULL;
    }

  /* Decide what to do with each of the URLs.  A URL will be loaded if
     it meets several requirements, discussed later.  */
  for (cur_url = url_list; cur_url; cur_url = cur_url->next)
    {
      /* If quota was exceeded earlier, bail out.  */
      if (downloaded_exceeds_quota ())
        break;
      /* Parse the URL for convenient use in other functions, as well
         as to get the optimized form.  It also checks URL
         integrity.  */
      u = newurl ();
      if (parseurl (cur_url->url, u, 0) != URLOK)
        {
          DEBUGP (("Yuck!  A bad URL.\n"));
          freeurl (u, 1);
          continue;
        }
      if (u->proto == URLFILE)
        {
          DEBUGP (("Nothing to do with file:// around here.\n"));
          freeurl (u, 1);
          continue;
        }
      assert (u->url != NULL);
      constr = xstrdup (u->url);

      /* Several checkings whether a file is acceptable to load:
         1. check if URL is ftp, and we don't load it
         2. check for relative links (if relative_only is set)
         3. check for domain
         4. check for no-parent
         5. check for excludes && includes
         6. check for suffix
         7. check for same host (if spanhost is unset), with possible
            gethostbyname baggage
         8. check for robots.txt

         Addendum: If the URL is FTP, and it is to be loaded, only the
         domain and suffix settings are "stronger".

         Note that .html and (yuck) .htm will get loaded regardless of
         suffix rules (but that is remedied later with unlink) unless
         the depth equals the maximum depth.

         More time- and memory- consuming tests should be put later on
         the list.  */

      /* inl is set if the URL we are working on (constr) is stored in
         undesirable_urls.  Using it is crucial to avoid unnecessary
         repeated continuous hits to the hash table.  */
      inl = string_set_contains (undesirable_urls, constr);

      /* If it is FTP, and FTP is not followed, chuck it out.  */
      if (!inl)
        if (u->proto == URLFTP && !opt.follow_ftp && !this_url_ftp)
          {
            DEBUGP (("Uh, it is FTP but i'm not in the mood to follow FTP.\n"));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* If it is absolute link and they are not followed, chuck it
         out.  */
      if (!inl && u->proto != URLFTP)
        if (opt.relative_only && !cur_url->link_relative_p)
          {
            DEBUGP (("It doesn't really look like a relative link.\n"));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* If its domain is not to be accepted/looked-up, chuck it
         out.  */
      if (!inl)
        if (!accept_domain (u))
          {
            DEBUGP (("I don't like the smell of that domain.\n"));
            string_set_add (undesirable_urls, constr);
            inl = 1;
          }
      /* Check for parent directory.  */
      if (!inl && opt.no_parent
          /* If the new URL is FTP and the old was not, ignore
             opt.no_parent.  */
          && !(!this_url_ftp && u->proto == URLFTP))
        {
          /* Check for base_dir first.  */
          if (!(base_dir && frontcmp (base_dir, u->dir)))
            {
              /* Failing that, check for parent dir.  */
              struct urlinfo *ut = newurl ();
              if (parseurl (this_url, ut, 0) != URLOK)
                DEBUGP (("Double yuck!  The *base* URL is broken.\n"));
              else if (!frontcmp (ut->dir, u->dir))
                {
                  /* Failing that too, kill the URL.  */
                  DEBUGP (("Trying to escape parental guidance with no_parent on.\n"));
                  string_set_add (undesirable_urls, constr);
                  inl = 1;
                }
              freeurl (ut, 1);
            }
        }
      /* If the file does not match the acceptance list, or is on the
         rejection list, chuck it out.  The same goes for the
         directory exclude- and include- lists.  */
      if (!inl && (opt.includes || opt.excludes))
        {
          if (!accdir (u->dir, ALLABS))
            {
              DEBUGP (("%s (%s) is excluded/not-included.\n", constr, u->dir));
              string_set_add (undesirable_urls, constr);
              inl = 1;
            }
        }
      if (!inl)
        {
          char *suf = NULL;
          /* We check for acceptance/rejection rules only for non-HTML
             documents.  Since we don't know whether they really are
             HTML, it will be deduced from (an OR-ed list):

             1) u->file is "" (meaning it is a directory)
             2) suffix exists, AND:
                a) it is "html", OR
                b) it is "htm"

             If the file *is* supposed to be HTML, it will *not* be
             subject to acc/rej rules, unless a finite maximum depth has
             been specified and the current depth is the maximum
             depth. */
          if (!
              (!*u->file
               || (((suf = suffix (constr)) != NULL)
                   && ((!strcmp (suf, "html") || !strcmp (suf, "htm"))
                       && ((opt.reclevel != INFINITE_RECURSION) &&
                           (depth != opt.reclevel))))))
            {
              if (!acceptable (u->file))
                {
                  DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                           constr, u->file));
                  string_set_add (undesirable_urls, constr);
                  inl = 1;
                }
            }
          FREE_MAYBE (suf);
        }
      /* Optimize the URL (which includes possible DNS lookup) only
         after all other possibilities have been exhausted.  */
      if (!inl)
        {
          if (!opt.simple_check)
            opt_url (u);
          else
            {
              char *p;
              /* Just lowercase the hostname.  */
              for (p = u->host; *p; p++)
                *p = TOLOWER (*p);
              xfree (u->url);
              u->url = str_url (u, 0);
            }
          xfree (constr);
          constr = xstrdup (u->url);
          string_set_add (undesirable_urls, constr);
          if (!inl && !((u->proto == URLFTP) && !this_url_ftp))
            if (!opt.spanhost && this_url && !same_host (this_url, constr))
              {
                DEBUGP (("This is not the same hostname as the parent's.\n"));
                string_set_add (undesirable_urls, constr);
                inl = 1;
              }
        }
      /* What about robots.txt?  */
      if (!inl && opt.use_robots && u->proto == URLHTTP)
        {
          /* Since Wget knows about only one set of robot rules at a
             time, /robots.txt must be reloaded whenever a new host is
             accessed.

             robots_host holds the host the current `forbid' variable
             is assigned to.  */
          if (!robots_host || !same_host (robots_host, u->host))
            {
              FREE_MAYBE (robots_host);
              /* Now make robots_host the new host, no matter what the
                 result will be.  So if there is no /robots.txt on the
                 site, Wget will not retry getting robots all the
                 time.  */
              robots_host = xstrdup (u->host);
              free_vec (forbidden);
              forbidden = NULL;
              err = retrieve_robots (constr, ROBOTS_FILENAME);
              if (err == ROBOTSOK)
                {
                  rurl = robots_url (constr, ROBOTS_FILENAME);
                  rfile = url_filename (rurl);
                  forbidden = parse_robots (rfile);
                  freeurl (rurl, 1);
                  xfree (rfile);
                }
            }

          /* Now that we have (or don't have) robots, we can check for
             them.  */
          if (!robots_match (u, forbidden))
            {
              DEBUGP (("Stuffing %s because %s forbids it.\n", this_url,
                       ROBOTS_FILENAME));
              string_set_add (undesirable_urls, constr);
              inl = 1;
            }
        }

      filename = NULL;
      /* If it wasn't chucked out, do something with it.  */
      if (!inl)
        {
          DEBUGP (("I've decided to load it -> "));
          /* Add it to the list of already-loaded URL-s.  */
          string_set_add (undesirable_urls, constr);
          /* Automatically followed FTPs will *not* be downloaded
             recursively.  */
          if (u->proto == URLFTP)
            {
              /* Don't you adore side-effects?  */
              opt.recursive = 0;
            }
          /* Reset its type.  */
          dt = 0;
          /* Retrieve it.  */
          retrieve_url (constr, &filename, &newloc,
                        canon_this_url ? canon_this_url : this_url, &dt);
          if (u->proto == URLFTP)
            {
              /* Restore...  */
              opt.recursive = 1;
            }
          if (newloc)
            {
              xfree (constr);
              constr = newloc;
            }
          /* If there was no error, and the type is text/html, parse
             it recursively.  */
          if (dt & TEXTHTML)
            {
              if (dt & RETROKF)
                recursive_retrieve (filename, constr);
            }
          else
            DEBUGP (("%s is not text/html so we don't chase.\n",
                     filename ? filename : "(null)"));

          if (opt.delete_after || (filename && !acceptable (filename)))
            /* Either --delete-after was specified, or we loaded this
               otherwise rejected (e.g. by -R) HTML file just so we
               could harvest its hyperlinks -- in either case, delete
               the local file. */
            {
              DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
                       opt.delete_after ? "--delete-after" :
                       "recursive rejection criteria"));
              logprintf (LOG_VERBOSE,
                         (opt.delete_after ? _("Removing %s.\n")
                          : _("Removing %s since it should be rejected.\n")),
                         filename);
              if (unlink (filename))
                logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
              dt &= ~RETROKF;
            }

          /* If everything was OK, and links are to be converted, let's
             store the local filename.  */
          if (opt.convert_links && (dt & RETROKF) && (filename != NULL))
            {
              cur_url->convert = CO_CONVERT_TO_RELATIVE;
              cur_url->local_name = xstrdup (filename);
            }
        }
      else
        DEBUGP (("%s already in list, so we don't load.\n", constr));
      /* Free filename and constr.  */
      FREE_MAYBE (filename);
      FREE_MAYBE (constr);
      freeurl (u, 1);
      /* Increment the pbuf for the appropriate size.  */
    }
  if (opt.convert_links && !opt.delete_after)
    /* This is merely the first pass: the links that have been
       successfully downloaded are converted.  In the second pass,
       convert_all_links() will also convert those links that have NOT
       been downloaded to their canonical form.  */
    convert_links (file, url_list);

  /* Free the linked list of URL-s.  */
  free_urlpos (url_list);
  /* Free the canonical this_url.  */
  FREE_MAYBE (canon_this_url);
  /* Decrement the recursion depth.  */
  --depth;
  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  else
    return RETROK;
}
static int
download_child_p (const struct urlpos *upos, struct url *parent, int depth,
                  struct url *start_url_parsed, struct hash_table *blacklist)
{
  struct url *u = upos->url;
  const char *url = u->url;
  int u_scheme_like_http;

  DEBUGP (("Deciding whether to enqueue \"%s\".\n", url));

  if (string_set_contains (blacklist, url))
    {
      DEBUGP (("Already on the black list.\n"));
      goto out;
    }

  /* Several things to check for:
     1. if scheme is not http, and we don't load it
     2. check for relative links (if relative_only is set)
     3. check for domain
     4. check for no-parent
     5. check for excludes && includes
     6. check for suffix
     7. check for same host (if spanhost is unset), with possible
        gethostbyname baggage
     8. check for robots.txt

     Addendum: If the URL is FTP, and it is to be loaded, only the
     domain and suffix settings are "stronger".

     Note that .html files will get loaded regardless of suffix rules
     (but that is remedied later with unlink) unless the depth equals
     the maximum depth.

     More time- and memory- consuming tests should be put later on
     the list.  */

  /* Determine whether URL under consideration has a HTTP-like scheme. */
  u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP);

  /* 1. Schemes other than HTTP are normally not recursed into. */
  if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp))
    {
      DEBUGP (("Not following non-HTTP schemes.\n"));
      goto out;
    }

  /* 2. If it is an absolute link and they are not followed, throw it
     out.  */
  if (u_scheme_like_http)
    if (opt.relative_only && !upos->link_relative_p)
      {
        DEBUGP (("It doesn't really look like a relative link.\n"));
        goto out;
      }

  /* 3. If its domain is not to be accepted/looked-up, chuck it
     out.  */
  if (!accept_domain (u))
    {
      DEBUGP (("The domain was not accepted.\n"));
      goto out;
    }

  /* 4. Check for parent directory.

     If we descended to a different host or changed the scheme, ignore
     opt.no_parent.  Also ignore it for documents needed to display
     the parent page when in -p mode.  */
  if (opt.no_parent
      && schemes_are_similar_p (u->scheme, start_url_parsed->scheme)
      && 0 == strcasecmp (u->host, start_url_parsed->host)
      && u->port == start_url_parsed->port
      && !(opt.page_requisites && upos->link_inline_p))
    {
      if (!frontcmp (start_url_parsed->dir, u->dir))
        {
          DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n",
                   u->dir, start_url_parsed->dir));
          goto out;
        }
    }

  /* 5. If the file does not match the acceptance list, or is on the
     rejection list, chuck it out.  The same goes for the directory
     exclusion and inclusion lists.  */
  if (opt.includes || opt.excludes)
    {
      if (!accdir (u->dir, ALLABS))
        {
          DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir));
          goto out;
        }
    }

  /* 6. Check for acceptance/rejection rules.  We ignore these rules
     for directories (no file name to match) and for HTML documents,
     which might lead to other files that do need to be downloaded.
     That is, unless we've exhausted the recursion depth anyway.  */
  if (u->file[0] != '\0'
      && !(has_html_suffix_p (u->file)
           && depth != INFINITE_RECURSION
           && depth < opt.reclevel - 1))
    {
      if (!acceptable (u->file))
        {
          DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                   url, u->file));
          goto out;
        }
    }

  /* 7. */
  if (schemes_are_similar_p (u->scheme, parent->scheme))
    if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host))
      {
        DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n",
                 u->host, parent->host));
        goto out;
      }

  /* 8. */
  if (opt.use_robots && u_scheme_like_http)
    {
      struct robot_specs *specs = res_get_specs (u->host, u->port);
      if (!specs)
        {
          char *rfile;
          if (res_retrieve_file (url, &rfile))
            {
              specs = res_parse_from_file (rfile);
              xfree (rfile);
            }
          else
            {
              /* If we cannot get real specs, at least produce dummy
                 ones so that we can register them and stop trying to
                 retrieve them.  */
              specs = res_parse ("", 0);
            }
          res_register_specs (u->host, u->port, specs);
        }

      /* Now that we have (or don't have) robots.txt specs, we can
         check what they say.  */
      if (!res_match_path (specs, u->path))
        {
          DEBUGP (("Not following %s because robots.txt forbids it.\n", url));
          string_set_add (blacklist, url);
          goto out;
        }
    }

  /* The URL has passed all the tests.  It can be placed in the
     download queue. */
  DEBUGP (("Decided to load it.\n"));

  return 1;

 out:
  DEBUGP (("Decided NOT to load it.\n"));

  return 0;
}
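/* Illustrative sketch (not part of Wget): the robots.txt caching idea used
   in step 8 above, in isolation -- fetch and parse the rules at most once
   per host, and cache an empty ("dummy") entry when retrieval fails so the
   same host is never retried.  All names here (demo_*) are hypothetical;
   Wget's real cache lives behind res_get_specs()/res_register_specs().  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_specs { char *host; char *rules; struct demo_specs *next; };

static struct demo_specs *demo_cache;

static struct demo_specs *
demo_lookup (const char *host)
{
  struct demo_specs *p;
  for (p = demo_cache; p; p = p->next)
    if (strcmp (p->host, host) == 0)
      return p;
  return NULL;
}

/* Stand-in for downloading and parsing /robots.txt; returns NULL on
   failure.  Here it always "fails" to exercise the dummy-entry path.  */
static char *
demo_fetch_robots (const char *host)
{
  (void) host;
  return NULL;
}

static struct demo_specs *
demo_get_specs (const char *host)
{
  struct demo_specs *s = demo_lookup (host);
  if (!s)
    {
      char *rules = demo_fetch_robots (host);
      s = malloc (sizeof *s);
      s->host = strdup (host);
      s->rules = rules ? rules : strdup ("");   /* dummy specs on failure */
      s->next = demo_cache;
      demo_cache = s;
    }
  return s;
}

int
main (void)
{
  /* The second and third calls hit the cache; no further fetch is
     attempted for the same host.  */
  demo_get_specs ("www.example.org");
  demo_get_specs ("www.example.org");
  printf ("cached rules: \"%s\"\n", demo_get_specs ("www.example.org")->rules);
  return 0;
}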
/* The core of recursive retrieving.  Endless recursion is avoided by
   having all URL-s stored to a linked list of URL-s, which is checked
   before loading any URL.  That way no URL can get loaded twice.

   The function also supports specification of maximum recursion depth
   and a number of other goodies.  */
uerr_t
recursive_retrieve (const char *file, const char *this_url)
{
  char *constr, *filename, *newloc;
  char *canon_this_url = NULL;
  int dt, inl;
  int this_url_ftp;            /* See below the explanation */
  uerr_t err;
  struct urlinfo *rurl;
  urlpos *url_list, *cur_url;
  char *rfile;                 /* For robots */
  struct urlinfo *u;

  assert (this_url != NULL);
  assert (file != NULL);
  /* If quota was exceeded earlier, bail out.  */
  if (opt.quota && (opt.downloaded > opt.quota))
    return QUOTEXC;
  /* Cache the current URL in the list.  */
  if (first_time)
    {
      ulist = add_slist (ulist, this_url, 0);
      urls_downloaded = NULL;
      urls_html = NULL;
      /* Enter this_url to the slist, in original and "enhanced"
         form.  */
      u = newurl ();
      err = parseurl (this_url, u, 0);
      if (err == URLOK)
        {
          ulist = add_slist (ulist, u->url, 0);
          urls_downloaded = add_url (urls_downloaded, u->url, file);
          urls_html = add_slist (urls_html, file, NOSORT);
          if (opt.no_parent)
            base_dir = xstrdup (u->dir); /* Set the base dir.  */
          /* Set the canonical this_url to be sent as referer.  This
             problem exists only when running the first time.  */
          canon_this_url = xstrdup (u->url);
        }
      else
        {
          DEBUGP (("Double yuck!  The *base* URL is broken.\n"));
          base_dir = NULL;
        }
      freeurl (u, 1);
      depth = 1;
      robots_host = NULL;
      forbidden = NULL;
      first_time = 0;
    }
  else
    ++depth;

  /* Bail out if opt.reclevel is exceeded.  */
  if ((opt.reclevel != 0) && (depth > opt.reclevel))
    {
      DEBUGP (("Recursion depth %d exceeded max. depth %d.\n",
               depth, opt.reclevel));
      --depth;
      return RECLEVELEXC;
    }

  /* Determine whether this_url is an FTP URL.  If it is, it means
     that the retrieval is done through proxy.  In that case, FTP
     links will be followed by default and recursion will not be
     turned off when following them.  */
  this_url_ftp = (urlproto (this_url) == URLFTP);

  /* Get the URL-s from an HTML file: */
  url_list = get_urls_html (file,
                            canon_this_url ? canon_this_url : this_url, 0);

  /* Decide what to do with each of the URLs.  A URL will be loaded if
     it meets several requirements, discussed later.  */
  for (cur_url = url_list; cur_url; cur_url = cur_url->next)
    {
      /* If quota was exceeded earlier, bail out.  */
      if (opt.quota && (opt.downloaded > opt.quota))
        break;
      /* Parse the URL for convenient use in other functions, as well
         as to get the optimized form.  It also checks URL
         integrity.  */
      u = newurl ();
      if (parseurl (cur_url->url, u, 0) != URLOK)
        {
          DEBUGP (("Yuck!  A bad URL.\n"));
          freeurl (u, 1);
          continue;
        }
      if (u->proto == URLFILE)
        {
          DEBUGP (("Nothing to do with file:// around here.\n"));
          freeurl (u, 1);
          continue;
        }
      assert (u->url != NULL);
      constr = xstrdup (u->url);

      /* Several checkings whether a file is acceptable to load:
         1. check if URL is ftp, and we don't load it
         2. check for relative links (if relative_only is set)
         3. check for domain
         4. check for no-parent
         5. check for excludes && includes
         6. check for suffix
         7. check for same host (if spanhost is unset), with possible
            gethostbyname baggage
         8. check for robots.txt

         Addendum: If the URL is FTP, and it is to be loaded, only the
         domain and suffix settings are "stronger".

         Note that .html and (yuck) .htm will get loaded regardless of
         suffix rules (but that is remedied later with unlink).

         More time- and memory- consuming tests should be put later on
         the list.  */

      /* inl is set if the URL we are working on (constr) is stored in
         ulist.  Using it is crucial to avoid the incessant calls to
         in_slist, which is quite slow.  */
      inl = in_slist (ulist, constr);

      /* If it is FTP, and FTP is not followed, chuck it out.  */
      if (!inl)
        if (u->proto == URLFTP && !opt.follow_ftp && !this_url_ftp)
          {
            DEBUGP (("Uh, it is FTP but i'm not in the mood to follow FTP.\n"));
            ulist = add_slist (ulist, constr, 0);
            inl = 1;
          }
      /* If it is absolute link and they are not followed, chuck it
         out.  */
      if (!inl && u->proto != URLFTP)
        if (opt.relative_only && !(cur_url->flags & URELATIVE))
          {
            DEBUGP (("It doesn't really look like a relative link.\n"));
            ulist = add_slist (ulist, constr, 0);
            inl = 1;
          }
      /* If its domain is not to be accepted/looked-up, chuck it
         out.  */
      if (!inl)
        if (!accept_domain (u))
          {
            DEBUGP (("I don't like the smell of that domain.\n"));
            ulist = add_slist (ulist, constr, 0);
            inl = 1;
          }
      /* Check for parent directory.  */
      if (!inl && opt.no_parent
          /* If the new URL is FTP and the old was not, ignore
             opt.no_parent.  */
          && !(!this_url_ftp && u->proto == URLFTP))
        {
          /* Check for base_dir first.  */
          if (!(base_dir && frontcmp (base_dir, u->dir)))
            {
              /* Failing that, check for parent dir.  */
              struct urlinfo *ut = newurl ();
              if (parseurl (this_url, ut, 0) != URLOK)
                DEBUGP (("Double yuck!  The *base* URL is broken.\n"));
              else if (!frontcmp (ut->dir, u->dir))
                {
                  /* Failing that too, kill the URL.  */
                  DEBUGP (("Trying to escape parental guidance with no_parent on.\n"));
                  ulist = add_slist (ulist, constr, 0);
                  inl = 1;
                }
              freeurl (ut, 1);
            }
        }
      /* If the file does not match the acceptance list, or is on the
         rejection list, chuck it out.  The same goes for the
         directory exclude- and include- lists.  */
      if (!inl && (opt.includes || opt.excludes))
        {
          if (!accdir (u->dir, ALLABS))
            {
              DEBUGP (("%s (%s) is excluded/not-included.\n", constr, u->dir));
              ulist = add_slist (ulist, constr, 0);
              inl = 1;
            }
        }
      if (!inl)
        {
          char *suf = NULL;
          /* We check for acceptance/rejection rules only for non-HTML
             documents.  Since we don't know whether they really are
             HTML, it will be deduced from (an OR-ed list):

             1) u->file is "" (meaning it is a directory)
             2) suffix exists, AND:
                a) it is "html", OR
                b) it is "htm"

             If the file *is* supposed to be HTML, it will *not* be
             subject to acc/rej rules.  That's why the `!'.  */
          if (!
              (!*u->file
               || (((suf = suffix (constr)) != NULL)
                   && (!strcmp (suf, "html") || !strcmp (suf, "htm")))))
            {
              if (!acceptable (u->file))
                {
                  DEBUGP (("%s (%s) does not match acc/rej rules.\n",
                           constr, u->file));
                  ulist = add_slist (ulist, constr, 0);
                  inl = 1;
                }
            }
          FREE_MAYBE (suf);
        }
      /* Optimize the URL (which includes possible DNS lookup) only
         after all other possibilities have been exhausted.  */
      if (!inl)
        {
          if (!opt.simple_check)
            opt_url (u);
          else
            {
              char *p;
              /* Just lowercase the hostname.  */
              for (p = u->host; *p; p++)
                *p = tolower (*p);
              free (u->url);
              u->url = str_url (u, 0);
            }
          free (constr);
          constr = xstrdup (u->url);
          inl = in_slist (ulist, constr);
          if (!inl && !((u->proto == URLFTP) && !this_url_ftp))
            if (!opt.spanhost && this_url && !same_host (this_url, constr))
              {
                DEBUGP (("This is not the same hostname as the parent's.\n"));
                ulist = add_slist (ulist, constr, 0);
                inl = 1;
              }
        }
      /* What about robots.txt?  */
      if (!inl && opt.use_robots && u->proto == URLHTTP)
        {
          /* Since Wget knows about only one set of robot rules at a
             time, /robots.txt must be reloaded whenever a new host is
             accessed.

             robots_host holds the host the current `forbid' variable
             is assigned to.  */
          if (!robots_host || !same_host (robots_host, u->host))
            {
              FREE_MAYBE (robots_host);
              /* Now make robots_host the new host, no matter what the
                 result will be.  So if there is no /robots.txt on the
                 site, Wget will not retry getting robots all the
                 time.  */
              robots_host = xstrdup (u->host);
              free_vec (forbidden);
              forbidden = NULL;
              err = retrieve_robots (constr, ROBOTS_FILENAME);
              if (err == ROBOTSOK)
                {
                  rurl = robots_url (constr, ROBOTS_FILENAME);
                  rfile = url_filename (rurl);
                  forbidden = parse_robots (rfile);
                  freeurl (rurl, 1);
                  free (rfile);
                }
            }

          /* Now that we have (or don't have) robots, we can check for
             them.  */
          if (!robots_match (u, forbidden))
            {
              DEBUGP (("Stuffing %s because %s forbids it.\n", this_url,
                       ROBOTS_FILENAME));
              ulist = add_slist (ulist, constr, 0);
              inl = 1;
            }
        }

      filename = NULL;
      /* If it wasn't chucked out, do something with it.  */
      if (!inl)
        {
          DEBUGP (("I've decided to load it -> "));
          /* Add it to the list of already-loaded URL-s.  */
          ulist = add_slist (ulist, constr, 0);
          /* Automatically followed FTPs will *not* be downloaded
             recursively.  */
          if (u->proto == URLFTP)
            {
              /* Don't you adore side-effects?  */
              opt.recursive = 0;
            }
          /* Reset its type.  */
          dt = 0;
          /* Retrieve it.  */
          retrieve_url (constr, &filename, &newloc,
                        canon_this_url ? canon_this_url : this_url, &dt);
          if (u->proto == URLFTP)
            {
              /* Restore...  */
              opt.recursive = 1;
            }
          if (newloc)
            {
              free (constr);
              constr = newloc;
            }
          /* In case of convert_links: If there was no error, add it to
             the list of downloaded URLs.  We might need it for
             conversion.  */
          if (opt.convert_links && filename)
            {
              if (dt & RETROKF)
                {
                  urls_downloaded = add_url (urls_downloaded, constr, filename);
                  /* If the URL is HTML, note it.  */
                  if (dt & TEXTHTML)
                    urls_html = add_slist (urls_html, filename, NOSORT);
                }
            }
          /* If there was no error, and the type is text/html, parse
             it recursively.  */
          if (dt & TEXTHTML)
            {
              if (dt & RETROKF)
                recursive_retrieve (filename, constr);
            }
          else
            DEBUGP (("%s is not text/html so we don't chase.\n",
                     filename ? filename : "(null)"));
          /* If a suffix-rejected file was loaded only because it was
             HTML, undo the error now.  */
          if (opt.delete_after || (filename && !acceptable (filename)))
            {
              logprintf (LOG_VERBOSE,
                         (opt.delete_after ? _("Removing %s.\n")
                          : _("Removing %s since it should be rejected.\n")),
                         filename);
              if (unlink (filename))
                logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
              dt &= ~RETROKF;
            }
          /* If everything was OK, and links are to be converted, let's
             store the local filename.  */
          if (opt.convert_links && (dt & RETROKF) && (filename != NULL))
            {
              cur_url->flags |= UABS2REL;
              cur_url->local_name = xstrdup (filename);
            }
        }
      else
        DEBUGP (("%s already in list, so we don't load.\n", constr));
      /* Free filename and constr.  */
      FREE_MAYBE (filename);
      FREE_MAYBE (constr);
      freeurl (u, 1);
      /* Increment the pbuf for the appropriate size.  */
    }
  if (opt.convert_links)
    convert_links (file, url_list);
  /* Free the linked list of URL-s.  */
  free_urlpos (url_list);
  /* Free the canonical this_url.  */
  FREE_MAYBE (canon_this_url);
  /* Decrement the recursion depth.  */
  --depth;
  if (opt.quota && (opt.downloaded > opt.quota))
    return QUOTEXC;
  else
    return RETROK;
}
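/* Illustrative sketch (not part of Wget): the duplicate-URL guard that both
   versions of recursive_retrieve rely on.  The older code above keeps the
   seen URLs in a string list consulted through in_slist(), which its own
   comment calls "quite slow"; the newer version replaces it with a
   hash-backed string set so each membership check stays cheap.  The demo_*
   names below are hypothetical, and the linear list here only stands in for
   the general check-before-load idea.  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_node { char *url; struct demo_node *next; };

/* Return 1 if URL was already recorded; otherwise record it and return 0. */
static int
demo_seen_before (struct demo_node **set, const char *url)
{
  struct demo_node *p;
  for (p = *set; p; p = p->next)        /* linear scan per lookup */
    if (strcmp (p->url, url) == 0)
      return 1;
  p = malloc (sizeof *p);
  p->url = strdup (url);
  p->next = *set;
  *set = p;
  return 0;
}

int
main (void)
{
  struct demo_node *seen = NULL;
  const char *urls[] = { "http://example.org/", "http://example.org/a",
                         "http://example.org/" };
  size_t i;

  /* The repeated URL is skipped the second time it is encountered.  */
  for (i = 0; i < sizeof urls / sizeof urls[0]; i++)
    printf ("%s -> %s\n", urls[i],
            demo_seen_before (&seen, urls[i]) ? "skip" : "load");
  return 0;
}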