Example #1
/* Cleanup the data structures associated with recursive retrieving
   (the variables above).  */
void
recursive_cleanup (void)
{
  if (undesirable_urls)
    {
      string_set_free (undesirable_urls);
      undesirable_urls = NULL;
    }
  if (dl_file_url_map)
    {
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;
    }
  if (dl_url_file_map)
    {
      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;
    }
  free_vec (forbidden);
  forbidden = NULL;
  slist_free (downloaded_html_files);
  downloaded_html_files = NULL;
  FREE_MAYBE (base_dir);
  FREE_MAYBE (robots_host);
  first_time = 1;
}
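
recursive_cleanup frees each structure only if it was actually allocated, then resets the pointer so that a repeated call is a harmless no-op. As a minimal sketch of that idiom (FREE_MAYBE's real definition is not part of this excerpt, so the macro below is an assumed equivalent for heap pointers, not the project's own code):

#include <stdlib.h>

/* Assumed equivalent of the guarded-free idiom above: free a
   possibly-NULL heap pointer and clear it so that later cleanup
   calls remain safe.  */
#define FREE_AND_CLEAR(p)			\
  do						\
    {						\
      if (p)					\
        {					\
          free (p);				\
          (p) = NULL;				\
        }					\
    }						\
  while (0)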
Example #2
/* Free the data structures used for link conversion: the file<->URL
   maps and the sets of downloaded and converted files.  */
void
convert_cleanup (void)
{
  if (dl_file_url_map)
    {
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;
    }
  if (dl_url_file_map)
    {
      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;
    }
  if (downloaded_html_set)
    {
      string_set_free (downloaded_html_set);
      downloaded_html_set = NULL;
    }
  downloaded_files_free ();
  if (converted_files)
    {
      string_set_free (converted_files);
      converted_files = NULL;
    }
}
Example #3
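/* Free the set of URLs found not to exist.  */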
void
spider_cleanup (void)
{
  if (nonexisting_urls_set)
    {
      string_set_free (nonexisting_urls_set);
      nonexisting_urls_set = NULL;
    }
}
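
All three cleanup functions above tear down string sets built on the same hash-table API. The allocation sites are not part of this excerpt, so the following lifecycle sketch is illustrative only, not the project's actual initialization code:

/* Illustrative lifecycle of a string set as released by the
   cleanups above; the real sets are created and filled elsewhere.  */
static void
string_set_lifecycle_sketch (void)
{
  struct hash_table *seen = make_string_hash_table (0);

  string_set_add (seen, "http://example.com/");
  if (string_set_contains (seen, "http://example.com/"))
    DEBUGP (("already seen\n"));

  string_set_free (seen);	/* frees the stored strings and the table */
}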
Example #4
uerr_t
retrieve_tree (const char *start_url)
{
  uerr_t status = RETROK;

  /* The queue of URLs we need to load. */
  struct url_queue *queue;

  /* The URLs we do not wish to enqueue, because they are already in
     the queue, but haven't been downloaded yet.  */
  struct hash_table *blacklist;

  int up_error_code;
  struct url *start_url_parsed = url_parse (start_url, &up_error_code);

  if (!start_url_parsed)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", start_url,
		 url_error (up_error_code));
      return URLERROR;
    }

  queue = url_queue_new ();
  blacklist = make_string_hash_table (0);

  /* Enqueue the starting URL.  Use start_url_parsed->url rather than
     just URL so we enqueue the canonical form of the URL.  */
  url_enqueue (queue, xstrdup (start_url_parsed->url), NULL, 0, 1);
  string_set_add (blacklist, start_url_parsed->url);

  while (1)
    {
      int descend = 0;
      char *url, *referer, *file = NULL;
      int depth, html_allowed;
      boolean dash_p_leaf_HTML = FALSE;

      if (opt.quota && total_downloaded_bytes > opt.quota)
	break;
      if (status == FWRITEERR)
	break;

      /* Get the next URL from the queue... */

      if (!url_dequeue (queue,
			(const char **)&url, (const char **)&referer,
			&depth, &html_allowed))
	break;

      /* ...and download it.  Note that this download is in most cases
	 unconditional, as download_child_p already makes sure a file
	 doesn't get enqueued twice -- and yet this check is here, and
	 not in download_child_p.  This is so that if you run `wget -r
	 URL1 URL2', and a random URL is encountered once under URL1
	 and again under URL2, but at a different (possibly smaller)
	 depth, we want the URL's children to be taken into account
	 the second time.  */
      if (dl_url_file_map && hash_table_contains (dl_url_file_map, url))
	{
	  file = xstrdup (hash_table_get (dl_url_file_map, url));

	  DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n",
		   url, file));

	  if (html_allowed
	      && downloaded_html_set
	      && string_set_contains (downloaded_html_set, file))
	    descend = 1;
	}
      else
	{
	  int dt = 0;
	  char *redirected = NULL;
	  int oldrec = opt.recursive;

	  opt.recursive = 0;
	  status = retrieve_url (url, &file, &redirected, referer, &dt);
	  opt.recursive = oldrec;

	  if (html_allowed && file && status == RETROK
	      && (dt & RETROKF) && (dt & TEXTHTML))
	    descend = 1;

	  if (redirected)
	    {
	      /* We have been redirected, possibly to another host, or
		 different path, or wherever.  Check whether we really
		 want to follow it.  */
	      if (descend)
		{
		  if (!descend_redirect_p (redirected, url, depth,
					   start_url_parsed, blacklist))
		    descend = 0;
		  else
		    /* Make sure that the old pre-redirect form gets
		       blacklisted. */
		    string_set_add (blacklist, url);
		}

	      xfree (url);
	      url = redirected;
	    }
	}

      if (descend
	  && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION)
	{
	  if (opt.page_requisites
	      && (depth == opt.reclevel || depth == opt.reclevel + 1))
	    {
	      /* When -p is specified, we are allowed to exceed the
		 maximum depth, but only for the "inline" links,
		 i.e. those that are needed to display the page.
		 Originally this could exceed the depth at most by
		 one, but we allow one more level so that the leaf
		 pages that contain frames can be loaded
		 correctly.  */
	      dash_p_leaf_HTML = TRUE;
	    }
	  else
	    {
	      /* Either -p wasn't specified or it was and we've
		 already spent the two extra (pseudo-)levels that it
		 affords us, so we need to bail out. */
	      DEBUGP (("Not descending further; at depth %d, max. %d.\n",
		       depth, opt.reclevel));
	      descend = 0;
	    }
	}

      /* If the downloaded document was HTML, parse it and enqueue the
	 links it contains. */

      if (descend)
	{
	  int meta_disallow_follow = 0;
	  struct urlpos *children
	    = get_urls_html (file, url, &meta_disallow_follow);

	  if (opt.use_robots && meta_disallow_follow)
	    {
	      free_urlpos (children);
	      children = NULL;
	    }

	  if (children)
	    {
	      struct urlpos *child = children;
	      struct url *url_parsed = url_parse (url, NULL);
	      assert (url_parsed != NULL);

	      for (; child; child = child->next)
		{
		  if (child->ignore_when_downloading)
		    continue;
		  if (dash_p_leaf_HTML && !child->link_inline_p)
		    continue;
		  if (download_child_p (child, url_parsed, depth, start_url_parsed,
					blacklist))
		    {
		      url_enqueue (queue, xstrdup (child->url->url),
				   xstrdup (url), depth + 1,
				   child->link_expect_html);
		      /* We blacklist the URL we have enqueued, because we
			 don't want to enqueue (and hence download) the
			 same URL twice.  */
		      string_set_add (blacklist, child->url->url);
		    }
		}

	      url_free (url_parsed);
	      free_urlpos (children);
	    }
	}

      if (opt.delete_after || (file && !acceptable (file)))
	{
	  /* Either --delete-after was specified, or we loaded this
	     otherwise rejected (e.g. by -R) HTML file just so we
	     could harvest its hyperlinks -- in either case, delete
	     the local file. */
	  DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
		   opt.delete_after ? "--delete-after" :
		   "recursive rejection criteria"));
	  logprintf (LOG_VERBOSE,
		     (opt.delete_after
		      ? _("Removing %s.\n")
		      : _("Removing %s since it should be rejected.\n")),
		     file);
	  if (unlink (file))
	    logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
	  register_delete_file (file);
	}

      xfree (url);
      FREE_MAYBE (referer);
      FREE_MAYBE (file);
    }

  /* If anything is left of the queue due to a premature exit, free it
     now.  */
  {
    char *d1, *d2;
    int d3, d4;
    while (url_dequeue (queue,
			(const char **)&d1, (const char **)&d2, &d3, &d4))
      {
	xfree (d1);
	FREE_MAYBE (d2);
      }
  }
  url_queue_delete (queue);

  if (start_url_parsed)
    url_free (start_url_parsed);
  string_set_free (blacklist);

  if (opt.quota && total_downloaded_bytes > opt.quota)
    return QUOTEXC;
  else if (status == FWRITEERR)
    return FWRITEERR;
  else
    return RETROK;
}
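
retrieve_tree performs a breadth-first traversal: the order falls out of a plain FIFO of (url, referer, depth, html_allowed) records, while the blacklist keeps any URL from being enqueued twice. The queue implementation itself is not shown in this excerpt; the sketch below is a minimal interface-compatible FIFO inferred from the call sites, not necessarily the project's own code:

#include <stdlib.h>

/* Hypothetical minimal FIFO matching the url_queue calls made by
   retrieve_tree above.  Ownership of the url/referer strings stays
   with the caller, which frees them after dequeueing.  */
struct queue_element
{
  const char *url;
  const char *referer;
  int depth;
  int html_allowed;
  struct queue_element *next;
};

struct url_queue
{
  struct queue_element *head;
  struct queue_element *tail;
};

struct url_queue *
url_queue_new (void)
{
  return calloc (1, sizeof (struct url_queue));
}

void
url_queue_delete (struct url_queue *queue)
{
  free (queue);
}

/* Append a record at the tail.  */
void
url_enqueue (struct url_queue *queue, const char *url,
             const char *referer, int depth, int html_allowed)
{
  struct queue_element *qel = malloc (sizeof *qel);
  qel->url = url;
  qel->referer = referer;
  qel->depth = depth;
  qel->html_allowed = html_allowed;
  qel->next = NULL;
  if (queue->tail)
    queue->tail->next = qel;
  queue->tail = qel;
  if (!queue->head)
    queue->head = qel;
}

/* Remove the record at the head; return 0 when the queue is empty.  */
int
url_dequeue (struct url_queue *queue, const char **url,
             const char **referer, int *depth, int *html_allowed)
{
  struct queue_element *qel = queue->head;
  if (!qel)
    return 0;
  queue->head = qel->next;
  if (!queue->head)
    queue->tail = NULL;
  *url = qel->url;
  *referer = qel->referer;
  *depth = qel->depth;
  *html_allowed = qel->html_allowed;
  free (qel);
  return 1;
}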
Example #5
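/*
 * Let the user pick a serial port.  If portname is given, open it
 * directly; otherwise enumerate the available ports, prompt for a
 * number, and open the chosen one.  Returns false on failure.
 */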
static bool
choose_serial_port(struct serial_port *sport, const char *portname)
{
	struct string_set *serial_ports;
	struct print_context pc;
	struct pick_context pk;
	char *line, *end;
	size_t len;

	pc.pc_counter = 0;
	pc.pc_handle = stdout;

	/* A port was named explicitly; skip enumeration and open it. */
	if (portname != NULL) {
		serial_ports = NULL;
		goto open_port;
	}

	serial_ports = serial_port_enumerate();
	string_set_foreach(serial_ports, print_serial_port, &pc);

	if (pc.pc_counter == 0) {
		fprintf(pc.pc_handle, "Sorry, no ports available.\n");
		string_set_free(serial_ports);
		return (false);
	}

	pk.pc_counter = 0;
	pk.pc_name = NULL;
	pk.pc_pick = 0;

	/* Prompt until the user enters a valid port number. */
	for (;;) {
		fprintf(pc.pc_handle, "Please enter the number corresponding with the port you would like to use.\n");
		line = fgetln(stdin, &len);
		if (line == NULL)
			continue;
		line[len - 1] = '\0';
		if (line[0] == '\0')
			continue;
		pk.pc_pick = strtoul(line, &end, 10);
		if (*end != '\0') {
			pk.pc_pick = 0;
			continue;
		}
		if (pk.pc_pick < 1 || pk.pc_pick > pc.pc_counter) {
			pk.pc_pick = 0;
			continue;
		}
		break;
	}

	string_set_foreach(serial_ports, pick_serial_port, &pk);
	if (pk.pc_name == NULL) {
		string_set_free(serial_ports);
		return (false);
	}

	portname = pk.pc_name;

open_port:
	fprintf(pc.pc_handle, "Serial port selected: %s\n", portname);

	if (!serial_port_open(sport, portname)) {
		if (serial_ports != NULL)
			string_set_free(serial_ports);
		return (false);
	}
	fprintf(pc.pc_handle, "Serial port opened successfully.\n");
	if (serial_ports != NULL)
		string_set_free(serial_ports);
	return (true);
}
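
fgetln is a BSD extension, and the buffer it returns is not NUL-terminated; the loop above handles that by overwriting the trailing newline in place. On platforms without fgetln, the same prompt loop can be built on POSIX getline. A rough sketch under the same validation rules (prompt_for_pick is a hypothetical helper, not part of the original program):

#define _POSIX_C_SOURCE 200809L	/* for getline() */
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical portable variant of the prompt loop above, using
 * POSIX getline() instead of BSD fgetln().
 */
static unsigned long
prompt_for_pick(FILE *out, unsigned long max)
{
	char *line = NULL;
	char *end;
	size_t cap = 0;
	ssize_t len;
	unsigned long pick;

	for (;;) {
		fprintf(out, "Please enter the number corresponding "
		    "with the port you would like to use.\n");
		len = getline(&line, &cap, stdin);
		if (len <= 1)
			continue;	/* read error or empty line */
		line[len - 1] = '\0';	/* strip the trailing newline */
		pick = strtoul(line, &end, 10);
		if (*end == '\0' && pick >= 1 && pick <= max)
			break;
	}
	free(line);
	return (pick);
}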