Example #1
void vi_evg_compute_unit_read_checkpoint(struct vi_evg_compute_unit_t *compute_unit, FILE *f)
{
	char *work_group_name;
	char *inst_name;

	struct vi_evg_work_group_t *work_group;
	struct vi_evg_inst_t *inst;

	int num_work_groups;
	int num_insts;

	int count;
	int i;

	/* Empty work-group list */
	HASH_TABLE_FOR_EACH(compute_unit->work_group_table, work_group_name, work_group)
		vi_evg_work_group_free(work_group);
	hash_table_clear(compute_unit->work_group_table);

	/* Empty instruction list */
	HASH_TABLE_FOR_EACH(compute_unit->inst_table, inst_name, inst)
		vi_evg_inst_free(inst);
	hash_table_clear(compute_unit->inst_table);

	/* Read number of work-groups */
	count = fread(&num_work_groups, 1, 4, f);
	if (count != 4)
		fatal("%s: cannot read checkpoint", __FUNCTION__);

	/* Read work-groups */
	for (i = 0; i < num_work_groups; i++)
	{
		work_group = vi_evg_work_group_create(NULL, 0, 0, 0, 0, 0);
		vi_evg_work_group_read_checkpoint(work_group, f);
		if (!hash_table_insert(compute_unit->work_group_table, work_group->name, work_group))
			panic("%s: invalid work-group in checkpoint", __FUNCTION__);
	}

	/* Read number of instructions */
	count = fread(&num_insts, 1, 4, f);
	if (count != 4)
		fatal("%s: cannot read checkpoint", __FUNCTION__);

	/* Read instructions */
	for (i = 0; i < num_insts; i++)
	{
		inst = vi_evg_inst_create(NULL, 0, 0, 0, 0, 0, 0, NULL, NULL,
			NULL, NULL, NULL, NULL);
		vi_evg_inst_read_checkpoint(inst, f);
		if (!hash_table_insert(compute_unit->inst_table, inst->name, inst))
			panic("%s: invalid instruction in checkpoint", __FUNCTION__);
	}
}
Example #2
void vi_x86_core_read_checkpoint(struct vi_x86_core_t *core, FILE *f)
{
    struct vi_x86_inst_t *inst;

    int num_contexts;
    int num_insts;
    int count;
    int i;

    char context_name[MAX_STRING_SIZE];

    char *inst_name;

    /* Clear table of contexts.
     * Elements are VI_X86_CONTEXT_EMPTY, no need to free. */
    hash_table_clear(core->context_table);

    /* Empty instruction table */
    HASH_TABLE_FOR_EACH(core->inst_table, inst_name, inst)
        vi_x86_inst_free(inst);
    hash_table_clear(core->inst_table);

    /* Number of contexts */
    count = fread(&num_contexts, 1, 4, f);
    if (count != 4)
        fatal("%s: cannot read checkpoint", __FUNCTION__);

    /* Read contexts */
    for (i = 0; i < num_contexts; i++)
    {
        str_read_from_file(f, context_name, sizeof context_name);
        if (!hash_table_insert(core->context_table, context_name, VI_X86_CONTEXT_EMPTY))
            panic("%s: invalid context", __FUNCTION__);
    }

    /* Number of instructions */
    count = fread(&num_insts, 1, 4, f);
    if (count != 4)
        fatal("%s: cannot read checkpoint", __FUNCTION__);

    /* Read instructions */
    for (i = 0; i < num_insts; i++)
    {
        inst = vi_x86_inst_create(0, NULL, NULL, NULL, 0, 0);
        vi_x86_inst_read_checkpoint(inst, f);
        if (!hash_table_insert(core->inst_table, inst->name, inst))
            panic("%s: invalid instruction", __FUNCTION__);
    }
}
Example #3
void hash_table_free(struct hash_table *m)
{
    if (m) {
        hash_table_clear(m);
        free(m);
    }
}
Example #4
ir_visitor_status
output_read_remover::visit_leave(ir_emit_vertex *ir)
{
   hash_table_call_foreach(replacements, emit_return_copy, ir);
   hash_table_clear(replacements);
   return visit_continue;
}
Example #5
/*************************************************************//**
Creates a hash table with >= n array cells. The actual number of cells is
chosen to be a prime number slightly bigger than n.
@return	own: created table */
UNIV_INTERN
hash_table_t*
hash_create(
/*========*/
	ulint	n)	/*!< in: number of array cells */
{
	hash_cell_t*	array;
	ulint		prime;
	hash_table_t*	table;

	prime = ut_find_prime(n);

	table = mem_alloc(sizeof(hash_table_t));

	array = ut_malloc(sizeof(hash_cell_t) * prime);

	table->array = array;
	table->n_cells = prime;
#ifndef UNIV_HOTBACKUP
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	table->adaptive = FALSE;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	table->n_mutexes = 0;
	table->mutexes = NULL;
	table->heaps = NULL;
#endif /* !UNIV_HOTBACKUP */
	table->heap = NULL;
	ut_d(table->magic_n = HASH_TABLE_MAGIC_N);

	/* Initialize the cell array */
	hash_table_clear(table);

	return(table);
}
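The comment above promises a cell count equal to a prime at or slightly above n. A minimal usage sketch of that contract, assuming only what the excerpt itself shows (hash_create(), the n_cells field, and hash_table_clear()); the concrete numbers are illustrative, not taken from the source tree:

	/* Hypothetical usage sketch, not from the original file. */
	hash_table_t*	table = hash_create(100);	/* table->n_cells becomes a prime >= 100, e.g. 101 */
	/* ... populate cells ... */
	hash_table_clear(table);			/* zero every cell, keeping the allocation */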
Example #6
void master_login_auth_disconnect(struct master_login_auth *auth)
{
	struct master_login_auth_request *request;

	while (auth->request_head != NULL) {
		request = auth->request_head;
		DLLIST2_REMOVE(&auth->request_head,
			       &auth->request_tail, request);

		request_internal_failure(request,
			"Disconnected from auth server, aborting");
		i_free(request);
	}
	hash_table_clear(auth->requests, FALSE);

	if (auth->to != NULL)
		timeout_remove(&auth->to);
	if (auth->io != NULL)
		io_remove(&auth->io);
	if (auth->fd != -1) {
		i_stream_destroy(&auth->input);
		o_stream_destroy(&auth->output);

		net_disconnect(auth->fd);
		auth->fd = -1;
	}
	auth->version_received = FALSE;
}
Example #7
void hash_table_free(struct hash_table_t *table)
{
	/* Clear table */
	hash_table_clear(table);

	/* Free element vector and hash table */
	free(table->elem_vector);
	free(table);
}
Example #8
// when we see }, go back one hash table and delete
void scope_leave(){
	// include check for null?
	// h = hash_table_lookup(h, "0prev");  // move h back
	h = h->prev;  // move h back
	hash_table_clear(h->next);
	hash_table_delete(h->next);
	h->next = NULL;
	// hash_table_clear(hash_table_lookup(h,"0next"));  // clear
	// hash_table_delete(hash_table_lookup(h,"0next"));  // delete table
} 
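For symmetry with the comment above ("when we see }, go back one hash table and delete"), here is a hedged sketch of a matching scope_enter(). The prev/next links on struct hash_table and the hash_table_create() constructor are assumptions inferred from the fields and commented-out lookups in scope_leave(), not code from the same project.

// Hypothetical counterpart sketch: when we see {, push a fresh innermost table.
// Assumes struct hash_table exposes prev/next links, as scope_leave() implies.
void scope_enter(){
	struct hash_table *t = hash_table_create(0, 0);  // 0,0: library defaults (assumed constructor)
	t->prev = h;       // link back to the enclosing scope
	t->next = NULL;
	h->next = t;
	h = t;             // move h forward to the new scope
}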
Example #9
static void replicator_abort_all_requests(struct replicator_connection *conn)
{
	struct hash_iterate_context *iter;
	void *key, *value;

	iter = hash_table_iterate_init(conn->requests);
	while (hash_table_iterate(iter, conn->requests, &key, &value))
		conn->callback(FALSE, value);
	hash_table_iterate_deinit(&iter);
	hash_table_clear(conn->requests, TRUE);
}
Example #10
void mailbox_guid_cache_refresh(struct mailbox_list *list)
{
	struct mailbox_list_iterate_context *ctx;
	const struct mailbox_info *info;
	struct mailbox *box;
	struct mailbox_metadata metadata;
	struct mailbox_guid_cache_rec *rec;
	uint8_t *guid_p;

	if (!hash_table_is_created(list->guid_cache)) {
		list->guid_cache_pool =
			pool_alloconly_create("guid cache", 1024*16);
		hash_table_create(&list->guid_cache, list->guid_cache_pool, 0,
				  guid_128_hash, guid_128_cmp);
	} else {
		hash_table_clear(list->guid_cache, TRUE);
		p_clear(list->guid_cache_pool);
	}
	list->guid_cache_invalidated = FALSE;
	list->guid_cache_updated = FALSE;
	list->guid_cache_errors = FALSE;

	ctx = mailbox_list_iter_init(list, "*",
				     MAILBOX_LIST_ITER_SKIP_ALIASES |
				     MAILBOX_LIST_ITER_NO_AUTO_BOXES);
	while ((info = mailbox_list_iter_next(ctx)) != NULL) {
		if ((info->flags &
		     (MAILBOX_NOSELECT | MAILBOX_NONEXISTENT)) != 0)
			continue;

		box = mailbox_alloc(list, info->vname, 0);
		if (mailbox_get_metadata(box, MAILBOX_METADATA_GUID,
					 &metadata) < 0) {
			i_error("Couldn't get mailbox %s GUID: %s",
				info->vname, mailbox_get_last_internal_error(box, NULL));
			list->guid_cache_errors = TRUE;
		} else if ((rec = hash_table_lookup(list->guid_cache,
				(const uint8_t *)metadata.guid)) != NULL) {
			i_warning("Mailbox %s has duplicate GUID with %s: %s",
				  info->vname, rec->vname,
				  guid_128_to_string(metadata.guid));
		} else {
			rec = p_new(list->guid_cache_pool,
				    struct mailbox_guid_cache_rec, 1);
			memcpy(rec->guid, metadata.guid, sizeof(rec->guid));
			rec->vname = p_strdup(list->guid_cache_pool, info->vname);
			guid_p = rec->guid;
			hash_table_insert(list->guid_cache, guid_p, rec);
		}
		mailbox_free(&box);
	}
	if (mailbox_list_iter_deinit(&ctx) < 0)
		list->guid_cache_errors = TRUE;
}
Example #11
/*******************************************************************//**
Declare a cache empty, preparing it to be filled up. Not all resources
are freed because they can be reused. */
static
void
trx_i_s_cache_clear(
/*================*/
	trx_i_s_cache_t*	cache)	/*!< out: cache to clear */
{
	cache->innodb_trx.rows_used = 0;
	cache->innodb_locks.rows_used = 0;
	cache->innodb_lock_waits.rows_used = 0;

	hash_table_clear(cache->locks_hash);

	ha_storage_empty(&cache->storage);
}
Example #12
static void
auth_server_connection_remove_requests(struct auth_server_connection *conn)
{
	static const char *const temp_failure_args[] = { "temp", NULL };
	struct hash_iterate_context *iter;
	void *key, *value;

	iter = hash_table_iterate_init(conn->requests);
	while (hash_table_iterate(iter, &key, &value)) {
		struct auth_client_request *request = value;

		auth_client_request_server_input(request,
						 AUTH_REQUEST_STATUS_FAIL,
						 temp_failure_args);
	}
	hash_table_iterate_deinit(&iter);
	hash_table_clear(conn->requests, FALSE);
}
Example #13
static void
auth_server_connection_remove_requests(struct auth_server_connection *conn,
				       const char *disconnect_reason)
{
	static const char *const temp_failure_args[] = { "temp", NULL };
	struct hash_iterate_context *iter;
	void *key;
	struct auth_client_request *request;
	time_t created, oldest = 0;
	unsigned int request_count = 0;

	if (hash_table_count(conn->requests) == 0)
		return;

	iter = hash_table_iterate_init(conn->requests);
	while (hash_table_iterate(iter, conn->requests, &key, &request)) {
		if (!auth_client_request_is_aborted(request)) {
			request_count++;
			created = auth_client_request_get_create_time(request);
			if (oldest > created || oldest == 0)
				oldest = created;
		}

		auth_client_request_server_input(request,
			AUTH_REQUEST_STATUS_INTERNAL_FAIL,
			temp_failure_args);
	}
	hash_table_iterate_deinit(&iter);
	hash_table_clear(conn->requests, FALSE);

	if (request_count > 0) {
		i_warning("Auth connection closed with %u pending requests "
			  "(max %u secs, pid=%s, %s)", request_count,
			  (unsigned int)(ioloop_time - oldest),
			  my_pid, disconnect_reason);
	}
}
Example #14
/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	dict_table_t*	table;

	mutex_enter(&dict_sys->mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = FALSE;

	/* Clear the index->search_info->ref_count of every index in
	the data dictionary cache. */
	for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
	     table = UT_LIST_GET_NEXT(table_LRU, table)) {

		dict_index_t*	index;

		for (index = dict_table_get_first_index(table); index;
		     index = dict_table_get_next_index(index)) {

			index->search_info->ref_count = 0;
		}
	}

	mutex_exit(&dict_sys->mutex);

	/* Set all block->index = NULL. */
	buf_pool_clear_hash_index();

	/* Clear the adaptive hash index. */
	hash_table_clear(btr_search_sys->hash_index);
	mem_heap_empty(btr_search_sys->hash_index->heap);

	rw_lock_x_unlock(&btr_search_latch);
}
Example #15
enum edit_error
edit_process_picking
  (struct edit_picking* picking,
   const enum edit_selection_mode selection_mode)
{
  struct app_world* world = NULL;
  const app_pick_t* pick_list = NULL;
  size_t nb_picks = 0;
  size_t nb_picks_none = 0;
  size_t i = 0;
  enum app_error app_err = APP_NO_ERROR;
  enum edit_error edit_err = EDIT_NO_ERROR;

  if(UNLIKELY(!picking)) {
    edit_err = EDIT_INVALID_ARGUMENT;
    goto error;
  }

  APP(poll_picking(picking->app, &nb_picks, &pick_list));
  if(nb_picks == 0)
    goto exit;

  app_err = app_get_main_world(picking->app, &world);
  if(app_err != APP_NO_ERROR) {
    edit_err = app_to_edit_error(app_err);
    goto error;
  }

  SL(hash_table_clear(picking->picked_instances_htbl));

  for(i = 0; i < nb_picks; ++i) {
    const uint32_t pick_id = APP_PICK_ID_GET(pick_list[i]);
    const enum app_pick_group pick_group = APP_PICK_GROUP_GET(pick_list[i]);

    switch(pick_group) {
      case APP_PICK_GROUP_IMDRAW:
        edit_err = pick_imdraw(picking, pick_id);
        break;
      case APP_PICK_GROUP_WORLD:
        edit_err = pick_world(picking, world, pick_id, selection_mode);
        break;
      case APP_PICK_GROUP_NONE:
        ++nb_picks_none;
        break;
      default: assert(0); break; /* Unreachable code */
    }

    if(edit_err != EDIT_NO_ERROR)
      goto error;
  }

  if(nb_picks == nb_picks_none) {
    edit_err = edit_clear_model_instance_selection
      (picking->instance_selection);
  }

exit:
  return edit_err;
error:
  goto exit;
}
Example #16
/* The core of recursive retrieving.  Endless recursion is avoided by
   having all URLs stored to a linked list of URLs, which is checked
   before loading any URL.  That way no URL can get loaded twice.

   The function also supports specification of maximum recursion depth
   and a number of other goodies.  */
uerr_t
recursive_retrieve (const char *file, const char *this_url)
{
  char *constr, *filename, *newloc;
  char *canon_this_url = NULL;
  int dt, inl, dash_p_leaf_HTML = FALSE;
  int meta_disallow_follow;
  int this_url_ftp;            /* See below the explanation */
  uerr_t err;
  struct urlinfo *rurl;
  urlpos *url_list, *cur_url;
  char *rfile; /* For robots */
  struct urlinfo *u;

  assert (this_url != NULL);
  assert (file != NULL);
  /* If quota was exceeded earlier, bail out.  */
  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  /* Cache the current URL in the list.  */
  if (first_time)
    {
      /* These three operations need to be done only once per Wget
         run.  They should probably be at a different location.  */
      if (!undesirable_urls)
	undesirable_urls = make_string_hash_table (0);

      hash_table_clear (undesirable_urls);
      string_set_add (undesirable_urls, this_url);
      /* Enter this_url to the hash table, in original and "enhanced" form.  */
      u = newurl ();
      err = parseurl (this_url, u, 0);
      if (err == URLOK)
	{
	  string_set_add (undesirable_urls, u->url);
	  if (opt.no_parent)
	    base_dir = xstrdup (u->dir); /* Set the base dir.  */
	  /* Set the canonical this_url to be sent as referer.  This
	     problem exists only when running the first time.  */
	  canon_this_url = xstrdup (u->url);
	}
      else
	{
	  DEBUGP (("Double yuck!  The *base* URL is broken.\n"));
	  base_dir = NULL;
	}
      freeurl (u, 1);
      depth = 1;
      robots_host = NULL;
      forbidden = NULL;
      first_time = 0;
    }
  else
    ++depth;

  if (opt.reclevel != INFINITE_RECURSION && depth > opt.reclevel)
    /* We've exceeded the maximum recursion depth specified by the user. */
    {
      if (opt.page_requisites && depth <= opt.reclevel + 1)
	/* When -p is specified, we can do one more partial recursion from the
	   "leaf nodes" on the HTML document tree.  The recursion is partial in
	   that we won't traverse any <A> or <AREA> tags, nor any <LINK> tags
	   except for <LINK REL="stylesheet">. */
	dash_p_leaf_HTML = TRUE;
      else
	/* Either -p wasn't specified or it was and we've already gone the one
	   extra (pseudo-)level that it affords us, so we need to bail out. */
	{
	  DEBUGP (("Recursion depth %d exceeded max. depth %d.\n",
		   depth, opt.reclevel));
	  --depth;
	  return RECLEVELEXC;
	}
    }

  /* Determine whether this_url is an FTP URL.  If it is, it means
     that the retrieval is done through proxy.  In that case, FTP
     links will be followed by default and recursion will not be
     turned off when following them.  */
  this_url_ftp = (urlproto (this_url) == URLFTP);

  /* Get the URL-s from an HTML file: */
  url_list = get_urls_html (file, canon_this_url ? canon_this_url : this_url,
			    dash_p_leaf_HTML, &meta_disallow_follow);

  if (opt.use_robots && meta_disallow_follow)
    {
      /* The META tag says we are not to follow this file.  Respect
         that.  */
      free_urlpos (url_list);
      url_list = NULL;
    }

  /* Decide what to do with each of the URLs.  A URL will be loaded if
     it meets several requirements, discussed later.  */
  for (cur_url = url_list; cur_url; cur_url = cur_url->next)
    {
      /* If quota was exceeded earlier, bail out.  */
      if (downloaded_exceeds_quota ())
	break;
      /* Parse the URL for convenient use in other functions, as well
	 as to get the optimized form.  It also checks URL integrity.  */
      u = newurl ();
      if (parseurl (cur_url->url, u, 0) != URLOK)
	{
	  DEBUGP (("Yuck!  A bad URL.\n"));
	  freeurl (u, 1);
	  continue;
	}
      if (u->proto == URLFILE)
	{
	  DEBUGP (("Nothing to do with file:// around here.\n"));
	  freeurl (u, 1);
	  continue;
	}
      assert (u->url != NULL);
      constr = xstrdup (u->url);

      /* Several checkings whether a file is acceptable to load:
	 1. check if URL is ftp, and we don't load it
	 2. check for relative links (if relative_only is set)
	 3. check for domain
	 4. check for no-parent
	 5. check for excludes && includes
	 6. check for suffix
	 7. check for same host (if spanhost is unset), with possible
	 gethostbyname baggage
	 8. check for robots.txt

	 Addendum: If the URL is FTP, and it is to be loaded, only the
	 domain and suffix settings are "stronger".

	 Note that .html and (yuck) .htm will get loaded regardless of
	 suffix rules (but that is remedied later with unlink) unless
	 the depth equals the maximum depth.

	 More time- and memory- consuming tests should be put later on
	 the list.  */

      /* inl is set if the URL we are working on (constr) is stored in
	 undesirable_urls.  Using it is crucial to avoid unnecessary
	 repeated continuous hits to the hash table.  */
      inl = string_set_contains (undesirable_urls, constr);

      /* If it is FTP, and FTP is not followed, chuck it out.  */
      if (!inl)
	if (u->proto == URLFTP && !opt.follow_ftp && !this_url_ftp)
	  {
	    DEBUGP (("Uh, it is FTP but i'm not in the mood to follow FTP.\n"));
	    string_set_add (undesirable_urls, constr);
	    inl = 1;
	  }
      /* If it is absolute link and they are not followed, chuck it
	 out.  */
      if (!inl && u->proto != URLFTP)
	if (opt.relative_only && !cur_url->link_relative_p)
	  {
	    DEBUGP (("It doesn't really look like a relative link.\n"));
	    string_set_add (undesirable_urls, constr);
	    inl = 1;
	  }
      /* If its domain is not to be accepted/looked-up, chuck it out.  */
      if (!inl)
	if (!accept_domain (u))
	  {
	    DEBUGP (("I don't like the smell of that domain.\n"));
	    string_set_add (undesirable_urls, constr);
	    inl = 1;
	  }
      /* Check for parent directory.  */
      if (!inl && opt.no_parent
	  /* If the new URL is FTP and the old was not, ignore
             opt.no_parent.  */
	  && !(!this_url_ftp && u->proto == URLFTP))
	{
	  /* Check for base_dir first.  */
	  if (!(base_dir && frontcmp (base_dir, u->dir)))
	    {
	      /* Failing that, check for parent dir.  */
	      struct urlinfo *ut = newurl ();
	      if (parseurl (this_url, ut, 0) != URLOK)
		DEBUGP (("Double yuck!  The *base* URL is broken.\n"));
	      else if (!frontcmp (ut->dir, u->dir))
		{
		  /* Failing that too, kill the URL.  */
		  DEBUGP (("Trying to escape parental guidance with no_parent on.\n"));
		  string_set_add (undesirable_urls, constr);
		  inl = 1;
		}
	      freeurl (ut, 1);
	    }
	}
      /* If the file does not match the acceptance list, or is on the
	 rejection list, chuck it out.  The same goes for the
	 directory exclude- and include- lists.  */
      if (!inl && (opt.includes || opt.excludes))
	{
	  if (!accdir (u->dir, ALLABS))
	    {
	      DEBUGP (("%s (%s) is excluded/not-included.\n", constr, u->dir));
	      string_set_add (undesirable_urls, constr);
	      inl = 1;
	    }
	}
      if (!inl)
	{
	  char *suf = NULL;
	  /* We check for acceptance/rejection rules only for non-HTML
	     documents.  Since we don't know whether they really are
	     HTML, it will be deduced from (an OR-ed list):

	     1) u->file is "" (meaning it is a directory)
	     2) suffix exists, AND:
	     a) it is "html", OR
	     b) it is "htm"

	     If the file *is* supposed to be HTML, it will *not* be
            subject to acc/rej rules, unless a finite maximum depth has
            been specified and the current depth is the maximum depth. */
	  if (!
	      (!*u->file
	       || (((suf = suffix (constr)) != NULL)
                  && ((!strcmp (suf, "html") || !strcmp (suf, "htm"))
                      && ((opt.reclevel != INFINITE_RECURSION) &&
			  (depth != opt.reclevel))))))
	    {
	      if (!acceptable (u->file))
		{
		  DEBUGP (("%s (%s) does not match acc/rej rules.\n",
			  constr, u->file));
		  string_set_add (undesirable_urls, constr);
		  inl = 1;
		}
	    }
	  FREE_MAYBE (suf);
	}
      /* Optimize the URL (which includes possible DNS lookup) only
	 after all other possibilities have been exhausted.  */
      if (!inl)
	{
	  if (!opt.simple_check)
	    opt_url (u);
	  else
	    {
	      char *p;
	      /* Just lowercase the hostname.  */
	      for (p = u->host; *p; p++)
		*p = TOLOWER (*p);
	      xfree (u->url);
	      u->url = str_url (u, 0);
	    }
	  xfree (constr);
	  constr = xstrdup (u->url);
	  string_set_add (undesirable_urls, constr);
	  if (!inl && !((u->proto == URLFTP) && !this_url_ftp))
	    if (!opt.spanhost && this_url && !same_host (this_url, constr))
	      {
		DEBUGP (("This is not the same hostname as the parent's.\n"));
		string_set_add (undesirable_urls, constr);
		inl = 1;
	      }
	}
      /* What about robots.txt?  */
      if (!inl && opt.use_robots && u->proto == URLHTTP)
	{
	  /* Since Wget knows about only one set of robot rules at a
	     time, /robots.txt must be reloaded whenever a new host is
	     accessed.

	     robots_host holds the host the current `forbid' variable
	     is assigned to.  */
	  if (!robots_host || !same_host (robots_host, u->host))
	    {
	      FREE_MAYBE (robots_host);
	      /* Now make robots_host the new host, no matter what the
		 result will be.  So if there is no /robots.txt on the
		 site, Wget will not retry getting robots all the
		 time.  */
	      robots_host = xstrdup (u->host);
	      free_vec (forbidden);
	      forbidden = NULL;
	      err = retrieve_robots (constr, ROBOTS_FILENAME);
	      if (err == ROBOTSOK)
		{
		  rurl = robots_url (constr, ROBOTS_FILENAME);
		  rfile = url_filename (rurl);
		  forbidden = parse_robots (rfile);
		  freeurl (rurl, 1);
		  xfree (rfile);
		}
	    }

	  /* Now that we have (or don't have) robots, we can check for
	     them.  */
	  if (!robots_match (u, forbidden))
	    {
	      DEBUGP (("Stuffing %s because %s forbids it.\n", this_url,
		       ROBOTS_FILENAME));
	      string_set_add (undesirable_urls, constr);
	      inl = 1;
	    }
	}

      filename = NULL;
      /* If it wasn't chucked out, do something with it.  */
      if (!inl)
	{
	  DEBUGP (("I've decided to load it -> "));
	  /* Add it to the list of already-loaded URL-s.  */
	  string_set_add (undesirable_urls, constr);
	  /* Automatically followed FTPs will *not* be downloaded
	     recursively.  */
	  if (u->proto == URLFTP)
	    {
	      /* Don't you adore side-effects?  */
	      opt.recursive = 0;
	    }
	  /* Reset its type.  */
	  dt = 0;
	  /* Retrieve it.  */
	  retrieve_url (constr, &filename, &newloc,
		       canon_this_url ? canon_this_url : this_url, &dt);
	  if (u->proto == URLFTP)
	    {
	      /* Restore...  */
	      opt.recursive = 1;
	    }
	  if (newloc)
	    {
	      xfree (constr);
	      constr = newloc;
	    }
	  /* If there was no error, and the type is text/html, parse
	     it recursively.  */
	  if (dt & TEXTHTML)
	    {
	      if (dt & RETROKF)
		recursive_retrieve (filename, constr);
	    }
	  else
	    DEBUGP (("%s is not text/html so we don't chase.\n",
		     filename ? filename: "(null)"));

	  if (opt.delete_after || (filename && !acceptable (filename)))
	    /* Either --delete-after was specified, or we loaded this otherwise
	       rejected (e.g. by -R) HTML file just so we could harvest its
	       hyperlinks -- in either case, delete the local file. */
	    {
	      DEBUGP (("Removing file due to %s in recursive_retrieve():\n",
		       opt.delete_after ? "--delete-after" :
		       "recursive rejection criteria"));
	      logprintf (LOG_VERBOSE,
			 (opt.delete_after ? _("Removing %s.\n")
			  : _("Removing %s since it should be rejected.\n")),
			 filename);
	      if (unlink (filename))
		logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
	      dt &= ~RETROKF;
	    }

	  /* If everything was OK, and links are to be converted, let's
	     store the local filename.  */
	  if (opt.convert_links && (dt & RETROKF) && (filename != NULL))
	    {
	      cur_url->convert = CO_CONVERT_TO_RELATIVE;
	      cur_url->local_name = xstrdup (filename);
	    }
	}
      else
	DEBUGP (("%s already in list, so we don't load.\n", constr));
      /* Free filename and constr.  */
      FREE_MAYBE (filename);
      FREE_MAYBE (constr);
      freeurl (u, 1);
      /* Increment the pbuf for the appropriate size.  */
    }
  if (opt.convert_links && !opt.delete_after)
    /* This is merely the first pass: the links that have been
       successfully downloaded are converted.  In the second pass,
       convert_all_links() will also convert those links that have NOT
       been downloaded to their canonical form.  */
    convert_links (file, url_list);
  /* Free the linked list of URL-s.  */
  free_urlpos (url_list);
  /* Free the canonical this_url.  */
  FREE_MAYBE (canon_this_url);
  /* Decrement the recursion depth.  */
  --depth;
  if (downloaded_exceeds_quota ())
    return QUOTEXC;
  else
    return RETROK;
}
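The header comment of recursive_retrieve() describes its dedup idiom: every candidate URL is checked against the undesirable_urls string set before loading, and added to it once processed. Distilled from the loop above, the core check-then-mark step looks roughly like this (only names already used in the excerpt appear; treat it as an illustrative sketch, not a drop-in patch):

  /* Hypothetical distilled sketch of the dedup step. */
  if (!string_set_contains (undesirable_urls, constr))
    {
      /* Not seen before: remember it so it can never be loaded twice... */
      string_set_add (undesirable_urls, constr);
      /* ...then retrieve constr and, if it is HTML, recurse into it. */
    }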
Example #17
static void maildir_keywords_clear(struct maildir_keywords *mk)
{
	array_clear(&mk->list);
	hash_table_clear(mk->hash, TRUE);
	p_clear(mk->pool);
}
Example #18
void hash_table_free(hash_table htable, bool free_items_in_table)
{
    hash_table_clear(htable, free_items_in_table);
    free(htable.table);
}