Example #1
0
static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct INFTLrecord *inftl;
	unsigned long temp;

	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
		return;
	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */
	if (memcmp(mtd->name, "DiskOnChip", 10))
		return;

	if (!mtd->block_isbad) {
		printk(KERN_ERR
"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
		return;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);

	inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);

	if (!inftl) {
		printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
		return;
	}

	inftl->mbd.mtd = mtd;
	inftl->mbd.devnum = -1;

	inftl->mbd.tr = tr;

	if (INFTL_mount(inftl) < 0) {
		printk(KERN_WARNING "INFTL: could not mount device\n");
		kfree(inftl);
		return;
	}

	/* OK, it's a new one. Set up all the data structures. */

	/* Calculate geometry */
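	/*
	 * Start from a nominal 1024-cylinder, 16-head geometry, then adjust
	 * sectors, heads and finally cylinders so that C*H*S approximates
	 * mbd.size.
	 */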
	inftl->cylinders = 1024;
	inftl->heads = 16;

	temp = inftl->cylinders * inftl->heads;
	inftl->sectors = inftl->mbd.size / temp;
	if (inftl->mbd.size % temp) {
		inftl->sectors++;
		temp = inftl->cylinders * inftl->sectors;
		inftl->heads = inftl->mbd.size / temp;

		if (inftl->mbd.size % temp) {
			inftl->heads++;
			temp = inftl->heads * inftl->sectors;
			inftl->cylinders = inftl->mbd.size / temp;
		}
	}

	if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {
		/*
		  Oh no we don't have
		   mbd.size == heads * cylinders * sectors
		*/
		printk(KERN_WARNING "INFTL: cannot calculate a geometry to "
		       "match size of 0x%lx.\n", inftl->mbd.size);
		printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d "
			"(== 0x%lx sects)\n",
			inftl->cylinders, inftl->heads , inftl->sectors,
			(long)inftl->cylinders * (long)inftl->heads *
			(long)inftl->sectors );
	}

	if (add_mtd_blktrans_dev(&inftl->mbd)) {
		kfree(inftl->PUtable);
		kfree(inftl->VUtable);
		kfree(inftl);
		return;
	}
#ifdef PSYCHO_DEBUG
	printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
#endif
	return;
}
Example #2
0
/*
 * int ut_report_state
 *
 * Checks if the `state' differs from the old state and creates a notification
 * if appropriate.
 * Does not fail.
 */
static int ut_report_state(const data_set_t *ds, const value_list_t *vl,
                           const threshold_t *th, const gauge_t *values,
                           int ds_index, int state) { /* {{{ */
  int state_old;
  notification_t n;

  char *buf;
  size_t bufsize;

  int status;

  /* Check if hits matched */
  if ((th->hits != 0)) {
    int hits = uc_get_hits(ds, vl);
    /* STATE_OKAY resets hits unless PERSIST_OK flag is set. Hits resets if
     * threshold is hit. */
    if (((state == STATE_OKAY) && ((th->flags & UT_FLAG_PERSIST_OK) == 0)) ||
        (hits > th->hits)) {
      DEBUG("ut_report_state: reset uc_get_hits = 0");
      uc_set_hits(ds, vl, 0); /* reset hit counter and notify */
    } else {
      DEBUG("ut_report_state: th->hits = %d, uc_get_hits = %d", th->hits,
            uc_get_hits(ds, vl));
      (void)uc_inc_hits(ds, vl, 1); /* increase hit counter */
      return (0);
    }
  } /* end check hits */

  state_old = uc_get_state(ds, vl);

  /* If the state didn't change, report if `persistent' is specified. If the
   * state is `okay', then only report if `persist_ok` flag is set. */
  if (state == state_old) {
    if ((th->flags & UT_FLAG_PERSIST) == 0)
      return (0);
    else if ((state == STATE_OKAY) && ((th->flags & UT_FLAG_PERSIST_OK) == 0))
      return (0);
  }

  if (state != state_old)
    uc_set_state(ds, vl, state);

  NOTIFICATION_INIT_VL(&n, vl);

  buf = n.message;
  bufsize = sizeof(n.message);

  if (state == STATE_OKAY)
    n.severity = NOTIF_OKAY;
  else if (state == STATE_WARNING)
    n.severity = NOTIF_WARNING;
  else
    n.severity = NOTIF_FAILURE;

  n.time = vl->time;

  status = ssnprintf(buf, bufsize, "Host %s, plugin %s", vl->host, vl->plugin);
  buf += status;
  bufsize -= status;

  if (vl->plugin_instance[0] != '\0') {
    status = ssnprintf(buf, bufsize, " (instance %s)", vl->plugin_instance);
    buf += status;
    bufsize -= status;
  }

  status = ssnprintf(buf, bufsize, " type %s", vl->type);
  buf += status;
  bufsize -= status;

  if (vl->type_instance[0] != '\0') {
    status = ssnprintf(buf, bufsize, " (instance %s)", vl->type_instance);
    buf += status;
    bufsize -= status;
  }

  plugin_notification_meta_add_string(&n, "DataSource", ds->ds[ds_index].name);
  plugin_notification_meta_add_double(&n, "CurrentValue", values[ds_index]);
  plugin_notification_meta_add_double(&n, "WarningMin", th->warning_min);
  plugin_notification_meta_add_double(&n, "WarningMax", th->warning_max);
  plugin_notification_meta_add_double(&n, "FailureMin", th->failure_min);
  plugin_notification_meta_add_double(&n, "FailureMax", th->failure_max);

  /* Send an okay notification */
  if (state == STATE_OKAY) {
    if (state_old == STATE_MISSING)
      ssnprintf(buf, bufsize, ": Value is no longer missing.");
    else
      ssnprintf(buf, bufsize, ": All data sources are within range again. "
                              "Current value of \"%s\" is %f.",
                ds->ds[ds_index].name, values[ds_index]);
  } else {
    double min;
    double max;

    min = (state == STATE_ERROR) ? th->failure_min : th->warning_min;
    max = (state == STATE_ERROR) ? th->failure_max : th->warning_max;

    if (th->flags & UT_FLAG_INVERT) {
      if (!isnan(min) && !isnan(max)) {
        ssnprintf(buf, bufsize,
                  ": Data source \"%s\" is currently "
                  "%f. That is within the %s region of %f%s and %f%s.",
                  ds->ds[ds_index].name, values[ds_index],
                  (state == STATE_ERROR) ? "failure" : "warning", min,
                  ((th->flags & UT_FLAG_PERCENTAGE) != 0) ? "%" : "", max,
                  ((th->flags & UT_FLAG_PERCENTAGE) != 0) ? "%" : "");
      } else {
        ssnprintf(buf, bufsize, ": Data source \"%s\" is currently "
                                "%f. That is %s the %s threshold of %f%s.",
                  ds->ds[ds_index].name, values[ds_index],
                  isnan(min) ? "below" : "above",
                  (state == STATE_ERROR) ? "failure" : "warning",
                  isnan(min) ? max : min,
                  ((th->flags & UT_FLAG_PERCENTAGE) != 0) ? "%" : "");
      }
    } else if (th->flags & UT_FLAG_PERCENTAGE) {
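      /* Percentage thresholds compare this data source against the sum of
       * all non-NaN data sources in the value list. */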
      gauge_t value;
      gauge_t sum;

      sum = 0.0;
      for (size_t i = 0; i < vl->values_len; i++) {
        if (isnan(values[i]))
          continue;

        sum += values[i];
      }

      if (sum == 0.0)
        value = NAN;
      else
        value = 100.0 * values[ds_index] / sum;

      ssnprintf(buf, bufsize,
                ": Data source \"%s\" is currently "
                "%g (%.2f%%). That is %s the %s threshold of %.2f%%.",
                ds->ds[ds_index].name, values[ds_index], value,
                (value < min) ? "below" : "above",
                (state == STATE_ERROR) ? "failure" : "warning",
                (value < min) ? min : max);
    } else /* is not inverted */
    {
      ssnprintf(buf, bufsize, ": Data source \"%s\" is currently "
                              "%f. That is %s the %s threshold of %f.",
                ds->ds[ds_index].name, values[ds_index],
                (values[ds_index] < min) ? "below" : "above",
                (state == STATE_ERROR) ? "failure" : "warning",
                (values[ds_index] < min) ? min : max);
    }
  }

  plugin_dispatch_notification(&n);

  plugin_notification_meta_free(n.meta);
  return (0);
} /* }}} int ut_report_state */
Example #3
0
int nl_send_del_route_msg(struct in_addr dest)
{
	DEBUG(LOG_DEBUG, 0, "Send DELROUTE to kernel: %s", ip_to_str(dest));
	return nl_create_and_send_msg(nlsock, KAODVM_DELROUTE, &dest, sizeof(dest));
}
/* list all domain local groups */
static NTSTATUS enum_local_groups(struct winbindd_domain *domain,
				TALLOC_CTX *mem_ctx,
				uint32 *num_entries, 
				struct acct_info **info)
{
	struct winbind_cache *cache = get_cache(domain);
	struct cache_entry *centry = NULL;
	NTSTATUS status;
	unsigned int i;

	if (!cache->tdb)
		goto do_query;

	centry = wcache_fetch(cache, domain, "GL/%s/local", domain->name);
	if (!centry)
		goto do_query;

	*num_entries = centry_uint32(centry);
	
	if (*num_entries == 0)
		goto do_cached;

	(*info) = talloc(mem_ctx, sizeof(**info) * (*num_entries));
	if (! (*info))
		smb_panic("enum_local_groups out of memory");
	for (i=0; i<(*num_entries); i++) {
		fstrcpy((*info)[i].acct_name, centry_string(centry, mem_ctx));
		fstrcpy((*info)[i].acct_desc, centry_string(centry, mem_ctx));
		(*info)[i].rid = centry_uint32(centry);
	}

do_cached:	

	/* If we are returning cached data and the domain controller
	   is down then we don't know whether the data is up to date
	   or not.  Return NT_STATUS_MORE_PROCESSING_REQUIRED to
	   indicate this. */

	if (wcache_server_down(domain)) {
		DEBUG(10, ("enum_local_groups: returning cached user list and server was down\n"));
		status = NT_STATUS_MORE_PROCESSING_REQUIRED;
	} else
		status = centry->status;

	DEBUG(10,("enum_local_groups: [Cached] - cached list for domain %s status %s\n",
		domain->name, get_friendly_nt_error_msg(status) ));

	centry_free(centry);
	return status;

do_query:
	*num_entries = 0;
	*info = NULL;

	/* Return status value returned by seq number check */

	if (!NT_STATUS_IS_OK(domain->last_status))
		return domain->last_status;

	DEBUG(10,("enum_local_groups: [Cached] - doing backend query for list for domain %s\n",
		domain->name ));

	status = domain->backend->enum_local_groups(domain, mem_ctx, num_entries, info);

	/* and save it */
	refresh_sequence_number(domain, False);
	centry = centry_start(domain, status);
	if (!centry)
		goto skip_save;
	centry_put_uint32(centry, *num_entries);
	for (i=0; i<(*num_entries); i++) {
		centry_put_string(centry, (*info)[i].acct_name);
		centry_put_string(centry, (*info)[i].acct_desc);
		centry_put_uint32(centry, (*info)[i].rid);
	}
	centry_end(centry, "GL/%s/local", domain->name);
	centry_free(centry);

skip_save:
	return status;
}
Example #5
0
int fd_close_posix(struct files_struct *fsp)
{
	int saved_errno = 0;
	int ret;
	int *fd_array = NULL;
	size_t count, i;

	if (!lp_locking(fsp->conn->params) ||
	    !lp_posix_locking(fsp->conn->params))
	{
		/*
		 * No locking or POSIX to worry about or we want POSIX semantics
		 * which will lose all locks on all fd's open on this dev/inode,
		 * just close.
		 */
		return close(fsp->fh->fd);
	}

	if (get_windows_lock_ref_count(fsp)) {

		/*
		 * There are outstanding locks on this dev/inode pair on
		 * other fds. Add our fd to the pending close tdb and set
		 * fsp->fh->fd to -1.
		 */

		add_fd_to_close_entry(fsp);
		return 0;
	}

	/*
	 * No outstanding locks. Get the pending close fd's
	 * from the tdb and close them all.
	 */

	count = get_posix_pending_close_entries(talloc_tos(), fsp, &fd_array);

	if (count) {
		DEBUG(10,("fd_close_posix: doing close on %u fd's.\n",
			  (unsigned int)count));

		for(i = 0; i < count; i++) {
			if (close(fd_array[i]) == -1) {
				saved_errno = errno;
			}
		}

		/*
		 * Delete all fd's stored in the tdb
		 * for this dev/inode pair.
		 */

		delete_close_entries(fsp);
	}

	TALLOC_FREE(fd_array);

	/* Don't need a lock ref count on this dev/ino anymore. */
	delete_windows_lock_ref_count(fsp);

	/*
	 * Finally close the fd associated with this fsp.
	 */

	ret = close(fsp->fh->fd);

	if (ret == 0 && saved_errno != 0) {
		errno = saved_errno;
		ret = -1;
	}

	return ret;
}
static NTSTATUS lookup_groupmem(struct winbindd_domain *domain,
				TALLOC_CTX *mem_ctx,
				DOM_SID *group_sid, uint32 *num_names, 
				DOM_SID ***sid_mem, char ***names, 
				uint32 **name_types)
{
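	/* Serve group membership from the winbindd cache when possible; on a
	 * cache miss, query the backend and write the result back to the cache. */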
	struct winbind_cache *cache = get_cache(domain);
	struct cache_entry *centry = NULL;
	NTSTATUS status;
	unsigned int i;
	fstring sid_string;

	if (!cache->tdb)
		goto do_query;

	centry = wcache_fetch(cache, domain, "GM/%s", sid_to_string(sid_string, group_sid));
	if (!centry)
		goto do_query;

	*num_names = centry_uint32(centry);
	
	if (*num_names == 0)
		goto do_cached;

	(*sid_mem) = talloc(mem_ctx, sizeof(**sid_mem) * (*num_names));
	(*names) = talloc(mem_ctx, sizeof(**names) * (*num_names));
	(*name_types) = talloc(mem_ctx, sizeof(**name_types) * (*num_names));

	if (! (*sid_mem) || ! (*names) || ! (*name_types)) {
		smb_panic("lookup_groupmem out of memory");
	}

	for (i=0; i<(*num_names); i++) {
		(*sid_mem)[i] = centry_sid(centry, mem_ctx);
		(*names)[i] = centry_string(centry, mem_ctx);
		(*name_types)[i] = centry_uint32(centry);
	}

do_cached:	
	status = centry->status;

	DEBUG(10,("lookup_groupmem: [Cached] - cached info for domain %s status %s\n",
		domain->name, get_friendly_nt_error_msg(status) ));

	centry_free(centry);
	return status;

do_query:
	(*num_names) = 0;
	(*sid_mem) = NULL;
	(*names) = NULL;
	(*name_types) = NULL;
	
	/* Return status value returned by seq number check */

	if (!NT_STATUS_IS_OK(domain->last_status))
		return domain->last_status;

	DEBUG(10,("lookup_groupmem: [Cached] - doing backend query for info for domain %s\n",
		domain->name ));

	status = domain->backend->lookup_groupmem(domain, mem_ctx, group_sid, num_names, 
						  sid_mem, names, name_types);

	/* and save it */
	refresh_sequence_number(domain, False);
	centry = centry_start(domain, status);
	if (!centry)
		goto skip_save;
	centry_put_uint32(centry, *num_names);
	for (i=0; i<(*num_names); i++) {
		centry_put_sid(centry, (*sid_mem)[i]);
		centry_put_string(centry, (*names)[i]);
		centry_put_uint32(centry, (*name_types)[i]);
	}	
	centry_end(centry, "GM/%s", sid_to_string(sid_string, group_sid));
	centry_free(centry);

skip_save:
	return status;
}
/* Query display info. This is the basic user list fn */
static NTSTATUS query_user_list(struct winbindd_domain *domain,
				TALLOC_CTX *mem_ctx,
				uint32 *num_entries, 
				WINBIND_USERINFO **info)
{
	struct winbind_cache *cache = get_cache(domain);
	struct cache_entry *centry = NULL;
	NTSTATUS status;
	unsigned int i, retry;

	if (!cache->tdb)
		goto do_query;

	centry = wcache_fetch(cache, domain, "UL/%s", domain->name);
	if (!centry)
		goto do_query;

	*num_entries = centry_uint32(centry);
	
	if (*num_entries == 0)
		goto do_cached;

	(*info) = talloc(mem_ctx, sizeof(**info) * (*num_entries));
	if (! (*info))
		smb_panic("query_user_list out of memory");
	for (i=0; i<(*num_entries); i++) {
		(*info)[i].acct_name = centry_string(centry, mem_ctx);
		(*info)[i].full_name = centry_string(centry, mem_ctx);
		(*info)[i].user_sid = centry_sid(centry, mem_ctx);
		(*info)[i].group_sid = centry_sid(centry, mem_ctx);
	}

do_cached:	
	status = centry->status;

	DEBUG(10,("query_user_list: [Cached] - cached list for domain %s status %s\n",
		domain->name, get_friendly_nt_error_msg(status) ));

	centry_free(centry);
	return status;

do_query:
	*num_entries = 0;
	*info = NULL;

	/* Return status value returned by seq number check */

	if (!NT_STATUS_IS_OK(domain->last_status))
		return domain->last_status;

	/* Put the query_user_list() in a retry loop.  There appears to be
	 * some bug either with Windows 2000 or Samba's handling of large
	 * rpc replies.  This manifests itself as sudden disconnection
	 * at a random point in the enumeration of a large (60k) user list.
	 * The retry loop simply tries the operation again. )-:  It's not
	 * pretty but an acceptable workaround until we work out what the
	 * real problem is. */

	retry = 0;
	do {

		DEBUG(10,("query_user_list: [Cached] - doing backend query for list for domain %s\n",
			domain->name ));

		status = domain->backend->query_user_list(domain, mem_ctx, num_entries, info);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(3, ("query_user_list: returned 0x%08x, retrying\n", NT_STATUS_V(status)));
			if (NT_STATUS_V(status) == NT_STATUS_V(NT_STATUS_UNSUCCESSFUL)) {
				DEBUG(3, ("query_user_list: flushing connection cache\n"));
				winbindd_cm_flush();
			}
		}

	} while (NT_STATUS_V(status) == NT_STATUS_V(NT_STATUS_UNSUCCESSFUL) && 
		 (retry++ < 5));

	/* and save it */
	refresh_sequence_number(domain, False);
	centry = centry_start(domain, status);
	if (!centry)
		goto skip_save;
	centry_put_uint32(centry, *num_entries);
	for (i=0; i<(*num_entries); i++) {
		centry_put_string(centry, (*info)[i].acct_name);
		centry_put_string(centry, (*info)[i].full_name);
		centry_put_sid(centry, (*info)[i].user_sid);
		centry_put_sid(centry, (*info)[i].group_sid);
		if (domain->backend->consistent) {
			/* when the backend is consistent we can pre-prime some mappings */
			wcache_save_name_to_sid(domain, NT_STATUS_OK, 
						(*info)[i].acct_name, 
						(*info)[i].user_sid,
						SID_NAME_USER);
			wcache_save_sid_to_name(domain, NT_STATUS_OK, 
						(*info)[i].user_sid,
						(*info)[i].acct_name, 
						SID_NAME_USER);
			wcache_save_user(domain, NT_STATUS_OK, &(*info)[i]);
		}
	}	
	centry_end(centry, "UL/%s", domain->name);
	centry_free(centry);

skip_save:
	return status;
}
// ---------------------------------------------------------------------------
// CHssLoginTimer::RunL
// ---------------------------------------------------------------------------
//  
void CHssLoginTimer::RunL()
	{
	DEBUG("CHssLoginTimer::RunL()");
    iObserver.LoginTimeout();
	}    
Example #9
0
/*
  handle stdout/stderr from the child
 */
static void samba_runcmd_io_handler(struct tevent_context *ev,
				    struct tevent_fd *fde,
				    uint16_t flags,
				    void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(private_data,
				 struct tevent_req);
	struct samba_runcmd_state *state = tevent_req_data(req,
					   struct samba_runcmd_state);
	int level;
	char *p;
	int n, fd;

	if (fde == state->fde_stdout) {
		level = state->stdout_log_level;
		fd = state->fd_stdout;
	} else if (fde == state->fde_stderr) {
		level = state->stderr_log_level;
		fd = state->fd_stderr;
	} else {
		return;
	}

	if (!(flags & TEVENT_FD_READ)) {
		return;
	}

	n = read(fd, &state->buf[state->buf_used],
		 sizeof(state->buf) - state->buf_used);
	if (n > 0) {
		state->buf_used += n;
	} else if (n == 0) {
		if (fde == state->fde_stdout) {
			talloc_free(fde);
			state->fde_stdout = NULL;
		}
		if (fde == state->fde_stderr) {
			talloc_free(fde);
			state->fde_stderr = NULL;
		}
		if (state->fde_stdout == NULL &&
		    state->fde_stderr == NULL) {
			int status;
			/* the child has closed both stdout and
			 * stderr, assume its dead */
			pid_t pid = waitpid(state->pid, &status, 0);
			if (pid != state->pid) {
				if (errno == ECHILD) {
					/* this happens when the
					   parent has set SIGCHLD to
					   SIG_IGN. In that case we
					   can only get error
					   information for the child
					   via its logging. We should
					   stop using SIG_IGN on
					   SIGCHLD in the standard
					   process model.
					*/
					DEBUG(0, ("Error in waitpid() unexpectedly got ECHILD "
						  "for %s child %d - %s, "
						  "someone has set SIGCHLD to SIG_IGN!\n",
					state->arg0, (int)state->pid, strerror(errno)));
					tevent_req_error(req, errno);
					return;
				}
				DEBUG(0,("Error in waitpid() for child %s - %s \n",
					 state->arg0, strerror(errno)));
				if (errno == 0) {
					errno = ECHILD;
				}
				tevent_req_error(req, errno);
				return;
			}
			status = WEXITSTATUS(status);
			DEBUG(3,("Child %s exited with status %d - %s\n",
				 state->arg0, status, strerror(status)));
			if (status != 0) {
				tevent_req_error(req, status);
				return;
			}

			tevent_req_done(req);
			return;
		}
		return;
	}
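	/*
	 * Log every complete line currently buffered, stripping a trailing \r,
	 * then shift any partial line to the front of the buffer.
	 */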

	while (state->buf_used > 0 &&
	       (p = (char *)memchr(state->buf, '\n', state->buf_used)) != NULL) {
		int n1 = (p - state->buf)+1;
		int n2 = n1 - 1;
		/* swallow \r from child processes */
		if (n2 > 0 && state->buf[n2-1] == '\r') {
			n2--;
		}
		DEBUG(level,("%s: %*.*s\n", state->arg0, n2, n2, state->buf));
		memmove(state->buf, p+1, sizeof(state->buf) - n1);
		state->buf_used -= n1;
	}

	/* the buffer could have completely filled - unfortunately we have
	   no choice but to dump it out straight away */
	if (state->buf_used == sizeof(state->buf)) {
		DEBUG(level,("%s: %*.*s\n",
			     state->arg0, state->buf_used,
			     state->buf_used, state->buf));
		state->buf_used = 0;
	}
}
int iot_tls_connect(Network *pNetwork, TLSConnectParams params) {
	/* ret and flags are used below but were not declared in this snippet;
	 * the mbed TLS contexts (server_fd, ssl, conf, cacert, clicert, pkey,
	 * ctr_drbg) are assumed to be file-scope state that has already been
	 * initialised and seeded elsewhere. */
	int ret = 0;
	uint32_t flags;
	const char *pers = "aws_iot_tls_wrapper";
	unsigned char buf[MBEDTLS_SSL_MAX_CONTENT_LEN + 1];

	DEBUG("  . Loading the CA root certificate ...");
	ret = mbedtls_x509_crt_parse_file(&cacert, params.pRootCALocation);
	if (ret < 0) {
		ERROR(" failed\n  !  mbedtls_x509_crt_parse returned -0x%x\n\n", -ret);
		return ret;
	}
	DEBUG(" ok (%d skipped)\n", ret);

	DEBUG("  . Loading the client cert. and key...");
	ret = mbedtls_x509_crt_parse_file(&clicert, params.pDeviceCertLocation);
	if (ret != 0) {
		ERROR(" failed\n  !  mbedtls_x509_crt_parse returned -0x%x\n\n", -ret);
		return ret;
	}

	ret = mbedtls_pk_parse_keyfile(&pkey, params.pDevicePrivateKeyLocation, "");
	if (ret != 0) {
		ERROR(" failed\n  !  mbedtls_pk_parse_key returned -0x%x\n\n", -ret);
		return ret;
	}
	DEBUG(" ok\n");
	char portBuffer[6];
	snprintf(portBuffer, sizeof(portBuffer), "%d", params.DestinationPort);
	DEBUG("  . Connecting to %s/%s...", params.pDestinationURL, portBuffer);
	if ((ret = mbedtls_net_connect(&server_fd, params.pDestinationURL, portBuffer, MBEDTLS_NET_PROTO_TCP)) != 0) {
		ERROR(" failed\n  ! mbedtls_net_connect returned -0x%x\n\n", -ret);
		return ret;
	}

	ret = mbedtls_net_set_block(&server_fd);
	if (ret != 0) {
		ERROR(" failed\n  ! net_set_(non)block() returned -0x%x\n\n", -ret);
		return ret;
	}
	DEBUG(" ok\n");

	DEBUG("  . Setting up the SSL/TLS structure...");
	if ((ret = mbedtls_ssl_config_defaults(&conf, MBEDTLS_SSL_IS_CLIENT, MBEDTLS_SSL_TRANSPORT_STREAM,
			MBEDTLS_SSL_PRESET_DEFAULT)) != 0) {
		ERROR(" failed\n  ! mbedtls_ssl_config_defaults returned -0x%x\n\n", -ret);
		return ret;
	}

	mbedtls_ssl_conf_verify(&conf, myCertVerify, NULL);
	if (params.ServerVerificationFlag == true) {
		mbedtls_ssl_conf_authmode(&conf, MBEDTLS_SSL_VERIFY_REQUIRED);
	} else {
		mbedtls_ssl_conf_authmode(&conf, MBEDTLS_SSL_VERIFY_OPTIONAL);
	}
	mbedtls_ssl_conf_rng(&conf, mbedtls_ctr_drbg_random, &ctr_drbg);

	mbedtls_ssl_conf_ca_chain(&conf, &cacert, NULL);
	if ((ret = mbedtls_ssl_conf_own_cert(&conf, &clicert, &pkey)) != 0) {
		ERROR(" failed\n  ! mbedtls_ssl_conf_own_cert returned %d\n\n", ret);
		return ret;
	}

	mbedtls_ssl_conf_read_timeout(&conf, params.timeout_ms);

	if ((ret = mbedtls_ssl_setup(&ssl, &conf)) != 0) {
		ERROR(" failed\n  ! mbedtls_ssl_setup returned -0x%x\n\n", -ret);
		return ret;
	}
	if ((ret = mbedtls_ssl_set_hostname(&ssl, params.pDestinationURL)) != 0) {
		ERROR(" failed\n  ! mbedtls_ssl_set_hostname returned %d\n\n", ret);
		return ret;
	}
	mbedtls_ssl_set_bio(&ssl, &server_fd, mbedtls_net_send, NULL, mbedtls_net_recv_timeout);
	DEBUG(" ok\n");

	DEBUG("  . Performing the SSL/TLS handshake...");
	while ((ret = mbedtls_ssl_handshake(&ssl)) != 0) {
		if (ret != MBEDTLS_ERR_SSL_WANT_READ && ret != MBEDTLS_ERR_SSL_WANT_WRITE) {
			ERROR(" failed\n  ! mbedtls_ssl_handshake returned -0x%x\n", -ret);
			if (ret == MBEDTLS_ERR_X509_CERT_VERIFY_FAILED) {
				ERROR("    Unable to verify the server's certificate. "
						"Either it is invalid,\n"
						"    or you didn't set ca_file or ca_path "
						"to an appropriate value.\n"
						"    Alternatively, you may want to use "
						"auth_mode=optional for testing purposes.\n");
			}
			return ret;
		}
	}

	DEBUG(" ok\n    [ Protocol is %s ]\n    [ Ciphersuite is %s ]\n", mbedtls_ssl_get_version(&ssl), mbedtls_ssl_get_ciphersuite(&ssl));
	if ((ret = mbedtls_ssl_get_record_expansion(&ssl)) >= 0) {
		DEBUG("    [ Record expansion is %d ]\n", ret);
	} else {
		DEBUG("    [ Record expansion is unknown (compression) ]\n");
	}

	DEBUG("  . Verifying peer X.509 certificate...");

	if (params.ServerVerificationFlag == true) {
		if ((flags = mbedtls_ssl_get_verify_result(&ssl)) != 0) {
			char vrfy_buf[512];
			ERROR(" failed\n");
			mbedtls_x509_crt_verify_info(vrfy_buf, sizeof(vrfy_buf), "  ! ", flags);
			ERROR("%s\n", vrfy_buf);
		} else {
			DEBUG(" ok\n");
			ret = NONE_ERROR;
		}
	} else {
		DEBUG(" Server Verification skipped\n");
		ret = NONE_ERROR;
	}

	if (mbedtls_ssl_get_peer_cert(&ssl) != NULL) {
		DEBUG("  . Peer certificate information    ...\n");
		mbedtls_x509_crt_info((char *) buf, sizeof(buf) - 1, "      ", mbedtls_ssl_get_peer_cert(&ssl));
		DEBUG("%s\n", buf);
	}

	mbedtls_ssl_conf_read_timeout(&conf, 10);

	return ret;
}
// ---------------------------------------------------------------------------
// CHssLoginTimer::ConstructL
// ---------------------------------------------------------------------------
//
void CHssLoginTimer::ConstructL()
    {
    DEBUG("CHssLoginTimer::ConstructL()");
    CTimer::ConstructL();
    CActiveScheduler::Add(this);
    }
Example #12
0
static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
			   char *buffer)
{
	struct INFTLrecord *inftl = (void *)mbd;
	unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
	unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
	struct mtd_info *mtd = inftl->mbd.mtd;
	unsigned int status;
	int silly = MAX_LOOPS;
	struct inftl_bci bci;
	size_t retlen;

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
		"buffer=%p)\n", inftl, block, buffer);

	while (thisEUN < inftl->nb_blocks) {
		if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
				  blockofs, 8, &retlen, (char *)&bci) < 0)
			status = SECTOR_IGNORE;
		else
			status = bci.Status | bci.Status1;

		switch (status) {
		case SECTOR_DELETED:
			thisEUN = BLOCK_NIL;
			goto foundit;
		case SECTOR_USED:
			goto foundit;
		case SECTOR_FREE:
		case SECTOR_IGNORE:
			break;
		default:
			printk(KERN_WARNING "INFTL: unknown status for "
				"block %ld in EUN %d: 0x%04x\n",
				block, thisEUN, status);
			break;
		}

		if (!silly--) {
			printk(KERN_WARNING "INFTL: infinite loop in "
				"Virtual Unit Chain 0x%lx\n",
				block / (inftl->EraseSize / SECTORSIZE));
			return 1;
		}

		thisEUN = inftl->PUtable[thisEUN];
	}

foundit:
	if (thisEUN == BLOCK_NIL) {
		/* The requested block is not on the media, return all 0x00 */
		memset(buffer, 0, SECTORSIZE);
	} else {
		size_t retlen;
		loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
		int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);

		/* Handle corrected bit flips gracefully */
		if (ret < 0 && ret != -EUCLEAN)
			return -EIO;
	}
	return 0;
}
Example #13
0
/*
 * Given a Virtual Unit Chain, see if it can be deleted, and if so do it.
 */
static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
{
	struct mtd_info *mtd = inftl->mbd.mtd;
	unsigned char BlockUsed[MAX_SECTORS_PER_UNIT];
	unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
	unsigned int thisEUN, status;
	int block, silly;
	struct inftl_bci bci;
	size_t retlen;

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
		"thisVUC=%d)\n", inftl, thisVUC);

	memset(BlockUsed, 0, sizeof(BlockUsed));
	memset(BlockDeleted, 0, sizeof(BlockDeleted));

	thisEUN = inftl->VUtable[thisVUC];
	if (thisEUN == BLOCK_NIL) {
		printk(KERN_WARNING "INFTL: trying to delete non-existent "
		       "Virtual Unit Chain %d!\n", thisVUC);
		return;
	}

	/*
	 * Scan through the Erase Units to determine whether any data is in
	 * each of the 512-byte blocks within the Chain.
	 */
	silly = MAX_LOOPS;
	while (thisEUN < inftl->nb_blocks) {
		for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
			if (BlockUsed[block] || BlockDeleted[block])
				continue;

			if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
					   + (block * SECTORSIZE), 8 , &retlen,
					  (char *)&bci) < 0)
				status = SECTOR_IGNORE;
			else
				status = bci.Status | bci.Status1;

			switch(status) {
			case SECTOR_FREE:
			case SECTOR_IGNORE:
				break;
			case SECTOR_USED:
				BlockUsed[block] = 1;
				continue;
			case SECTOR_DELETED:
				BlockDeleted[block] = 1;
				continue;
			default:
				printk(KERN_WARNING "INFTL: unknown status "
					"for block %d in EUN %d: 0x%x\n",
					block, thisEUN, status);
			}
		}

		if (!silly--) {
			printk(KERN_WARNING "INFTL: infinite loop in Virtual "
				"Unit Chain 0x%x\n", thisVUC);
			return;
		}

		thisEUN = inftl->PUtable[thisEUN];
	}

	for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++)
		if (BlockUsed[block])
			return;

	/*
	 * For each block in the chain free it and make it available
	 * for future use. Erase from the oldest unit first.
	 */
	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC);

	for (;;) {
		u16 *prevEUN = &inftl->VUtable[thisVUC];
		thisEUN = *prevEUN;

		/* If the chain is all gone already, we're done */
		if (thisEUN == BLOCK_NIL) {
			DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisVUC);
			return;
		}

		/* Find oldest unit in chain. */
		while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
			BUG_ON(thisEUN >= inftl->nb_blocks);

			prevEUN = &inftl->PUtable[thisEUN];
			thisEUN = *prevEUN;
		}

		DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
		      thisEUN, thisVUC);

		if (INFTL_formatblock(inftl, thisEUN) < 0) {
			/*
			 * Could not erase : mark block as reserved.
			 */
			inftl->PUtable[thisEUN] = BLOCK_RESERVED;
		} else {
			/* Correctly erased : mark it as free */
			inftl->PUtable[thisEUN] = BLOCK_FREE;
			inftl->numfreeEUNs++;
		}

		/* Now sort out whatever was pointing to it... */
		*prevEUN = BLOCK_NIL;

		/* Ideally we'd actually be responsive to new
		   requests while we're doing this -- if there's
		   free space why should others be made to wait? */
		cond_resched();
	}

	inftl->VUtable[thisVUC] = BLOCK_NIL;
}
Example #14
0
/*
 * INFTL_findwriteunit: Return the unit number into which we can write
 *                      for this block. Make it available if it isn't already.
 */
static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
{
	unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
	unsigned int thisEUN, writeEUN, prev_block, status;
	unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
	struct mtd_info *mtd = inftl->mbd.mtd;
	struct inftl_oob oob;
	struct inftl_bci bci;
	unsigned char anac, nacs, parity;
	size_t retlen;
	int silly, silly2 = 3;

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
		"block=%d)\n", inftl, block);

	do {
		/*
		 * Scan the media to find a unit in the VUC which has
		 * a free space for the block in question.
		 */
		writeEUN = BLOCK_NIL;
		thisEUN = inftl->VUtable[thisVUC];
		silly = MAX_LOOPS;

		while (thisEUN <= inftl->lastEUN) {
			inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
				       blockofs, 8, &retlen, (char *)&bci);

			status = bci.Status | bci.Status1;
			DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
				"EUN %d is %x\n", block, thisEUN, status);

			switch(status) {
			case SECTOR_FREE:
				writeEUN = thisEUN;
				break;
			case SECTOR_DELETED:
			case SECTOR_USED:
				/* Can't go any further */
				goto hitused;
			case SECTOR_IGNORE:
				break;
			default:
				/*
				 * Invalid block. Don't use it any more.
				 * Must implement.
				 */
				break;
			}

			if (!silly--) {
				printk(KERN_WARNING "INFTL: infinite loop in "
					"Virtual Unit Chain 0x%x\n", thisVUC);
				return BLOCK_NIL;
			}

			/* Skip to next block in chain */
			thisEUN = inftl->PUtable[thisEUN];
		}

hitused:
		if (writeEUN != BLOCK_NIL)
			return writeEUN;


		/*
		 * OK. We didn't find one in the existing chain, or there
		 * is no existing chain. Allocate a new one.
		 */
		writeEUN = INFTL_findfreeblock(inftl, 0);

		if (writeEUN == BLOCK_NIL) {
			/*
			 * That didn't work - there were no free blocks just
			 * waiting to be picked up. We're going to have to fold
			 * a chain to make room.
			 */
			thisEUN = INFTL_makefreeblock(inftl, block);

			/*
			 * Hopefully we free something, lets try again.
			 * This time we are desperate...
			 */
			DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 "
				"to find free EUN to accommodate write to "
				"VUC %d\n", thisVUC);
			writeEUN = INFTL_findfreeblock(inftl, 1);
			if (writeEUN == BLOCK_NIL) {
				/*
				 * Ouch. This should never happen - we should
				 * always be able to make some room somehow.
				 * If we get here, we've allocated more storage
				 * space than actual media, or our makefreeblock
				 * routine is missing something.
				 */
				printk(KERN_WARNING "INFTL: cannot make free "
					"space.\n");
#ifdef DEBUG
				INFTL_dumptables(inftl);
				INFTL_dumpVUchains(inftl);
#endif
				return BLOCK_NIL;
			}
		}

		/*
		 * Insert new block into virtual chain. Firstly update the
		 * block headers in flash...
		 */
		anac = 0;
		nacs = 0;
		thisEUN = inftl->VUtable[thisVUC];
		if (thisEUN != BLOCK_NIL) {
			inftl_read_oob(mtd, thisEUN * inftl->EraseSize
				       + 8, 8, &retlen, (char *)&oob.u);
			anac = oob.u.a.ANAC + 1;
			nacs = oob.u.a.NACs + 1;
		}

		prev_block = inftl->VUtable[thisVUC];
		if (prev_block < inftl->nb_blocks)
			prev_block -= inftl->firstEUN;
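		/* One parity bit per header field: VUC, previous unit, ANAC and NACs. */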

		parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0;
		parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0;
		parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0;
		parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0;

		oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC);
		oob.u.a.prevUnitNo = cpu_to_le16(prev_block);
		oob.u.a.ANAC = anac;
		oob.u.a.NACs = nacs;
		oob.u.a.parityPerField = parity;
		oob.u.a.discarded = 0xaa;

		inftl_write_oob(mtd, writeEUN * inftl->EraseSize + 8, 8,
				&retlen, (char *)&oob.u);

		/* Also back up header... */
		oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC);
		oob.u.b.prevUnitNo = cpu_to_le16(prev_block);
		oob.u.b.ANAC = anac;
		oob.u.b.NACs = nacs;
		oob.u.b.parityPerField = parity;
		oob.u.b.discarded = 0xaa;

		inftl_write_oob(mtd, writeEUN * inftl->EraseSize +
				SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u);

		inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC];
		inftl->VUtable[thisVUC] = writeEUN;

		inftl->numfreeEUNs--;
		return writeEUN;

	} while (silly2--);

	printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
		"Unit Chain 0x%x\n", thisVUC);
	return BLOCK_NIL;
}
Example #15
0
int main(int argc, char *argv[])
{
	fr_pcap_t *in = NULL, *in_p;
	fr_pcap_t **in_head = &in;
	fr_pcap_t *out = NULL;

	int ret = 1;					/* Exit status */
	int limit = -1;					/* How many packets to sniff */

	char errbuf[PCAP_ERRBUF_SIZE];			/* Error buffer */
	int port = 1812;

	char buffer[1024];

	int opt;
	FR_TOKEN parsecode;
	char const *radius_dir = RADIUS_DIR;

	rs_stats_t stats;

	fr_debug_flag = 1;
	log_dst = stdout;

	talloc_set_log_stderr();

	conf = talloc_zero(NULL, rs_t);
	if (!fr_assert(conf)) {
		exit (1);
	}

	/*
	 *  We don't really want probes taking down machines
	 */
#ifdef HAVE_TALLOC_SET_MEMLIMIT
	/*
	 *	@fixme causes hang in talloc steal
	 */
	 //talloc_set_memlimit(conf, 524288000);		/* 500 MB */
#endif

	/*
	 *	Set some defaults
	 */
	conf->print_packet = true;
	conf->limit = -1;
	conf->promiscuous = true;
#ifdef HAVE_COLLECTDC_H
	conf->stats.prefix = RS_DEFAULT_PREFIX;
#endif
	conf->radius_secret = RS_DEFAULT_SECRET;

	/*
	 *  Get options
	 */
	while ((opt = getopt(argc, argv, "c:d:DFf:hi:I:mp:qr:s:Svw:xXW:T:P:O:")) != EOF) {
		switch (opt) {
		case 'c':
			limit = atoi(optarg);
			if (limit <= 0) {
				fprintf(stderr, "radsniff: Invalid number of packets \"%s\"", optarg);
				exit(1);
			}
			break;

		case 'd':
			radius_dir = optarg;
			break;

		case 'D':
			{
				pcap_if_t *all_devices = NULL;
				pcap_if_t *dev_p;

				if (pcap_findalldevs(&all_devices, errbuf) < 0) {
					ERROR("Error getting available capture devices: %s", errbuf);
					goto finish;
				}

				int i = 1;
				for (dev_p = all_devices;
				     dev_p;
				     dev_p = dev_p->next) {
					INFO("%i.%s", i++, dev_p->name);
				}
				ret = 0;
				goto finish;
			}

		case 'F':
			conf->from_stdin = true;
			conf->to_stdout = true;
			break;

		case 'f':
			conf->pcap_filter = optarg;
			break;

		case 'h':
			usage(0);
			break;

		case 'i':
			*in_head = fr_pcap_init(conf, optarg, PCAP_INTERFACE_IN);
			if (!*in_head) {
				goto finish;
			}
			in_head = &(*in_head)->next;
			conf->from_dev = true;
			break;

		case 'I':
			*in_head = fr_pcap_init(conf, optarg, PCAP_FILE_IN);
			if (!*in_head) {
				goto finish;
			}
			in_head = &(*in_head)->next;
			conf->from_file = true;
			break;
		case 'm':
			conf->promiscuous = false;
			break;

		case 'p':
			port = atoi(optarg);
			break;

		case 'q':
			if (fr_debug_flag > 0) {
				fr_debug_flag--;
			}
			break;

		case 'r':
			conf->radius_filter = optarg;
			break;

		case 's':
			conf->radius_secret = optarg;
			break;

		case 'S':
			conf->do_sort = true;
			break;

		case 'v':
#ifdef HAVE_COLLECTDC_H
			INFO("%s, %s, collectdclient version %s", radsniff_version, pcap_lib_version(),
			     lcc_version_string());
#else
			INFO("%s %s", radsniff_version, pcap_lib_version());
#endif
			exit(0);
			break;

		case 'w':
			out = fr_pcap_init(conf, optarg, PCAP_FILE_OUT);
			conf->to_file = true;
			break;

		case 'x':
		case 'X':
		  	fr_debug_flag++;
			break;

		case 'W':
			conf->stats.interval = atoi(optarg);
			conf->print_packet = false;
			if (conf->stats.interval <= 0) {
				ERROR("Stats interval must be > 0");
				usage(64);
			}
			break;

		case 'T':
			conf->stats.timeout = atoi(optarg);
			if (conf->stats.timeout <= 0) {
				ERROR("Timeout value must be > 0");
				usage(64);
			}
			break;

#ifdef HAVE_COLLECTDC_H
		case 'P':
			conf->stats.prefix = optarg;
			break;

		case 'O':
			conf->stats.collectd = optarg;
			conf->stats.out = RS_STATS_OUT_COLLECTD;
			break;
#endif
		default:
			usage(64);
		}
	}

	/* What's the point in specifying -F ?! */
	if (conf->from_stdin && conf->from_file && conf->to_file) {
		usage(64);
	}

	/* Can't read from both... */
	if (conf->from_file && conf->from_dev) {
		usage(64);
	}

	/* Reading from file overrides stdin */
	if (conf->from_stdin && (conf->from_file || conf->from_dev)) {
		conf->from_stdin = false;
	}

	/* Writing to file overrides stdout */
	if (conf->to_file && conf->to_stdout) {
		conf->to_stdout = false;
	}

	if (conf->to_stdout) {
		out = fr_pcap_init(conf, "stdout", PCAP_STDIO_OUT);
		if (!out) {
			goto finish;
		}
	}

	if (conf->from_stdin) {
		*in_head = fr_pcap_init(conf, "stdin", PCAP_STDIO_IN);
		if (!*in_head) {
			goto finish;
		}
		in_head = &(*in_head)->next;
	}

	if (conf->stats.interval && !conf->stats.out) {
		conf->stats.out = RS_STATS_OUT_STDIO;
	}

	if (conf->stats.timeout == 0) {
		conf->stats.timeout = RS_DEFAULT_TIMEOUT;
	}

	/*
	 *	If we're writing pcap data to stdout we *really* don't want to send
	 *	logging there as well.
	 */
 	log_dst = conf->to_stdout ? stderr : stdout;

#if !defined(HAVE_PCAP_FOPEN_OFFLINE) || !defined(HAVE_PCAP_DUMP_FOPEN)
	if (conf->from_stdin || conf->to_stdout) {
		ERROR("PCAP streams not supported");
		goto finish;
	}
#endif

	if (!conf->pcap_filter) {
		snprintf(buffer, sizeof(buffer), "udp port %d or %d or %d",
			 port, port + 1, 3799);
		conf->pcap_filter = buffer;
	}

	if (dict_init(radius_dir, RADIUS_DICTIONARY) < 0) {
		fr_perror("radsniff");
		ret = 64;
		goto finish;
	}
	fr_strerror();	/* Clear out any non-fatal errors */

	if (conf->radius_filter) {
		vp_cursor_t cursor;
		VALUE_PAIR *vp;

		parsecode = userparse(NULL, conf->radius_filter, &filter_vps);
		if (parsecode == T_OP_INVALID) {
			ERROR("Invalid RADIUS filter \"%s\" (%s)", conf->radius_filter, fr_strerror());
			ret = 64;
			goto finish;
		}

		if (!filter_vps) {
			ERROR("Empty RADIUS filter \"%s\"", conf->radius_filter);
			ret = 64;
			goto finish;
		}

		for (vp = paircursor(&cursor, &filter_vps);
		     vp;
		     vp = pairnext(&cursor)) {
		     	/*
		     	 *	xlat expansion isn't supported here
		     	 */
		     	if (vp->type == VT_XLAT) {
		     		vp->type = VT_DATA;
		     		vp->vp_strvalue = vp->value.xlat;
		     	}
		}
	}

	/*
	 *	Setup the request tree
	 */
	request_tree = rbtree_create((rbcmp) rs_packet_cmp, _rb_rad_free, 0);
	if (!request_tree) {
		ERROR("Failed creating request tree");
		goto finish;
	}

	/*
	 *	Get the default capture device
	 */
	if (!conf->from_stdin && !conf->from_file && !conf->from_dev) {
		pcap_if_t *all_devices;			/* List of all devices libpcap can listen on */
		pcap_if_t *dev_p;

		if (pcap_findalldevs(&all_devices, errbuf) < 0) {
			ERROR("Error getting available capture devices: %s", errbuf);
			goto finish;
		}

		if (!all_devices) {
			ERROR("No capture files specified and no live interfaces available");
			ret = 64;
			goto finish;
		}

		for (dev_p = all_devices;
		     dev_p;
		     dev_p = dev_p->next) {
		     	/* Don't use the any devices, it's horribly broken */
		     	if (!strcmp(dev_p->name, "any")) continue;
			*in_head = fr_pcap_init(conf, dev_p->name, PCAP_INTERFACE_IN);
			in_head = &(*in_head)->next;
		}
		conf->from_auto = true;
		conf->from_dev = true;
		INFO("Defaulting to capture on all interfaces");
	}

	/*
	 *	Print captures values which will be used
	 */
	if (fr_debug_flag > 2) {
			DEBUG2("Sniffing with options:");
		if (conf->from_dev)	{
			char *buff = fr_pcap_device_names(conf, in, ' ');
			DEBUG2("  Device(s)                : [%s]", buff);
			talloc_free(buff);
		}
		if (conf->to_file || conf->to_stdout) {
			DEBUG2("  Writing to               : [%s]", out->name);
		}
		if (conf->limit > 0)	{
			DEBUG2("  Capture limit (packets)  : [%" PRIu64 "]", conf->limit);
		}
			DEBUG2("  PCAP filter              : [%s]", conf->pcap_filter);
			DEBUG2("  RADIUS secret            : [%s]", conf->radius_secret);
		if (filter_vps){
			DEBUG2("  RADIUS filter            :");
			vp_printlist(log_dst, filter_vps);
		}
	}

	/*
	 *	Open our interface to collectd
	 */
#ifdef HAVE_COLLECTDC_H
	if (conf->stats.out == RS_STATS_OUT_COLLECTD) {
		size_t i;
		rs_stats_tmpl_t *tmpl, **next;

		if (rs_stats_collectd_open(conf) < 0) {
			exit(1);
		}

		next = &conf->stats.tmpl;

		for (i = 0; i < (sizeof(rs_useful_codes) / sizeof(*rs_useful_codes)); i++) {
			tmpl = rs_stats_collectd_init_latency(conf, next, conf, "exchanged",
							      &stats.exchange[rs_useful_codes[i]],
							      rs_useful_codes[i]);
			if (!tmpl) {
				ERROR("Error allocating memory for stats template");
				goto finish;
			}
			next = &(tmpl->next);
		}
	}
#endif

	/*
	 *	This actually opens the capture interfaces/files (we just allocated the memory earlier)
	 */
	{
		fr_pcap_t *tmp;
		fr_pcap_t **tmp_p = &tmp;
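		/* Re-link the input list so it only keeps handles that opened successfully. */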

		for (in_p = in;
		     in_p;
		     in_p = in_p->next) {
		     	in_p->promiscuous = conf->promiscuous;
			if (fr_pcap_open(in_p) < 0) {
				if (!conf->from_auto) {
					ERROR("Failed opening pcap handle for %s", in_p->name);
					goto finish;
				}

				DEBUG("Failed opening pcap handle: %s", fr_strerror());
				continue;
			}

			if (conf->pcap_filter) {
				if (fr_pcap_apply_filter(in_p, conf->pcap_filter) < 0) {
					ERROR("Failed applying filter");
					goto finish;
				}
			}

			*tmp_p = in_p;
			tmp_p = &(in_p->next);
		}
		*tmp_p = NULL;
		in = tmp;
	}

	/*
	 *	Open our output interface (if we have one);
	 */
	if (out) {
		if (fr_pcap_open(out) < 0) {
			ERROR("Failed opening pcap output");
			goto finish;
		}
	}

	/*
	 *	Setup signal handlers so we always exit gracefully, ensuring output buffers are always
	 *	flushed.
	 */
	{
#ifdef HAVE_SIGACTION
		struct sigaction action;
		memset(&action, 0, sizeof(action));

		action.sa_handler = rs_cleanup;
		sigaction(SIGINT, &action, NULL);
		sigaction(SIGQUIT, &action, NULL);
#else
		signal(SIGINT, rs_cleanup);
#  ifdef SIGQUIT
		signal(SIGQUIT, rs_cleanup);
#  endif
#endif
	}

	/*
	 *	Setup and enter the main event loop. Who needs libev when you can roll your own...
	 */
	 {
	 	struct timeval now;
	 	rs_update_t		update;

	 	char *buff;

		memset(&stats, 0, sizeof(stats));
		memset(&update, 0, sizeof(update));

	 	events = fr_event_list_create(conf, _rs_event_status);
	 	if (!events) {
	 		ERROR();
	 		goto finish;
	 	}

		for (in_p = in;
	     	     in_p;
	     	     in_p = in_p->next) {
	     	     	rs_event_t *event;

	     	     	event = talloc_zero(events, rs_event_t);
	     	     	event->list = events;
	     	     	event->in = in_p;
	     	     	event->out = out;
	     	     	event->stats = &stats;

			if (!fr_event_fd_insert(events, 0, in_p->fd, rs_got_packet, event)) {
				ERROR("Failed inserting file descriptor");
				goto finish;
			}
		}

		buff = fr_pcap_device_names(conf, in, ' ');
		INFO("Sniffing on (%s)", buff);
		talloc_free(buff);

		gettimeofday(&now, NULL);

		/*
		 *	Insert our stats processor
		 */
		if (conf->stats.interval) {
			update.list = events;
			update.stats = &stats;
			update.in = in;

			now.tv_sec += conf->stats.interval;
			now.tv_usec = 0;
			fr_event_insert(events, rs_stats_process, (void *) &update, &now, NULL);
		}

		ret = fr_event_loop(events);	/* Enter the main event loop */
	}

	INFO("Done sniffing");

	finish:

	cleanup = true;
	/*
	 *	Free all the things! This also closes all the sockets and file descriptors
	 */
	talloc_free(conf);

	return ret;
}
/*
  construct the parent GUID for an entry from a message
*/
static int construct_parent_guid(struct ldb_module *module,
				 struct ldb_message *msg, enum ldb_scope scope,
				 struct ldb_request *parent)
{
	struct ldb_result *res, *parent_res;
	const struct ldb_val *parent_guid;
	const char *attrs[] = { "instanceType", NULL };
	const char *attrs2[] = { "objectGUID", NULL };
	uint32_t instanceType;
	int ret;
	struct ldb_dn *parent_dn;
	struct ldb_val v;

	/* determine if the object is NC by instance type */
	ret = dsdb_module_search_dn(module, msg, &res, msg->dn, attrs,
	                            DSDB_FLAG_NEXT_MODULE |
	                            DSDB_SEARCH_SHOW_RECYCLED, parent);
	if (ret != LDB_SUCCESS) {
		return ret;
	}

	instanceType = ldb_msg_find_attr_as_uint(res->msgs[0],
						 "instanceType", 0);
	talloc_free(res);
	if (instanceType & INSTANCE_TYPE_IS_NC_HEAD) {
		DEBUG(4,(__location__ ": Object %s is NC\n",
			 ldb_dn_get_linearized(msg->dn)));
		return LDB_SUCCESS;
	}
	parent_dn = ldb_dn_get_parent(msg, msg->dn);

	if (parent_dn == NULL) {
		DEBUG(4,(__location__ ": Failed to find parent for dn %s\n",
					 ldb_dn_get_linearized(msg->dn)));
		return LDB_SUCCESS;
	}
	ret = dsdb_module_search_dn(module, msg, &parent_res, parent_dn, attrs2,
	                            DSDB_FLAG_NEXT_MODULE |
	                            DSDB_SEARCH_SHOW_RECYCLED, parent);
	talloc_free(parent_dn);

	/* not an NC, so the object should have a parent */
	if (ret == LDB_ERR_NO_SUCH_OBJECT) {
		return ldb_error(ldb_module_get_ctx(module), LDB_ERR_OPERATIONS_ERROR, 
				 talloc_asprintf(msg, "Parent dn for %s does not exist", 
						 ldb_dn_get_linearized(msg->dn)));
	} else if (ret != LDB_SUCCESS) {
		return ret;
	}

	parent_guid = ldb_msg_find_ldb_val(parent_res->msgs[0], "objectGUID");
	if (!parent_guid) {
		talloc_free(parent_res);
		return LDB_SUCCESS;
	}

	v = data_blob_dup_talloc(parent_res, *parent_guid);
	if (!v.data) {
		talloc_free(parent_res);
		return ldb_oom(ldb_module_get_ctx(module));
	}
	ret = ldb_msg_add_steal_value(msg, "parentGUID", &v);
	talloc_free(parent_res);
	return ret;
}
/* Lookup groups a user is a member of. */
static NTSTATUS lookup_usergroups(struct winbindd_domain *domain,
				  TALLOC_CTX *mem_ctx,
				  DOM_SID *user_sid, 
				  uint32 *num_groups, DOM_SID ***user_gids)
{
	struct winbind_cache *cache = get_cache(domain);
	struct cache_entry *centry = NULL;
	NTSTATUS status;
	unsigned int i;
	fstring sid_string;

	if (!cache->tdb)
		goto do_query;

	centry = wcache_fetch(cache, domain, "UG/%s", sid_to_string(sid_string, user_sid));
	
	/* If we have an access denied cache entry and a cached info3 in the
           samlogon cache then do a query.  This will force the rpc back end
           to return the info3 data. */

	if (NT_STATUS_V(domain->last_status) == NT_STATUS_V(NT_STATUS_ACCESS_DENIED) &&
	    netsamlogon_cache_have(user_sid)) {
		DEBUG(10, ("query_user: cached access denied and have cached info3\n"));
		domain->last_status = NT_STATUS_OK;
		centry_free(centry);
		goto do_query;
	}
	
	if (!centry)
		goto do_query;

	*num_groups = centry_uint32(centry);
	
	if (*num_groups == 0)
		goto do_cached;

	(*user_gids) = talloc(mem_ctx, sizeof(**user_gids) * (*num_groups));
	if (! (*user_gids))
		smb_panic("lookup_usergroups out of memory");
	for (i=0; i<(*num_groups); i++) {
		(*user_gids)[i] = centry_sid(centry, mem_ctx);
	}

do_cached:	
	status = centry->status;

	DEBUG(10,("lookup_usergroups: [Cached] - cached info for domain %s status %s\n",
		domain->name, get_friendly_nt_error_msg(status) ));

	centry_free(centry);
	return status;

do_query:
	(*num_groups) = 0;
	(*user_gids) = NULL;

	/* Return status value returned by seq number check */

	if (!NT_STATUS_IS_OK(domain->last_status))
		return domain->last_status;

	DEBUG(10,("lookup_usergroups: [Cached] - doing backend query for info for domain %s\n",
		domain->name ));

	status = domain->backend->lookup_usergroups(domain, mem_ctx, user_sid, num_groups, user_gids);

	/* and save it */
	refresh_sequence_number(domain, False);
	centry = centry_start(domain, status);
	if (!centry)
		goto skip_save;
	centry_put_uint32(centry, *num_groups);
	for (i=0; i<(*num_groups); i++) {
		centry_put_sid(centry, (*user_gids)[i]);
	}	
	centry_end(centry, "UG/%s", sid_to_string(sid_string, user_sid));
	centry_free(centry);

skip_save:
	return status;
}
Example #18
0
Csock* CWebSock::GetSockObj(const CString& sHost, unsigned short uPort) {
	// All listening is done by CListener, thus CWebSock should never have
	// to listen, but since GetSockObj() is pure virtual...
	DEBUG("CWebSock::GetSockObj() called - this should never happen!");
	return NULL;
}
static struct cache_entry *wcache_fetch(struct winbind_cache *cache, 
					struct winbindd_domain *domain,
					const char *format, ...)
{
	va_list ap;
	char *kstr;
	TDB_DATA data;
	struct cache_entry *centry;
	TDB_DATA key;

	refresh_sequence_number(domain, False);

	va_start(ap, format);
	smb_xvasprintf(&kstr, format, ap);
	va_end(ap);
	
	key.dptr = kstr;
	key.dsize = strlen(kstr);
	data = tdb_fetch(wcache->tdb, key);
	if (!data.dptr) {
		/* a cache miss */
		free(kstr);
		return NULL;
	}

	centry = smb_xmalloc(sizeof(*centry));
	centry->data = (unsigned char *)data.dptr;
	centry->len = data.dsize;
	centry->ofs = 0;

	if (centry->len < 8) {
		/* huh? corrupt cache? */
		DEBUG(10,("wcache_fetch: Corrupt cache for key %s domain %s (len < 8) ?\n",
			kstr, domain->name ));
		centry_free(centry);
		free(kstr);
		return NULL;
	}
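	/* Every entry begins with 8 bytes: the NT status followed by the
	 * sequence number (hence the len < 8 corruption check above). */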
	
	centry->status = NT_STATUS(centry_uint32(centry));
	centry->sequence_number = centry_uint32(centry);

	if (centry_expired(domain, kstr, centry)) {
		extern BOOL opt_dual_daemon;

		DEBUG(10,("wcache_fetch: entry %s expired for domain %s\n",
			 kstr, domain->name ));

		if (opt_dual_daemon) {
			extern BOOL background_process;
			background_process = True;
			DEBUG(10,("wcache_fetch: background processing expired entry %s for domain %s\n",
				 kstr, domain->name ));
		} else {
			centry_free(centry);
			free(kstr);
			return NULL;
		}
	}

	DEBUG(10,("wcache_fetch: returning entry %s for domain %s\n",
		 kstr, domain->name ));

	free(kstr);
	return centry;
}
Example #20
0
/** Load client entries from Couchbase client documents on startup
 *
 * This function executes the view defined in the module configuration and loops
 * through all returned rows.  The view is called with "stale=false" to ensure the
 * most accurate data available when the view is called.  This will force an index
 * rebuild on this design document in Couchbase.  However, since this function is only
 * run once at server startup this should not be a concern.
 *
 * @param  inst The module instance.
 * @param  tmpl Default values for new clients.
 * @param  map  The client attribute configuration section.
 * @return      Returns 0 on success, -1 on error.
 */
int mod_load_client_documents(rlm_couchbase_t *inst, CONF_SECTION *tmpl, CONF_SECTION *map)
{
	rlm_couchbase_handle_t *handle = NULL; /* connection pool handle */
	char vpath[256], vid[MAX_KEY_SIZE], vkey[MAX_KEY_SIZE];  /* view path and fields */
	char error[512];                                         /* view error return */
	int idx = 0;                                             /* row array index counter */
	int retval = 0;                                          /* return value */
	lcb_error_t cb_error = LCB_SUCCESS;                      /* couchbase error holder */
	json_object *json, *jval;                                /* json object holders */
	json_object *jrows = NULL;                               /* json object to hold view rows */
	CONF_SECTION *client;                                    /* freeradius config section */
	RADCLIENT *c;                                            /* freeradius client */

	/* get handle */
	handle = fr_connection_get(inst->pool);

	/* check handle */
	if (!handle) return -1;

	/* set couchbase instance */
	lcb_t cb_inst = handle->handle;

	/* set cookie */
	cookie_t *cookie = handle->cookie;

	/* build view path */
	snprintf(vpath, sizeof(vpath), "%s?stale=false", inst->client_view);

	/* query view for document */
	cb_error = couchbase_query_view(cb_inst, cookie, vpath, NULL);

	/* check error and object */
	if (cb_error != LCB_SUCCESS || cookie->jerr != json_tokener_success || !cookie->jobj) {
		/* log error */
		ERROR("rlm_couchbase: failed to execute view request or parse return");
		/* set return */
		retval = -1;
		/* return */
		goto free_and_return;
	}

	/* debugging */
	DEBUG3("rlm_couchbase: cookie->jobj == %s", json_object_to_json_string(cookie->jobj));

	/* check for error in json object */
	if (json_object_object_get_ex(cookie->jobj, "error", &json)) {
		/* build initial error buffer */
		strlcpy(error, json_object_get_string(json), sizeof(error));
		/* get error reason */
		if (json_object_object_get_ex(cookie->jobj, "reason", &json)) {
			/* append divider */
			strlcat(error, " - ", sizeof(error));
			/* append reason */
			strlcat(error, json_object_get_string(json), sizeof(error));
		}
		/* log error */
		ERROR("rlm_couchbase: view request failed with error: %s", error);
		/* set return */
		retval = -1;
		/* return */
		goto free_and_return;
	}

	/* check for document id in return */
	if (!json_object_object_get_ex(cookie->jobj, "rows", &json)) {
		/* log error */
		ERROR("rlm_couchbase: failed to fetch rows from view payload");
		/* set return */
		retval = -1;
		/* return */
		goto free_and_return;
	}

	/* get and hold rows */
	jrows = json_object_get(json);

	/* free cookie object */
	if (cookie->jobj) {
		json_object_put(cookie->jobj);
		cookie->jobj = NULL;
	}

	/* debugging */
	DEBUG3("rlm_couchbase: jrows == %s", json_object_to_json_string(jrows));

	/* check for valid row value */
	if (!json_object_is_type(jrows, json_type_array) || json_object_array_length(jrows) < 1) {
		/* log error */
		ERROR("rlm_couchbase: no valid rows returned from view: %s", vpath);
		/* set return */
		retval = -1;
		/* return */
		goto free_and_return;
	}

	/* loop across all row elements */
	for (idx = 0; idx < json_object_array_length(jrows); idx++) {
		/* fetch current index */
		json = json_object_array_get_idx(jrows, idx);

		/* get view id */
		if (json_object_object_get_ex(json, "id", &jval)) {
			/* clear view id */
			memset(vid, 0, sizeof(vid));
			/* copy and check length */
			if (strlcpy(vid, json_object_get_string(jval), sizeof(vid)) >= sizeof(vid)) {
				ERROR("rlm_couchbase: id from row longer than MAX_KEY_SIZE (%d)",
				      MAX_KEY_SIZE);
				continue;
			}
		} else {
			WARN("rlm_couchbase: failed to fetch id from row - skipping");
			continue;
		}

		/* get view key */
		if (json_object_object_get_ex(json, "key", &jval)) {
			/* clear view key */
			memset(vkey, 0, sizeof(vkey));
			/* copy and check length */
			if (strlcpy(vkey, json_object_get_string(jval), sizeof(vkey)) >= sizeof(vkey)) {
				ERROR("rlm_couchbase: key from row longer than MAX_KEY_SIZE (%d)",
				      MAX_KEY_SIZE);
				continue;
			}
		} else {
			WARN("rlm_couchbase: failed to fetch key from row - skipping");
			continue;
		}

		/* fetch document */
		cb_error = couchbase_get_key(cb_inst, cookie, vid);

		/* check error and object */
		if (cb_error != LCB_SUCCESS || cookie->jerr != json_tokener_success || !cookie->jobj) {
			/* log error */
			ERROR("rlm_couchbase: failed to execute get request or parse return");
			/* set return */
			retval = -1;
			/* return */
			goto free_and_return;
		}

		/* debugging */
		DEBUG3("rlm_couchbase: cookie->jobj == %s", json_object_to_json_string(cookie->jobj));

		/* allocate conf section */
		client = tmpl ? cf_section_dup(NULL, tmpl, "client", vkey, true) :
				cf_section_alloc(NULL, "client", vkey);

		if (client_map_section(client, map, _get_client_value, cookie->jobj) < 0) {
			/* free config section */
			talloc_free(client);
			/* set return */
			retval = -1;
			/* return */
			goto free_and_return;
		}

		/*
		 * @todo These should be parented from something.
		 */
		c = client_afrom_cs(NULL, client, false, false);
		if (!c) {
			ERROR("rlm_couchbase: failed to allocate client");
			/* free config section */
			talloc_free(client);
			/* set return */
			retval = -1;
			/* return */
			goto free_and_return;
		}

		/*
		 * Client parents the CONF_SECTION which defined it.
		 */
		talloc_steal(c, client);

		/* attempt to add client */
		if (!client_add(NULL, c)) {
			ERROR("rlm_couchbase: failed to add client '%s' from '%s', possible duplicate?", vkey, vid);
			/* free client */
			client_free(c);
			/* set return */
			retval = -1;
			/* return */
			goto free_and_return;
		}

		/* debugging */
		DEBUG("rlm_couchbase: client '%s' added", c->longname);

		/* free json object */
		if (cookie->jobj) {
			json_object_put(cookie->jobj);
			cookie->jobj = NULL;
		}
	}

	free_and_return:

	/* free rows */
	if (jrows) {
		json_object_put(jrows);
	}

	/* free json object */
	if (cookie->jobj) {
		json_object_put(cookie->jobj);
		cookie->jobj = NULL;
	}

	/* release handle */
	if (handle) {
		fr_connection_release(inst->pool, handle);
	}

	/* return */
	return retval;
}
/* list all domain groups */
static NTSTATUS enum_dom_groups(struct winbindd_domain *domain,
				TALLOC_CTX *mem_ctx,
				uint32 *num_entries, 
				struct acct_info **info)
{
	struct winbind_cache *cache = get_cache(domain);
	struct cache_entry *centry = NULL;
	NTSTATUS status;
	unsigned int i;

	if (!cache->tdb)
		goto do_query;

	centry = wcache_fetch(cache, domain, "GL/%s/domain", domain->name);
	if (!centry)
		goto do_query;

	*num_entries = centry_uint32(centry);
	
	if (*num_entries == 0)
		goto do_cached;

	(*info) = talloc(mem_ctx, sizeof(**info) * (*num_entries));
	if (! (*info))
		smb_panic("enum_dom_groups out of memory");
	for (i=0; i<(*num_entries); i++) {
		fstrcpy((*info)[i].acct_name, centry_string(centry, mem_ctx));
		fstrcpy((*info)[i].acct_desc, centry_string(centry, mem_ctx));
		(*info)[i].rid = centry_uint32(centry);
	}

do_cached:	
	status = centry->status;

	DEBUG(10,("enum_dom_groups: [Cached] - cached list for domain %s status %s\n",
		domain->name, get_friendly_nt_error_msg(status) ));

	centry_free(centry);
	return status;

do_query:
	*num_entries = 0;
	*info = NULL;

	/* Return status value returned by seq number check */

	if (!NT_STATUS_IS_OK(domain->last_status))
		return domain->last_status;

	DEBUG(10,("enum_dom_groups: [Cached] - doing backend query for list for domain %s\n",
		domain->name ));

	status = domain->backend->enum_dom_groups(domain, mem_ctx, num_entries, info);

	/* and save it */
	refresh_sequence_number(domain, False);
	centry = centry_start(domain, status);
	if (!centry)
		goto skip_save;
	centry_put_uint32(centry, *num_entries);
	for (i=0; i<(*num_entries); i++) {
		centry_put_string(centry, (*info)[i].acct_name);
		centry_put_string(centry, (*info)[i].acct_desc);
		centry_put_uint32(centry, (*info)[i].rid);
	}	
	centry_end(centry, "GL/%s/domain", domain->name);
	centry_free(centry);

skip_save:
	return status;
}
Example #22
0
void PseuGUI::Shutdown(void)
{
    DEBUG(logdebug("PseuGUI::Shutdown()"));
    _mustdie = true;
}
/* convert a single name to a sid in a domain */
static NTSTATUS name_to_sid(struct winbindd_domain *domain,
			    TALLOC_CTX *mem_ctx,
			    const char *name,
			    DOM_SID *sid,
			    enum SID_NAME_USE *type)
{
	struct winbind_cache *cache = get_cache(domain);
	struct cache_entry *centry = NULL;
	NTSTATUS status;
	fstring uname;
	DOM_SID *sid2;

	if (!cache->tdb)
		goto do_query;

	fstrcpy(uname, name);
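	/* the NS/ cache key uses the upper-cased name, so normalize it before the lookup */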
	strupper_m(uname);
	centry = wcache_fetch(cache, domain, "NS/%s/%s", domain->name, uname);
	if (!centry)
		goto do_query;
	*type = (enum SID_NAME_USE)centry_uint32(centry);
	sid2 = centry_sid(centry, mem_ctx);
	if (!sid2) {
		ZERO_STRUCTP(sid);
	} else {
		sid_copy(sid, sid2);
	}

	status = centry->status;

	DEBUG(10,("name_to_sid: [Cached] - cached name for domain %s status %s\n",
		domain->name, get_friendly_nt_error_msg(status) ));

	centry_free(centry);
	return status;

do_query:
	ZERO_STRUCTP(sid);

	/* If the seq number check indicated that there is a problem
	 * with this DC, then return that status... except for
	 * access_denied.  This is special because the dc may be in
	 * "restrict anonymous = 1" mode, in which case it will deny
	 * most unauthenticated operations, but *will* allow the LSA
	 * name-to-sid that we try as a fallback. */

	if (!(NT_STATUS_IS_OK(domain->last_status)
	      || NT_STATUS_EQUAL(domain->last_status, NT_STATUS_ACCESS_DENIED)))
		return domain->last_status;

	DEBUG(10,("name_to_sid: [Cached] - doing backend query for name for domain %s\n",
		domain->name ));

	status = domain->backend->name_to_sid(domain, mem_ctx, name, sid, type);

	/* and save it */
	wcache_save_name_to_sid(domain, status, name, sid, *type);

	/* We can't save the sid to name mapping as we don't know the
	   correct case of the name without looking it up */

	return status;
}
Example #24
0
void PseuGUI::Run(void)
{
    if(!_initialized)
        this->_Init();
    if(!_initialized) // recheck
    {
        logerror("PseuGUI: not initialized, using non-GUI mode");
        Cancel();
        return;
    }

    DEBUG(logdebug("PseuGUI::Run() _device=%X",_device));

    int lastFPS = -1, fps = -1;

    while(_device && _device->run() && !_mustdie)
    {
        _lastpasstime = _passtime;
        _passtime = _timer->getTime();
        _passtimediff = _passtime - _lastpasstime;

        if (!_device->isWindowActive())
        {
            _device->sleep(10); // save cpu & gpu power if not focused
        }

        _UpdateSceneState();

        if(!_scene)
        {
            _device->sleep(10);
            continue;
        }

        if(_screendimension != _driver->getScreenSize())
        {
            _scene->OnResize();
            _screendimension = _driver->getScreenSize();
        }

        if(_updateScene)
        {
            _updateScene = false;
            _scene->OnManualUpdate();
        }

        _scene->OnUpdate(_passtimediff); // custom: process input, set camera, etc
        _driver->beginScene(true, true, _scene->GetBackgroundColor()); // irr: call driver to start drawing
        _scene->OnDrawBegin(); // custom: draw everything before irrlicht draws everything by itself
        _smgr->drawAll(); // irr: draw all scene nodes
        _guienv->drawAll(); // irr: draw gui elements
        _scene->OnDraw(); // custom: draw everything that has to be draw late (post-processing also belongs here)
        _driver->endScene(); // irr: drawing done
        // Primitive FPS limiter: upper cap hardcoded at 100 FPS, lower cap at 60
        // (if the FPS drops below 60, limiting is eased off again, so the limiter
        // itself never causes low FPS). The throttle is a fixed maximum of 10 msec.
        if(_driver->getFPS() > 100 && _throttle < 10)
            _throttle++;
        if(_driver->getFPS() < 60 && _throttle > 0)
            _throttle--;
        if(_throttle > 0)
            _device->sleep(_throttle); // sleeps at most _throttle (max 10) msec


        fps = _driver->getFPS();

        if (lastFPS != fps)
        {
            core::stringw str = L"PseuWoW [";
            str += _driver->getName();
            str += "] FPS:";
            str += fps;

            _device->setWindowCaption(str.c_str());

            lastFPS = fps;
        }

    }
    domgr.UnlinkAll(); // At this point the irr::device is probably already closed and deleted, which means it also
                       // deleted all SceneNodes. The pointers are still stored in the DrawObjects, so they need to be
                       // unlinked now to avoid a crash.
    DEBUG(logdebug("PseuGUI::Run() finished"));
    Cancel(); // already got shut down somehow, we can now safely cancel and drop the device
}
Example #25
0
static bool posix_lock_in_range(SMB_OFF_T *offset_out, SMB_OFF_T *count_out,
				uint64_t u_offset, uint64_t u_count)
{
	SMB_OFF_T offset = (SMB_OFF_T)u_offset;
	SMB_OFF_T count = (SMB_OFF_T)u_count;

	/*
	 * For the type of system we are, attempt to
	 * find the maximum positive lock offset as an SMB_OFF_T.
	 */

#if defined(MAX_POSITIVE_LOCK_OFFSET) /* Some systems have arbitrary limits. */

	SMB_OFF_T max_positive_lock_offset = (MAX_POSITIVE_LOCK_OFFSET);

#elif defined(LARGE_SMB_OFF_T) && !defined(HAVE_BROKEN_FCNTL64_LOCKS)

	/*
	 * In this case SMB_OFF_T is 64 bits,
	 * and the underlying system can handle 64 bit signed locks.
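	 * Build the largest positive offset by clearing the sign bit:
	 * 0x4 << (SMB_OFF_T_BITS-4), shifted left once more, is the sign bit,
	 * and its complement is 0x7FFF...FFFF.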
	 */

	SMB_OFF_T mask2 = ((SMB_OFF_T)0x4) << (SMB_OFF_T_BITS-4);
	SMB_OFF_T mask = (mask2<<1);
	SMB_OFF_T max_positive_lock_offset = ~mask;

#else /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */

	/*
	 * In this case either SMB_OFF_T is 32 bits,
	 * or the underlying system cannot handle 64 bit signed locks.
	 * All offsets & counts must be 2^31 or less.
	 */

	SMB_OFF_T max_positive_lock_offset = 0x7FFFFFFF;

#endif /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */

	/*
	 * POSIX locks of length zero mean lock to end-of-file.
	 * Win32 locks of length zero are point probes. Ignore
	 * any Win32 locks of length zero. JRA.
	 */

	if (count == (SMB_OFF_T)0) {
		DEBUG(10,("posix_lock_in_range: count = 0, ignoring.\n"));
		return False;
	}

	/*
	 * If the given offset was > max_positive_lock_offset then we cannot map
	 * this at all; ignore this lock.
	 */

	if (u_offset & ~((uint64_t)max_positive_lock_offset)) {
		DEBUG(10,("posix_lock_in_range: (offset = %.0f) offset > %.0f and we cannot handle this. Ignoring lock.\n",
				(double)u_offset, (double)((uint64_t)max_positive_lock_offset) ));
		return False;
	}

	/*
	 * We must truncate the count to less than max_positive_lock_offset.
	 */

	if (u_count & ~((uint64_t)max_positive_lock_offset)) {
		count = max_positive_lock_offset;
	}

	/*
	 * Truncate count to end at max lock offset.
	 */

	if (offset + count < 0 || offset + count > max_positive_lock_offset) {
		count = max_positive_lock_offset - offset;
	}

	/*
	 * If we ate all the count, ignore this lock.
	 */

	if (count == 0) {
		DEBUG(10,("posix_lock_in_range: Count = 0. Ignoring lock u_offset = %.0f, u_count = %.0f\n",
				(double)u_offset, (double)u_count ));
		return False;
	}

	/*
	 * The mapping was successful.
	 */

	DEBUG(10,("posix_lock_in_range: offset_out = %.0f, count_out = %.0f\n",
			(double)offset, (double)count ));

	*offset_out = offset;
	*count_out = count;
	
	return True;
}
Example #26
0
/*
  handle recv events on a nbt dgram socket
*/
static void dgm_socket_recv(struct nbt_dgram_socket *dgmsock)
{
	TALLOC_CTX *tmp_ctx = talloc_new(dgmsock);
	NTSTATUS status;
	struct socket_address *src;
	DATA_BLOB blob;
	size_t nread, dsize;
	struct nbt_dgram_packet *packet;
	const char *mailslot_name;
	enum ndr_err_code ndr_err;

	status = socket_pending(dgmsock->sock, &dsize);
	if (!NT_STATUS_IS_OK(status)) {
		talloc_free(tmp_ctx);
		return;
	}

	blob = data_blob_talloc(tmp_ctx, NULL, dsize);
	if (blob.data == NULL) {
		talloc_free(tmp_ctx);
		return;
	}

	status = socket_recvfrom(dgmsock->sock, blob.data, blob.length, &nread,
				 tmp_ctx, &src);
	if (!NT_STATUS_IS_OK(status)) {
		talloc_free(tmp_ctx);
		return;
	}
	blob.length = nread;

	DEBUG(5,("Received dgram packet of length %d from %s:%d\n", 
		 (int)blob.length, src->addr, src->port));

	packet = talloc(tmp_ctx, struct nbt_dgram_packet);
	if (packet == NULL) {
		talloc_free(tmp_ctx);
		return;
	}

	/* parse the request */
	ndr_err = ndr_pull_struct_blob(&blob, packet, packet,
				      (ndr_pull_flags_fn_t)ndr_pull_nbt_dgram_packet);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		status = ndr_map_error2ntstatus(ndr_err);
		DEBUG(2,("Failed to parse incoming NBT DGRAM packet - %s\n",
			 nt_errstr(status)));
		talloc_free(tmp_ctx);
		return;
	}

	/* if this is a mailslot message, then see if we can dispatch it to a handler */
	mailslot_name = dgram_mailslot_name(packet);
	if (mailslot_name) {
		struct dgram_mailslot_handler *dgmslot;
		dgmslot = dgram_mailslot_find(dgmsock, mailslot_name);
		if (dgmslot) {
			dgmslot->handler(dgmslot, packet, src);
		} else {
			DEBUG(2,("No mailslot handler for '%s'\n", mailslot_name));
		}
	} else {
		/* dispatch if there is a general handler */
		if (dgmsock->incoming.handler) {
			dgmsock->incoming.handler(dgmsock, packet, src);
		}
	}

	talloc_free(tmp_ctx);
}
Example #27
0
static void nl_callback(int sock)
{
	int len, attrlen;
	socklen_t addrlen;
	struct nlmsghdr *nlm;
	struct nlmsgerr *nlmerr;
	char buf[BUFLEN];
	struct in_addr dest_addr, src_addr;	
	struct ifaddrmsg *ifm;
	struct rtattr *rta;
	kaodv_rt_msg_t *m;
	rt_table_t *rt, *fwd_rt, *rev_rt = NULL;
		
	addrlen = sizeof(peer);


	len = recvfrom(sock, buf, BUFLEN, 0, (struct sockaddr *)&peer, &addrlen);

	if (len <= 0)
		return;
	
	nlm = (struct nlmsghdr *)buf;

	switch (nlm->nlmsg_type) {
	case NLMSG_ERROR:
		nlmerr = NLMSG_DATA(nlm);
		if (nlmerr->error == 0) {
			DEBUG(LOG_DEBUG, 0, "NLMSG_ACK");
		} else
			DEBUG(LOG_DEBUG, 0, "NLMSG_ERROR, error=%d", 
			      nlmerr->error);
		break;
	case RTM_NEWLINK:
		DEBUG(LOG_DEBUG, 0, "RTM_NEWLINK");
		break;
	case RTM_NEWADDR:
		DEBUG(LOG_DEBUG, 0, "RTM_NEWADDR");

		ifm = NLMSG_DATA(nlm);

		rta = (struct rtattr *)((char *)ifm + sizeof(struct ifaddrmsg));
		
		attrlen = nlm->nlmsg_len - 
			sizeof(struct nlmsghdr) - 
			sizeof(struct ifaddrmsg);
		
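		/* walk the rtattr list that follows the ifaddrmsg header */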
		for (; RTA_OK(rta, attrlen); rta = RTA_NEXT(rta, attrlen)) {

			if (rta->rta_type == IFA_ADDRESS) {
				struct in_addr ifaddr;
				
				memcpy(&ifaddr, RTA_DATA(rta), RTA_PAYLOAD(rta));
				
				DEBUG(LOG_DEBUG, 0, 
				      "Interface index %d changed address to %s", 
				      ifm->ifa_index, ip_to_str(ifaddr));
				
			}
		}
		break;
	case KAODVM_TIMEOUT:
		m = NLMSG_DATA(nlm);
		dest_addr.s_addr = m->dst;

		DEBUG(LOG_DEBUG, 0,
		      "Got TIMEOUT msg from kernel for %s",
		      ip_to_str(dest_addr));

		rt = rt_table_find(dest_addr);

		if (rt && rt->state == VALID)
			route_expire_timeout(rt);
		else
			DEBUG(LOG_DEBUG, 0,
			      "Got rt timeout event but there is no route");
		break;
	case KAODVM_NOROUTE:
		m = NLMSG_DATA(nlm);
		dest_addr.s_addr = m->dst;

		DEBUG(LOG_DEBUG, 0,
		      "Got NOROUTE msg from kernel for %s",
		      ip_to_str(dest_addr));

		rreq_route_discovery(dest_addr, 0, NULL);
		break;
	case KAODVM_REPAIR:
		m = NLMSG_DATA(nlm);
		dest_addr.s_addr = m->dst;
		src_addr.s_addr = m->src;

		DEBUG(LOG_DEBUG, 0, "Got REPAIR msg from kernel for %s",
		      ip_to_str(dest_addr));

		fwd_rt = rt_table_find(dest_addr);

		if (fwd_rt)
			rreq_local_repair(fwd_rt, src_addr, NULL);

		break;
	case KAODVM_ROUTE_UPDATE:
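		/* a data packet was forwarded along this route: refresh the forward and reverse route timeouts */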
		m = NLMSG_DATA(nlm);
		dest_addr.s_addr = m->dst;
		src_addr.s_addr = m->src;
	
		if (dest_addr.s_addr == AODV_BROADCAST ||
		    dest_addr.s_addr ==
		    DEV_IFINDEX(m->ifindex).broadcast.s_addr)
			return;

		fwd_rt = rt_table_find(dest_addr);
		rev_rt = rt_table_find(src_addr);

		rt_table_update_route_timeouts(fwd_rt, rev_rt);

		break;
	case KAODVM_SEND_RERR:
		m = NLMSG_DATA(nlm);
		dest_addr.s_addr = m->dst;
		src_addr.s_addr = m->src;

		if (dest_addr.s_addr == AODV_BROADCAST ||
		    dest_addr.s_addr ==
		    DEV_IFINDEX(m->ifindex).broadcast.s_addr)
			return;

		fwd_rt = rt_table_find(dest_addr);
		rev_rt = rt_table_find(src_addr);

		do {
			struct in_addr rerr_dest;
			RERR *rerr;

			DEBUG(LOG_DEBUG, 0,
			      "Sending RERR for unsolicited message from %s to dest %s",
			      ip_to_str(src_addr),
			      ip_to_str(dest_addr));

			if (fwd_rt) {
				rerr = rerr_create(0, fwd_rt->dest_addr,
						   fwd_rt->dest_seqno);

				rt_table_update_timeout(fwd_rt,
							DELETE_PERIOD);
			} else
				rerr = rerr_create(0, dest_addr, 0);

			/* Unicast the RERR to the source of the data transmission
			 * if possible, otherwise we broadcast it. */

			if (rev_rt && rev_rt->state == VALID)
				rerr_dest = rev_rt->next_hop;
			else
				rerr_dest.s_addr = AODV_BROADCAST;

			aodv_socket_send((AODV_msg *) rerr, rerr_dest,
					 RERR_CALC_SIZE(rerr), 1,
					 &DEV_IFINDEX(m->ifindex));

			if (wait_on_reboot) {
				DEBUG(LOG_DEBUG, 0,
				      "Wait on reboot timer reset.");
				timer_set_timeout(&worb_timer,
						  DELETE_PERIOD);
			}
		} while (0);
		break;
	default:
		DEBUG(LOG_DEBUG, 0, "Got mesg type=%d\n", nlm->nlmsg_type);
	}
} 
Example #28
0
static void rs_packet_process(uint64_t count, rs_event_t *event, struct pcap_pkthdr const *header, uint8_t const *data)
{
	rs_stats_t		*stats = event->stats;
	struct timeval		elapsed;
	struct timeval		latency;

	/*
	 *	Pointers into the packet data we just received
	 */
	size_t len;
	uint8_t const		*p = data;
	struct ip_header const	*ip = NULL;	/* The IP header */
	struct ip_header6 const	*ip6 = NULL;	/* The IPv6 header */
	struct udp_header const	*udp;		/* The UDP header */
	uint8_t			version;	/* IP header version */
	bool			response;	/* Was it a response code */

	decode_fail_t		reason;		/* Why we failed decoding the packet */
	static uint64_t		captured = 0;

	RADIUS_PACKET *current;			/* Current packet we're processing */
	rs_request_t *original;

	if (!start_pcap.tv_sec) {
		start_pcap = header->ts;
	}

	if (header->caplen <= 5) {
		INFO("Packet too small, captured %i bytes", header->caplen);
		return;
	}

	/*
	 *	Loopback header
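	 *	(a DLT_NULL loopback capture prepends a 4-byte address family in host byte order; 2 == AF_INET)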
	 */
	if ((p[0] == 2) && (p[1] == 0) && (p[2] == 0) && (p[3] == 0)) {
		p += 4;
	/*
	 *	Ethernet header
	 */
	} else {
		p += sizeof(struct ethernet_header);
	}

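	/* the IP version field is the high nibble of the first byte of the IP header */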
	version = (p[0] & 0xf0) >> 4;
	switch (version) {
	case 4:
		ip = (struct ip_header const *)p;
		len = (0x0f & ip->ip_vhl) * 4;	/* ip_hl specifies length in 32bit words */
		p += len;
		break;

	case 6:
		ip6 = (struct ip_header6 const *)p;
		p += sizeof(struct ip_header6);

		break;

	default:
		DEBUG("IP version invalid %i", version);
		return;
	}

	/*
	 *	End of variable length bits, do basic check now to see if packet looks long enough
	 */
	len = (p - data) + sizeof(struct udp_header) + (sizeof(radius_packet_t) - 1);	/* length value */
	if (len > header->caplen) {
		DEBUG("Packet too small, we require at least %zu bytes, captured %i bytes",
		      (size_t) len, header->caplen);
		return;
	}

	udp = (struct udp_header const *)p;
	p += sizeof(struct udp_header);

	/*
	 *	With artificial talloc memory limits there's a good chance we can
	 *	recover once some requests timeout, so make an effort to deal
	 *	with allocation failures gracefully.
	 */
	current = rad_alloc(conf, 0);
	if (!current) {
		ERROR("Failed allocating memory to hold decoded packet");
		rs_tv_add_ms(&header->ts, conf->stats.timeout, &stats->quiet);
		return;
	}
	current->timestamp = header->ts;
	current->data_len = header->caplen - (p - data);
	memcpy(&current->data, &p, sizeof(current->data));

	/*
	 *	Populate IP/UDP fields from PCAP data
	 */
	if (ip) {
		current->src_ipaddr.af = AF_INET;
		current->src_ipaddr.ipaddr.ip4addr.s_addr = ip->ip_src.s_addr;

		current->dst_ipaddr.af = AF_INET;
		current->dst_ipaddr.ipaddr.ip4addr.s_addr = ip->ip_dst.s_addr;
	} else {
		current->src_ipaddr.af = AF_INET6;
		memcpy(&current->src_ipaddr.ipaddr.ip6addr.s6_addr, &ip6->ip_src.s6_addr,
		       sizeof(current->src_ipaddr.ipaddr.ip6addr.s6_addr));

		current->dst_ipaddr.af = AF_INET6;
		memcpy(&current->dst_ipaddr.ipaddr.ip6addr.s6_addr, &ip6->ip_dst.s6_addr,
		       sizeof(current->dst_ipaddr.ipaddr.ip6addr.s6_addr));
	}

	current->src_port = ntohs(udp->udp_sport);
	current->dst_port = ntohs(udp->udp_dport);

	if (!rad_packet_ok(current, 0, &reason)) {
		rs_tv_sub(&header->ts, &start_pcap, &elapsed);	/* elapsed is otherwise only set on the success path below */
		RIDEBUG("(%" PRIu64 ") ** %s **", count, fr_strerror());
		RIDEBUG("(%" PRIu64 ") %s Id %i %s:%s:%d -> %s:%d\t+%u.%03u", count,
			fr_packet_codes[current->code], current->id,
			event->in->name,
			fr_inet_ntop(current->src_ipaddr.af, &current->src_ipaddr.ipaddr), current->src_port,
			fr_inet_ntop(current->dst_ipaddr.af, &current->dst_ipaddr.ipaddr), current->dst_port,
			(unsigned int) elapsed.tv_sec, ((unsigned int) elapsed.tv_usec / 1000));

		rad_free(&current);
		return;
	}

	switch (current->code) {
	case PW_CODE_ACCOUNTING_RESPONSE:
	case PW_CODE_AUTHENTICATION_REJECT:
	case PW_CODE_AUTHENTICATION_ACK:
	case PW_CODE_COA_NAK:
	case PW_CODE_COA_ACK:
	case PW_CODE_DISCONNECT_NAK:
	case PW_CODE_DISCONNECT_ACK:
	case PW_CODE_STATUS_CLIENT:
		{
			rs_request_t search;
			struct timeval when;

			rs_tv_add_ms(&header->ts, conf->stats.timeout, &when);

			/* look for a matching request and use it for decoding */
			search.packet = current;
			original = rbtree_finddata(request_tree, &search);

			/*
			 *	Only decode attributes if we want to print them or filter on them
			 *	rad_packet_ok does checks to verify the packet is actually valid.
			 */
			if (filter_vps || conf->print_packet) {
				if (rad_decode(current, original ? original->packet : NULL,
					       conf->radius_secret) != 0) {
					rad_free(&current);
					fr_perror("decode");
					return;
				}
			}

			/*
			 *	Check if we've managed to link it to a request
			 */
			if (original) {
				/*
				 *	Is this a retransmit?
				 */
				if (!original->linked) {
					original->stats_rsp = &stats->exchange[current->code];
				} else {
					RDEBUG("(%" PRIu64 ") ** RETRANSMISSION **", count);
					original->rt_rsp++;

					rad_free(&original->linked);
					fr_event_delete(event->list, &original->event);
				}

				original->linked = talloc_steal(original, current);

				/*
				 *	Some RADIUS servers and proxy servers may not cache
				 *	Accounting-Responses (and possibly other code),
				 *	and may immediately re-use a RADIUS packet src
				 *	port/id combination on receipt of a response.
				 */
				if (conf->dequeue[current->code]) {
					fr_event_delete(event->list, &original->event);
					rbtree_deletebydata(request_tree, original);
				} else {
					if (!fr_event_insert(event->list, rs_packet_cleanup, original, &when,
						    	     &original->event)) {
						ERROR("Failed inserting new event");
						/*
						 *	Delete the original request/event, it's no longer valid
						 *	for statistics.
						 */
						original->forced_cleanup = true;
						fr_event_delete(event->list, &original->event);
						rbtree_deletebydata(request_tree, original);

						return;
					}
				}
			/*
			 *	No request seen, or request was dropped by attribute filter
			 */
			} else {
				/*
				 *	If filter_vps are set assume the original request was dropped,
				 *	the alternative is maintaining another 'filter', but that adds
				 *	complexity, reduces max capture rate, and is generally a PITA.
				 */
				if (filter_vps) {
					rad_free(&current);
					RDEBUG2("(%" PRIu64 ") Dropped by attribute filter", count);
					return;
				}

				RDEBUG("(%" PRIu64 ") ** UNLINKED **", count);
				stats->exchange[current->code].interval.unlinked_total++;
			}

			response = true;
		}
			break;
	case PW_CODE_ACCOUNTING_REQUEST:
	case PW_CODE_AUTHENTICATION_REQUEST:
	case PW_CODE_COA_REQUEST:
	case PW_CODE_DISCONNECT_REQUEST:
	case PW_CODE_STATUS_SERVER:
		{
			rs_request_t search;
			struct timeval when;

			/*
			 *	Only decode attributes if we want to print them or filter on them
			 *	rad_packet_ok does checks to verify the packet is actually valid.
			 */
			if (filter_vps || conf->print_packet) {
				if (rad_decode(current, NULL, conf->radius_secret) != 0) {
					rad_free(&current);
					fr_perror("decode");
					return;
				}
			}

			/*
			 *	Now verify the packet passes the attribute filter
			 */
			if (filter_vps && !pairvalidate_relaxed(filter_vps, current->vps)) {
				rad_free(&current);
				RDEBUG2("(%" PRIu64 ") Dropped by attribute filter", count);
				return;
			}

			/*
			 *	save the request for later matching
			 */
			search.packet = rad_alloc_reply(conf, current);
			if (!search.packet) {
				ERROR("Failed allocating memory to hold expected reply");
				rs_tv_add_ms(&header->ts, conf->stats.timeout, &stats->quiet);
				rad_free(&current);
				return;
			}
			search.packet->code = current->code;

			rs_tv_add_ms(&header->ts, conf->stats.timeout, &when);

			original = rbtree_finddata(request_tree, &search);

			/*
			 *	Upstream device re-used src/dst ip/port id without waiting
			 *	for the timeout period to expire, or a response.
			 */
			if (original && (memcmp(original->packet->vector, current->vector,
						sizeof(original->packet->vector)) != 0)) {
				RDEBUG2("(%" PRIu64 ") ** PREMATURE ID RE-USE **", count);
				stats->exchange[current->code].interval.reused_total++;
				original->forced_cleanup = true;

				fr_event_delete(event->list, &original->event);
				rbtree_deletebydata(request_tree, original);
				original = NULL;
			}

			if (original) {
				RDEBUG("(%" PRIu64 ") ** RETRANSMISSION **", count);
				original->rt_req++;

				rad_free(&original->packet);
				original->packet = talloc_steal(original, search.packet);

				/* We may have seen the response, but it may have been lost upstream */
				rad_free(&original->linked);
				fr_event_delete(event->list, &original->event);
			} else {
				original = talloc_zero(conf, rs_request_t);
				talloc_set_destructor(original, _request_free);

				original->id = count;
				original->in = event->in;
				original->stats_req = &stats->exchange[current->code];
				original->packet = talloc_steal(original, search.packet);

				rbtree_insert(request_tree, original);
			}

			/* update the timestamp in either case */
			original->packet->timestamp = header->ts;

			if (!fr_event_insert(event->list, rs_packet_cleanup, original, &when, &original->event)) {
				ERROR("Failed inserting new event");
				rbtree_deletebydata(request_tree, original);

				return;
			}
			response = false;
		}
			break;
	default:
		RDEBUG("** Unsupported code %i **", current->code);
		rad_free(&current);

		return;
	}

	if (event->out) {
		pcap_dump((void *) (event->out->dumper), header, data);
	}

	rs_tv_sub(&header->ts, &start_pcap, &elapsed);

	/*
	 *	Increase received count
	 */
	stats->exchange[current->code].interval.received_total++;

	/*
	 *	It's a linked response
	 */
	if (original && original->linked) {
		rs_tv_sub(&current->timestamp, &original->packet->timestamp, &latency);

		/*
		 *	Update stats for both the request and response types.
		 *
		 *	This isn't useful for things like Access-Requests, but will be useful for
		 *	CoA and Disconnect Messages, as we get the average latency across both
		 *	response types.
		 *
		 *	It also justifies allocating 255 instances of rs_latency_t.
		 */
		rs_stats_update_latency(&stats->exchange[current->code], &latency);
		rs_stats_update_latency(&stats->exchange[original->packet->code], &latency);


		/*
		 *	Print info about the request/response.
		 */
		RIDEBUG("(%" PRIu64 ") %s Id %i %s:%s:%d %s %s:%d\t+%u.%03u\t+%u.%03u", count,
			fr_packet_codes[current->code], current->id,
			event->in->name,
			fr_inet_ntop(current->src_ipaddr.af, &current->src_ipaddr.ipaddr), current->src_port,
			response ? "<-" : "->",
			fr_inet_ntop(current->dst_ipaddr.af, &current->dst_ipaddr.ipaddr), current->dst_port,
			(unsigned int) elapsed.tv_sec, ((unsigned int) elapsed.tv_usec / 1000),
			(unsigned int) latency.tv_sec, ((unsigned int) latency.tv_usec / 1000));
	/*
	 *	It's the original request
	 */
	} else {
		/*
		 *	Print info about the request
		 */
		RIDEBUG("(%" PRIu64 ") %s Id %i %s:%s:%d %s %s:%d\t+%u.%03u", count,
			fr_packet_codes[current->code], current->id,
			event->in->name,
			fr_inet_ntop(current->src_ipaddr.af, &current->src_ipaddr.ipaddr), current->src_port,
			response ? "<-" : "->",
			fr_inet_ntop(current->dst_ipaddr.af, &current->dst_ipaddr.ipaddr), current->dst_port,
			(unsigned int) elapsed.tv_sec, ((unsigned int) elapsed.tv_usec / 1000));
	}

	if (conf->print_packet && (fr_debug_flag > 1) && current->vps) {
		pairsort(&current->vps, true);
		vp_printlist(log_dst, current->vps);
		pairfree(&current->vps);
	}

	if (!conf->to_stdout && (fr_debug_flag > 4)) {
		rad_print_hex(current);
	}

	fflush(log_dst);

	/*
	 *	If it's a request, a duplicate of the packet will already have been stored.
	 *	If it's an unlinked response, we need to free it explicitly, as it will
	 *	not be done by the event queue.
	 */
	if (!response || !original) {
		rad_free(&current);
	}

	captured++;
	/*
	 *	We've hit our capture limit, break out of the event loop
	 */
	if ((conf->limit > 0) && (captured >= conf->limit)) {
		INFO("Captured %" PRIu64 " packets, exiting...", captured);
		fr_event_loop_exit(events, 1);
	}
}
int compute_password_element (pwd_session_t *sess, uint16_t grp_num,
			      char const *password, int password_len,
			      char const *id_server, int id_server_len,
			      char const *id_peer, int id_peer_len,
			      uint32_t *token)
{
	BIGNUM *x_candidate = NULL, *rnd = NULL, *cofactor = NULL;
	HMAC_CTX ctx;
	uint8_t pwe_digest[SHA256_DIGEST_LENGTH], *prfbuf = NULL, ctr;
	int nid, is_odd, primebitlen, primebytelen, ret = 0;

	switch (grp_num) { /* from IANA registry for IKE D-H groups */
	case 19:
		nid = NID_X9_62_prime256v1;
		break;

	case 20:
		nid = NID_secp384r1;
		break;

	case 21:
		nid = NID_secp521r1;
		break;

	case 25:
		nid = NID_X9_62_prime192v1;
		break;

	case 26:
		nid = NID_secp224r1;
		break;

	default:
		DEBUG("unknown group %d", grp_num);
		goto fail;
	}

	sess->pwe = NULL;
	sess->order = NULL;
	sess->prime = NULL;

	if ((sess->group = EC_GROUP_new_by_curve_name(nid)) == NULL) {
		DEBUG("unable to create EC_GROUP");
		goto fail;
	}

	if (((rnd = BN_new()) == NULL) ||
	    ((cofactor = BN_new()) == NULL) ||
	    ((sess->pwe = EC_POINT_new(sess->group)) == NULL) ||
	    ((sess->order = BN_new()) == NULL) ||
	    ((sess->prime = BN_new()) == NULL) ||
	    ((x_candidate = BN_new()) == NULL)) {
		DEBUG("unable to create bignums");
		goto fail;
	}

	if (!EC_GROUP_get_curve_GFp(sess->group, sess->prime, NULL, NULL, NULL)) {
		DEBUG("unable to get prime for GFp curve");
		goto fail;
	}

	if (!EC_GROUP_get_order(sess->group, sess->order, NULL)) {
		DEBUG("unable to get order for curve");
		goto fail;
	}

	if (!EC_GROUP_get_cofactor(sess->group, cofactor, NULL)) {
		DEBUG("unable to get cofactor for curve");
		goto fail;
	}

	primebitlen = BN_num_bits(sess->prime);
	primebytelen = BN_num_bytes(sess->prime);
	if ((prfbuf = talloc_zero_array(sess, uint8_t, primebytelen)) == NULL) {
		DEBUG("unable to alloc space for prf buffer");
		goto fail;
	}
	ctr = 0;
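	/* "hunting and pecking": try successive counter values until the derived x yields a valid point on the curve */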
	while (1) {
		if (ctr > 10) {
			DEBUG("unable to find random point on curve for group %d, something's fishy", grp_num);
			goto fail;
		}
		ctr++;

		/*
		 * compute counter-mode password value and stretch to prime
		 *    pwd-seed = H(token | peer-id | server-id | password |
		 *		   counter)
		 */
		H_Init(&ctx);
		H_Update(&ctx, (uint8_t *)token, sizeof(*token));
		H_Update(&ctx, (uint8_t const *)id_peer, id_peer_len);
		H_Update(&ctx, (uint8_t const *)id_server, id_server_len);
		H_Update(&ctx, (uint8_t const *)password, password_len);
		H_Update(&ctx, (uint8_t *)&ctr, sizeof(ctr));
		H_Final(&ctx, pwe_digest);

		BN_bin2bn(pwe_digest, SHA256_DIGEST_LENGTH, rnd);
		eap_pwd_kdf(pwe_digest, SHA256_DIGEST_LENGTH, "EAP-pwd Hunting And Pecking",
			    strlen("EAP-pwd Hunting And Pecking"), prfbuf, primebitlen);

		BN_bin2bn(prfbuf, primebytelen, x_candidate);
		/*
		 * eap_pwd_kdf() returns a string of bits 0..primebitlen but
		 * BN_bin2bn will treat that string of bits as a big endian
		 * number. If the primebitlen is not an even multiple of 8 then
		 * the buffer holds excess bits -- those _after_ primebitlen --
		 * so we have to shift right by the amount we masked off.
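		 * (e.g. for the 521-bit P-521 prime, primebitlen % 8 == 1, so we shift right by 7)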
		 */
		if (primebitlen % 8) BN_rshift(x_candidate, x_candidate, (8 - (primebitlen % 8)));
		if (BN_ucmp(x_candidate, sess->prime) >= 0) continue;

		/*
		 * need to unambiguously identify the solution, if there is
		 * one...
		 */
		is_odd = BN_is_odd(rnd) ? 1 : 0;

		/*
		 * solve the quadratic equation, if it's not solvable then we
		 * don't have a point
		 */
		if (!EC_POINT_set_compressed_coordinates_GFp(sess->group, sess->pwe, x_candidate, is_odd, NULL)) {
			continue;
		}

		/*
		 * If there's a solution to the equation then the point must be
		 * on the curve so why check again explicitly? OpenSSL code
		 * says this is required by X9.62. We're not X9.62 but it can't
		 * hurt just to be sure.
		 */
		if (!EC_POINT_is_on_curve(sess->group, sess->pwe, NULL)) {
			DEBUG("EAP-pwd: point is not on curve");
			continue;
		}

		if (BN_cmp(cofactor, BN_value_one())) {
			/* make sure the point is not in a small sub-group */
			if (!EC_POINT_mul(sess->group, sess->pwe, NULL, sess->pwe,
				cofactor, NULL)) {
				DEBUG("EAP-pwd: cannot multiply point by cofactor");
				continue;
			}

			if (EC_POINT_is_at_infinity(sess->group, sess->pwe)) {
				DEBUG("EAP-pwd: point is at infinity");
				continue;
			}
		}
		/* if we got here then we have a new generator. */
		break;
	}

	sess->group_num = grp_num;
	if (0) {
		fail:		/* DON'T free sess, it's in handler->opaque */
		ret = -1;
	}

	/* cleanliness and order.... */
	BN_free(cofactor);
	BN_free(x_candidate);
	BN_free(rnd);
	talloc_free(prfbuf);

	return ret;
}
Example #30
0
static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock)
{
	u16 BlockMap[MAX_SECTORS_PER_UNIT];
	unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
	unsigned int thisEUN, prevEUN, status;
	struct mtd_info *mtd = inftl->mbd.mtd;
	int block, silly;
	unsigned int targetEUN;
	struct inftl_oob oob;
	size_t retlen;

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
		"pending=%d)\n", inftl, thisVUC, pendingblock);

	memset(BlockMap, 0xff, sizeof(BlockMap));
	memset(BlockDeleted, 0, sizeof(BlockDeleted));

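	/* fold everything into the head of the chain (the newest erase unit); older units are reached via PUtable */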
	thisEUN = targetEUN = inftl->VUtable[thisVUC];

	if (thisEUN == BLOCK_NIL) {
		printk(KERN_WARNING "INFTL: trying to fold non-existent "
		       "Virtual Unit Chain %d!\n", thisVUC);
		return BLOCK_NIL;
	}

	/*
	 * Scan to find the Erase Unit which holds the actual data for each
	 * 512-byte block within the Chain.
	 */
	silly = MAX_LOOPS;
	while (thisEUN < inftl->nb_blocks) {
		for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
			if ((BlockMap[block] != BLOCK_NIL) ||
			    BlockDeleted[block])
				continue;

			if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
					   + (block * SECTORSIZE), 16, &retlen,
					   (char *)&oob) < 0)
				status = SECTOR_IGNORE;
			else
				status = oob.b.Status | oob.b.Status1;

			switch(status) {
			case SECTOR_FREE:
			case SECTOR_IGNORE:
				break;
			case SECTOR_USED:
				BlockMap[block] = thisEUN;
				continue;
			case SECTOR_DELETED:
				BlockDeleted[block] = 1;
				continue;
			default:
				printk(KERN_WARNING "INFTL: unknown status "
					"for block %d in EUN %d: %x\n",
					block, thisEUN, status);
				break;
			}
		}

		if (!silly--) {
			printk(KERN_WARNING "INFTL: infinite loop in Virtual "
				"Unit Chain 0x%x\n", thisVUC);
			return BLOCK_NIL;
		}

		thisEUN = inftl->PUtable[thisEUN];
	}

	/*
	 * OK. We now know the location of every block in the Virtual Unit
	 * Chain, and the Erase Unit into which we are supposed to be copying.
	 * Go for it.
	 */
	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
		thisVUC, targetEUN);

	for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
		unsigned char movebuf[SECTORSIZE];
		int ret;

		/*
		 * If it's in the target EUN already, or if it's pending write,
		 * do nothing.
		 */
		if (BlockMap[block] == targetEUN || (pendingblock ==
		    (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
			continue;
		}

		/*
		 * Only copy non-free blocks (free blocks can only
		 * happen in case of media errors or deleted blocks).
		 */
		if (BlockMap[block] == BLOCK_NIL)
			continue;

		ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
				(block * SECTORSIZE), SECTORSIZE, &retlen,
				movebuf);
		if (ret < 0 && ret != -EUCLEAN) {
			ret = mtd->read(mtd,
					(inftl->EraseSize * BlockMap[block]) +
					(block * SECTORSIZE), SECTORSIZE,
					&retlen, movebuf);
			if (ret != -EIO)
				DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
				      "away on retry?\n");
		}
		memset(&oob, 0xff, sizeof(struct inftl_oob));
		oob.b.Status = oob.b.Status1 = SECTOR_USED;

		inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
			    (block * SECTORSIZE), SECTORSIZE, &retlen,
			    movebuf, (char *)&oob);
	}

	/*
	 * Newest unit in chain now contains data from _all_ older units.
	 * So go through and erase each unit in chain, oldest first. (This
	 * is important: by doing oldest first, if we crash/reboot then it
	 * is relatively simple to clean up the mess).
	 */
	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
		thisVUC);

	for (;;) {
		/* Find oldest unit in chain. */
		thisEUN = inftl->VUtable[thisVUC];
		prevEUN = BLOCK_NIL;
		while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
			prevEUN = thisEUN;
			thisEUN = inftl->PUtable[thisEUN];
		}

		/* Check if we are all done */
		if (thisEUN == targetEUN)
			break;

		/* Unlink the last block from the chain. */
		inftl->PUtable[prevEUN] = BLOCK_NIL;

		/* Now try to erase it. */
		if (INFTL_formatblock(inftl, thisEUN) < 0) {
			/*
			 * Could not erase : mark block as reserved.
			 */
			inftl->PUtable[thisEUN] = BLOCK_RESERVED;
		} else {
			/* Correctly erased : mark it as free */
			inftl->PUtable[thisEUN] = BLOCK_FREE;
			inftl->numfreeEUNs++;
		}
	}

	return targetEUN;
}