Example #1
double BrennerPotential::GetPotentialEnergy(PyObject *a)
{
  VERB(" Energy[");
  Calculate(a);
  VERB("]");
  return Epot;
}
Example #2
const vector<Vec> &BrennerPotential::GetForces(PyObject *a)
{
  VERB(" Force[");
  Calculate(a);
  VERB("]");
  return force;
}
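
Every example in this listing calls a VERB macro, but none of them shows its definition; each project defines its own variant (wrapping printk, printf, or a project-specific logger behind a verbosity switch). As a rough, hypothetical sketch only, assuming a runtime flag named verbose, such a macro could look like this:

#include <stdio.h>

/* Hypothetical verbosity switch; real projects gate this differently
 * (module parameter, environment variable, build-time option, ...). */
static int verbose = 1;

/* Minimal sketch of a VERB-style macro: printf-like arguments that are
 * only emitted when verbose output is enabled. */
#define VERB(fmt, ...) \
	do { \
		if (verbose) \
			fprintf(stderr, fmt "\n", ##__VA_ARGS__); \
	} while (0)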
Example #3
static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		return -ENOENT;
	}

	/* XXX flushy, flushy */
	ret = 0;

	if (!ret) {
		ret = omap_gem_op_finish(obj, args->op);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Example #4
static enum drm_connector_status omap_connector_detect(
		struct drm_connector *connector, bool force)
{
	struct omap_connector *omap_connector = to_omap_connector(connector);
	struct omap_dss_device *dssdev = omap_connector->dssdev;
	struct omap_dss_driver *dssdrv = dssdev->driver;
	enum drm_connector_status ret;

	if (dssdrv->detect) {
		if (dssdrv->detect(dssdev))
			ret = connector_status_connected;
		else
			ret = connector_status_disconnected;
	} else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
			dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
			dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
			dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
		ret = connector_status_connected;
	} else {
		ret = connector_status_unknown;
	}

	VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);

	return ret;
}
Example #5
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return flush_mask;
}
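
To make the mask arithmetic above concrete, here is a small standalone sketch with invented bit values (they do not correspond to real MDP5 register bits): a pending CTL trigger ORs the CTL flush bit into the request, and ANDing with the hardware flush mask drops any bit the hardware cannot flush directly.

#include <stdio.h>
#include <inttypes.h>

/* Invented bit positions, for illustration only. */
#define FAKE_FLUSH_LM0   (1u << 0)
#define FAKE_FLUSH_DSPP0 (1u << 13)
#define FAKE_FLUSH_CTL   (1u << 17)

int main(void)
{
	uint32_t flush_hw_mask = FAKE_FLUSH_LM0 | FAKE_FLUSH_CTL;  /* bits the HW can flush */
	uint32_t pending_ctl_trigger = FAKE_FLUSH_LM0;             /* CTL registers also dirty */
	uint32_t flush_mask = FAKE_FLUSH_LM0 | FAKE_FLUSH_DSPP0;   /* caller's request */

	if (pending_ctl_trigger & flush_mask)
		flush_mask |= FAKE_FLUSH_CTL;   /* piggyback the CTL flush on this commit */

	flush_mask &= flush_hw_mask;            /* drop bits without a dedicated HW bit */

	printf("written to the FLUSH register: 0x%08" PRIx32 "\n", flush_mask);  /* LM0 | CTL */
	return 0;
}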
Example #6
void BrennerPotential::Calculate(PyObject *a)
{
  assert(atoms != NULL);
  atoms->Begin(a);
  z = atoms->GetAtomicNumbers();
  positions = atoms->GetPositions();
  nAtoms = atoms->GetNumberOfAtoms();
  if (counter != atoms->GetPositionsCounter())
    {
      // The atoms have been modified.  Do a calculation.
      Epot = 0.0;
      force.resize(nAtoms);
      for (vector<Vec>::iterator i = force.begin();
	   i != force.end(); ++i)
	(*i)[0] = (*i)[1] = (*i)[2] = 0.0;
      
      if (counter_z != atoms->GetNumbersCounter())
	{
	  CountAtoms();
	  counter_z = atoms->GetNumbersCounter();
	}
      CheckAndUpdateNeighborList();
      VERB("c");
      Epot = caguts();  // Do the actual calculation.
      counter = atoms->GetPositionsCounter();
    }
  atoms->End();
}
Example #7
/* flush an area of the framebuffer (in case of manual update display that
 * is not automatically flushed)
 */
void omap_connector_flush(struct drm_connector *connector,
		int x, int y, int w, int h)
{
	struct omap_connector *omap_connector = to_omap_connector(connector);

	/* TODO: enable when supported in dss */
	VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
}
Example #8
void omap_connector_flush(struct drm_connector *connector,
		int x, int y, int w, int h)
{
	struct omap_connector *omap_connector = to_omap_connector(connector);

	
	VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
}
Example #9
static int ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_new *args = data;
	VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
			args->size.bytes, args->flags);
	return omap_gem_new_handle(dev, file_priv, args->size,
			args->flags, &args->handle);
}
Example #10
void BrennerPotential::SetAtoms(PyObject *a, Atoms* accessobj)
{
  VERB(" SetAtoms");
  if (accessobj != NULL)
    throw AsapError("BrennerPotential::SetAtoms called with accessobj != NULL");
  if (atoms == NULL)
    atoms = new NormalAtoms();
  assert(atoms != NULL);
}
Example #11
static int sim_ctl_set_issm(Client * cl, struct sim_ctl * ctl)
{
	int issm = *(int *)ctl->data;

	VERB("set issm %d port %" PRIx64, issm, cl->port->portguid);
	cl->issm = issm;
	set_issm(cl->port, issm);

	return 0;
}
Example #12
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
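
The comment about vmf->pgoff is the key detail: it carries the fake GEM mmap offset, so the page index is recomputed from the faulting address instead. A tiny sketch of that arithmetic with made-up values, assuming 4 KiB pages (PAGE_SHIFT of 12):

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, the common case */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* Hypothetical mapping: the VMA starts at vm_start and the fault
	 * lands 123 bytes into the fourth page of the object. */
	unsigned long vm_start = 0x10000000UL;
	unsigned long fault_address = vm_start + 3 * PAGE_SIZE + 123;

	unsigned long pgoff = (fault_address - vm_start) >> PAGE_SHIFT;

	printf("faulting page index within the object: %lu\n", pgoff);  /* 3 */
	return 0;
}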
Example #13
void BrennerPotential::CheckAndUpdateNeighborList()
{
  VERB("n");
  if (nblist == NULL)
    {
      // Make a new neighbor list.
      VERB("N");
#ifdef SMARTCUTOFF
      rmax_nosq = 0.0;
      set<int> elements;
      atoms->GetListOfElements(elements);
      for (set<int>::const_iterator z1 = elements.begin();
	   z1 != elements.end(); ++z1)
	for (set<int>::const_iterator z2 = elements.begin();
	     z2 != elements.end(); ++z2)
	  {
	    int k1 = z_to_ktype[*z1] - 1;
	    int k2 = z_to_ktype[*z2] - 1;
	    double r = sqrt(rmax[k1][k2]);
	    if (r > rmax_nosq)
	      rmax_nosq = r;
	  }
#endif
      double driftfactor = 0.1;
      PyAsap_NeighborLocatorObject *nbl =
	PyAsap_NewNeighborList(atoms, rmax_nosq, driftfactor);
      nblist = dynamic_cast<NeighborList *>(nbl->cobj);
      assert(nblist != NULL);
      nblist_obj = (PyObject *) nbl;
      nblist->EnableFullNeighborLists();
      nblist->CheckAndUpdateNeighborList();
      return;
    }
  bool update = (nblist->IsInvalid() || nblist->CheckNeighborList());
  if (update)
    {
      VERB("U");
      nblist->UpdateNeighborList();
    }
}
Example #14
ci_fd_t ci_udp_ep_ctor(citp_socket* ep, ci_netif* netif, int domain, int type)
{
  ci_udp_state* us;
  ci_fd_t fd;

  VERB( log(LPFIN "ctor( )" ) );

  ci_assert(ep);
  ci_assert(netif);

  ci_netif_lock(netif);
  us = ci_udp_get_state_buf(netif);
  if (!us) {
    ci_netif_unlock(netif);
    LOG_E(ci_log("%s: [%d] out of socket buffers", __FUNCTION__,NI_ID(netif)));
    return -ENOMEM;
  }

  /* The protocol must be set before ci_tcp_helper_sock_attach(),
   * since it is used to determine whether TCP or UDP file operations
   * should be attached to the file descriptor in the kernel. */
  sock_protocol(&us->s) = IPPROTO_UDP;

  /* NB: this attach will close the os_sock_fd */
  fd = ci_tcp_helper_sock_attach(ci_netif_get_driver_handle(netif),  
                                 SC_SP(&us->s), domain, type);
  if( fd < 0 ) {
    if( fd == -EAFNOSUPPORT )
      LOG_U(ci_log("%s: ci_tcp_helper_sock_attach (domain=%d, type=%d) "
                   "failed %d", __FUNCTION__, domain, type, fd));
    else
      LOG_E(ci_log("%s: ci_tcp_helper_sock_attach (domain=%d, type=%d) "
                   "failed %d", __FUNCTION__, domain, type, fd));
    ci_netif_unlock(netif);
    return fd;
  }

  ci_assert(~us->s.b.sb_aflags & CI_SB_AFLAG_ORPHAN);

  us->s.rx_errno = 0;
  us->s.tx_errno = 0;
  us->s.so_error = 0;
  us->s.cp.sock_cp_flags |= OO_SCP_UDP_WILD;

  ep->s = &us->s;
  ep->netif = netif;
  CHECK_UEP(ep);
  ci_netif_unlock(netif);
  return fd;
}
Example #15
static int sim_read_pkt(int fd, int client)
{
	char buf[512];
	Client *cl = clients + client, *dcl;
	int size, ret;

	if (client >= IBSIM_MAX_CLIENTS || !cl->pid) {
		IBWARN("pkt from unconnected client %d?!", client);
		return -1;
	}
	for (;;) {
		if ((size = read(fd, buf, sizeof(buf))) <= 0)
			return size;

		if ((size = process_packet(cl, buf, size, &dcl)) < 0) {
			IBWARN("process packet error - discarded");
			continue;	// not a network error
		}

		if (!dcl)
			continue;

		VERB("%s %d bytes (%zu) to client %d fd %d",
		     dcl == cl ? "replying" : "forwarding",
		     size, sizeof(struct sim_request), dcl->id, dcl->fd);

		// reply
		do {
			ret = write(dcl->fd, buf, size);
		} while ((errno == EAGAIN) && (ret == -1));
			 
		if (ret == size)
			return 0;

		if (ret < 0 && (errno == ECONNREFUSED || errno == ENOTCONN)) {
			IBWARN("client %u seems to be dead - disconnecting.",
			       dcl->id);
			disconnect_client(dcl->id);
		}
		IBWARN("write failed: %m - pkt dropped");
		if (dcl != cl) { /* reply timeout */
			struct sim_request *r = (struct sim_request *)buf;
			r->status = htonl(110);
			ret = write(cl->fd, buf, size);
		}
	}

	return -1;		// never reached
}
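
The do/while above retries the write while errno is EAGAIN; a generic helper for the same idea (a sketch, not the ibsim code path) would also handle EINTR and short writes. Note that spinning on EAGAIN busy-waits when the descriptor is non-blocking, just as the loop above does.

#include <errno.h>
#include <unistd.h>

/* Sketch: write the whole buffer, retrying on EINTR/EAGAIN and resuming
 * after short writes; returns the byte count on success or -1 on error. */
static ssize_t write_all(int fd, const void *buf, size_t len)
{
	const char *p = buf;
	size_t left = len;

	while (left > 0) {
		ssize_t n = write(fd, p, left);
		if (n < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;       /* transient: try again */
			return -1;              /* hard error (e.g. ECONNREFUSED) */
		}
		p += n;
		left -= (size_t)n;
	}
	return (ssize_t)len;
}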
Example #16
void BrennerPotential::CountAtoms()
{
  VERB("+");
  for (int i = 0; i < 5; i++)
    noa[i] = 0;
  for (int i = 0; i < nAtoms; i++)
    {
      int zz = z[i];
      if (zz < 1 || zz > MAXATNO)
	throw AsapError("Invalid atomic number: z[") << i << "]=" << zz;
      noa[z_to_ktype[zz]]++;
    }
  if (noa[0])
    throw AsapError("BrennerPotential only supports Hydrogen, Carbon, Silicon and Germanium.");
}
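
CountAtoms() is a bounds-checked histogram over a lookup table: each atomic number is mapped to a ktype index and the per-type counters are incremented, with index 0 reserved for unsupported elements. A self-contained sketch in the same spirit (the table below is invented; the real z_to_ktype ordering is not shown in this listing):

#include <stdio.h>

#define MAXATNO 32   /* hypothetical upper bound on atomic numbers */

int main(void)
{
	/* Invented mapping: 0 means "unsupported", 1..4 stand for H, C, Si, Ge. */
	int z_to_ktype[MAXATNO + 1] = {0};
	int z[] = {6, 6, 1, 1, 1, 14};   /* a small hypothetical system: C2H3Si */
	int noa[5] = {0};
	size_t i;

	z_to_ktype[1] = 1;    /* H  */
	z_to_ktype[6] = 2;    /* C  */
	z_to_ktype[14] = 3;   /* Si */
	z_to_ktype[32] = 4;   /* Ge */

	for (i = 0; i < sizeof(z) / sizeof(z[0]); i++) {
		if (z[i] < 1 || z[i] > MAXATNO)
			return 1;                /* invalid atomic number */
		noa[z_to_ktype[z[i]]]++;
	}

	/* noa[0] counts atoms of unsupported elements; non-zero would be an error. */
	printf("H=%d C=%d Si=%d Ge=%d unsupported=%d\n",
	       noa[1], noa[2], noa[3], noa[4], noa[0]);
	return 0;
}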
Example #17
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		ctl->flush_mask |= flush_mask;
		return curr_ctl_flush_mask;
	} else {
		flush_mask |= ctl->flush_mask;
		ctl->flush_mask = 0;
	}

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return curr_ctl_flush_mask;
}
Example #18
void set_body(object new_body)
{
	object *mobiles;

	ACCESS_CHECK(TEXT() || GAME() || VERB());

	if (body) {
		mobiles = body->query_property("mobiles");

		if (mobiles) {
			mobiles -= ({ this_object(), nil });

			if (!sizeof(mobiles)) {
				mobiles = nil;
			}

			body->set_property("mobiles", mobiles);
		}
	}
Example #19
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct msm_mdss *mdss = arg;
	u32 intr;

	intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdss->irqcontroller.domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}
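
The while loop above drains the status word one interrupt at a time, always taking the highest set bit via fls(). The same bit-walking pattern in user space, using GCC's __builtin_clz() in place of the kernel's fls() (a sketch with an arbitrary status value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t intr = 0x00000109;   /* bits 0, 3 and 8 set, for illustration */

	while (intr) {
		/* Index of the highest set bit: 31 - clz(x), i.e. fls(x) - 1. */
		unsigned int hwirq = 31 - (unsigned int)__builtin_clz(intr);

		printf("dispatch hwirq %u\n", hwirq);   /* prints 8, then 3, then 0 */
		intr &= ~(UINT32_C(1) << hwirq);        /* clear it and continue */
	}
	return 0;
}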
Example #20
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int id;
	uint32_t status, enable;

	enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
	mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);

	VERB("status=%08x", status);

	mdp_dispatch_irqs(mdp_kms, status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp5_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);
}
Example #21
static int ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	args->size = omap_gem_mmap_size(obj);
	args->offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Example #22
static int sim_ctl_disconnect_client(Client * cl, struct sim_ctl * ctl)
{
	int client = ctl->clientid;

	VERB("disconnecting client %d", client);
	if (client >= IBSIM_MAX_CLIENTS) {
		IBWARN("no connection for client %d", client);
		ctl->type = SIM_CTL_ERROR;
		return -1;
	}
	if (!cl->pid) {
		DEBUG("client %d is not connected", client);
		return 0;	// ?
	}

	DEBUG("Detaching client %d from node \"%s\" port 0x%" PRIx64,
	      client, cl->port->node->nodeid, cl->port->portguid);
	cl->pid = 0;

	return 0;
}
Example #23
static void nc_apps_add(const char* comm, struct nc_apps* apps) {
	int i;

	if (comm[0] == '\0') {
		return;
	}

	for (i = 0; i < NC_APPS_MAX; ++i) {
		if (apps->valid[i] == 0) {
			break;
		}
	}

	if (i == NC_APPS_MAX) {
		VERB("Too many running/crashed libnetconf apps.");
		return;
	}

	apps->valid[i] = 1;
	apps->pids[i] = getpid();
	strcpy(apps->comms[i], comm);
}
Example #24
enum drm_connector_status omap_connector_detect(
		struct drm_connector *connector, bool force)
{
	struct omap_connector *omap_connector = to_omap_connector(connector);
	struct omap_dss_device *dssdev = omap_connector->dssdev;
	struct omap_dss_driver *dssdrv = dssdev->driver;
	enum drm_connector_status ret;

	if (dssdrv->detect) {
		if (dssdrv->detect(dssdev)) {
			ret = connector_status_connected;
		} else {
			ret = connector_status_disconnected;
		}
	} else {
		ret = connector_status_unknown;
	}

	VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);

	return ret;
}
Example #25
static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	ret = omap_gem_op_sync(obj, args->op);

	if (!ret)
		ret = omap_gem_op_start(obj, args->op);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Example #26
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int id;
	uint32_t status;

	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);

	VERB("status=%08x", status);

	mdp_dispatch_irqs(mdp_kms, status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	return IRQ_HANDLED;
}
Example #27
/* flush an area of the framebuffer (in case of manual update display that
 * is not automatically flushed)
 */
void omap_framebuffer_flush(struct drm_framebuffer *fb,
		int x, int y, int w, int h)
{
	struct drm_connector *connector = NULL;

	VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);

	while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
		/* only consider connectors that are part of a chain */
		if (connector->encoder && connector->encoder->crtc) {
			/* TODO: maybe this should propagate thru the crtc who
			 * could do the coordinate translation..
			 */
			struct drm_crtc *crtc = connector->encoder->crtc;
			int cx = max(0, x - crtc->x);
			int cy = max(0, y - crtc->y);
			int cw = w + (x - crtc->x) - cx;
			int ch = h + (y - crtc->y) - cy;

			omap_connector_flush(connector, cx, cy, cw, ch);
		}
	}
}
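
The cx/cy/cw/ch computation above clips the dirty rectangle into the coordinate space of a CRTC whose origin need not be (0,0). A worked sketch with invented numbers:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical: a 100x100 update at (50, 10) in the framebuffer, on a
	 * CRTC scanning out from (80, 0); only the part right of x=80 hits it. */
	int x = 50, y = 10, w = 100, h = 100;
	int crtc_x = 80, crtc_y = 0;

	int cx = MAX(0, x - crtc_x);       /* 0: the update starts left of the CRTC */
	int cy = MAX(0, y - crtc_y);       /* 10 */
	int cw = w + (x - crtc_x) - cx;    /* 70: 30 columns fall outside and are cut */
	int ch = h + (y - crtc_y) - cy;    /* 100 */

	printf("clipped flush: %d,%d %dx%d\n", cx, cy, cw, ch);
	return 0;
}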
Example #28
irqreturn_t mdp5_irq(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	uint32_t intr;

	intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
		mdp5_irq_mdp(mdp_kms);
		intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
	}

	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;
		generic_handle_irq(irq_find_mapping(
				mdp5_kms->irqcontroller.domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}
Example #29
struct nc_session *nc_session_connect_tls_socket(const char* username, const char* UNUSED(host), int sock)
{
	struct nc_session *retval;
	struct passwd *pw;
	pthread_mutexattr_t mattr;
	int verify, r;
	SSL_CTX* tls_ctx;

	tls_ctx = pthread_getspecific(tls_ctx_key);
	if (tls_ctx == NULL) {
		ERROR("TLS subsystem not initiated.");
		return (NULL);
	}

	/* get the current user if a username is not explicitly specified */
	if (username == NULL || strisempty(username)) {
		pw = getpwuid(getuid());
		if (pw == NULL) {
			/* unable to get correct username (errno from getpwuid) */
			ERROR("Unable to set a username for the TLS connection (%s).", strerror(errno));
			return (NULL);
		}
		username = pw->pw_name;
	}

	/* allocate netconf session structure */
	retval = calloc(1, sizeof(struct nc_session));
	if (retval == NULL) {
		ERROR("Memory allocation failed (%s)", strerror(errno));
		return (NULL);
	}
	memset(retval, 0, sizeof(struct nc_session));
	if ((retval->stats = malloc (sizeof (struct nc_session_stats))) == NULL) {
		ERROR("Memory allocation failed (%s)", strerror(errno));
		free(retval);
		return NULL;
	}

	/* prepare a new TLS structure */
	if ((retval->tls = SSL_new(tls_ctx)) == NULL) {
		ERROR("%s: Unable to prepare TLS structure (%s)", __func__, ERR_reason_error_string(ERR_get_error()));
		free(retval->stats);
		free(retval);
		return (NULL);
	}

	/* connect SSL with existing socket */
	SSL_set_fd(retval->tls, sock);

	/* Set the SSL_MODE_AUTO_RETRY flag to allow OpenSSL perform re-handshake automatically */
	SSL_set_mode(retval->tls, SSL_MODE_AUTO_RETRY);

	/* connect and perform the handshake */
	if (SSL_connect(retval->tls) != 1) {
		ERROR("Connecting over TLS failed (%s).", ERR_reason_error_string(ERR_get_error()));
		SSL_free(retval->tls);
		free(retval->stats);
		free(retval);
		return (NULL);
	}

	/* check the result of certificate verification */
	verify = SSL_get_verify_result(retval->tls);
	switch (verify) {
	case X509_V_OK:
		VERB("Server certificate successfully verified.");
		break;
	default:
		WARN("I'm not happy with the server certificate (%s).", verify_ret_msg[verify]);
	}

	/* fill session structure */
	retval->transport_socket = sock;
	retval->fd_input = -1;
	retval->fd_output = -1;
	retval->username = strdup(username);
	retval->groups = NULL; /* client side does not need this information */
	retval->msgid = 1;
	retval->queue_event = NULL;
	retval->queue_msg = NULL;
	retval->logintime = NULL;
	retval->monitored = 0;
	retval->nacm_recovery = 0; /* not needed/decidable on the client side */
	retval->stats->in_rpcs = 0;
	retval->stats->in_bad_rpcs = 0;
	retval->stats->out_rpc_errors = 0;
	retval->stats->out_notifications = 0;

	if (pthread_mutexattr_init(&mattr) != 0) {
		ERROR("Memory allocation failed (%s:%d).", __FILE__, __LINE__);
		free(retval);
		return (NULL);
	}
	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
	retval->mut_channel = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t));
	if ((r = pthread_mutex_init(retval->mut_channel, &mattr)) != 0 ||
			(r = pthread_mutex_init(&(retval->mut_mqueue), &mattr)) != 0 ||
			(r = pthread_mutex_init(&(retval->mut_equeue), &mattr)) != 0 ||
			(r = pthread_mutex_init(&(retval->mut_ntf), &mattr)) != 0 ||
			(r = pthread_mutex_init(&(retval->mut_session), &mattr)) != 0) {
		ERROR("Mutex initialization failed (%s).", strerror(r));
		pthread_mutexattr_destroy(&mattr);
		free(retval);
		return (NULL);
	}
	pthread_mutexattr_destroy(&mattr);

	return (retval);
}
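
The verify_ret_msg table used in the switch above is libnetconf's own lookup of human-readable messages. With plain OpenSSL, a roughly equivalent check (a sketch under that assumption, not the library's code) can rely on X509_verify_cert_error_string():

#include <stdio.h>
#include <openssl/ssl.h>
#include <openssl/x509_vfy.h>

/* Sketch: report the peer-certificate verification result of an already
 * connected SSL object using OpenSSL's built-in error strings. */
static int report_verify_result(SSL *ssl)
{
	long verify = SSL_get_verify_result(ssl);

	if (verify == X509_V_OK) {
		printf("Server certificate successfully verified.\n");
		return 0;
	}
	printf("Certificate verification failed: %s\n",
	       X509_verify_cert_error_string(verify));
	return -1;
}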
Example #30
API int nc_tls_init(const char* peer_cert, const char* peer_key, const char *CAfile, const char *CApath, const char *CRLfile, const char *CRLpath)
{
	const char* key_ = peer_key;
	SSL_CTX* tls_ctx;
	X509_LOOKUP* lookup;
	X509_STORE* tls_store;
	int destroy = 0, ret;

	if (peer_cert == NULL) {
		ERROR("%s: Invalid parameter.", __func__);
		return (EXIT_FAILURE);
	}

	pthread_once(&tls_ctx_once, tls_ctx_init);

	tls_ctx = pthread_getspecific(tls_ctx_key);
	if (tls_ctx) {
		VERB("TLS subsystem reinitialization. Resetting certificate settings");
		/*
		 * continue with creation of a new TLS context; the current one will be
		 * destroyed after everything succeeds
		 */
		destroy = 1;
	}

	/* prepare global SSL context, allow only mandatory TLS 1.2  */
	if ((tls_ctx = SSL_CTX_new(TLSv1_2_client_method())) == NULL) {
		ERROR("Unable to create OpenSSL context (%s)", ERR_reason_error_string(ERR_get_error()));
		return (EXIT_FAILURE);
	}

	/* force peer certificate verification (NO_PEER_CERT and CLIENT_ONCE are ignored when
	 * acting as a client, but included just in case) and optionally set the CRL checking callback */
	if (CRLfile != NULL || CRLpath != NULL) {
		/* set the revocation store with the correct paths for the callback */
		tls_store = X509_STORE_new();
		tls_store->cache = 0;

		if (CRLfile != NULL) {
			if ((lookup = X509_STORE_add_lookup(tls_store, X509_LOOKUP_file())) == NULL) {
				ERROR("Failed to add lookup method in CRL checking");
				return (EXIT_FAILURE);
			}
			if (X509_LOOKUP_add_dir(lookup, CRLfile, X509_FILETYPE_PEM) != 1) {
				ERROR("Failed to add revocation lookup file");
				return (EXIT_FAILURE);
			}
		}

		if (CRLpath != NULL) {
			if ((lookup = X509_STORE_add_lookup(tls_store, X509_LOOKUP_hash_dir())) == NULL) {
				ERROR("Failed to add lookup method in CRL checking");
				return (EXIT_FAILURE);
			}
			if (X509_LOOKUP_add_dir(lookup, CRLpath, X509_FILETYPE_PEM) != 1) {
				ERROR("Failed to add revocation lookup directory");
				return (EXIT_FAILURE);
			}
		}

		if ((ret = pthread_key_create(&tls_store_key, (void (*)(void *))X509_STORE_free)) != 0) {
			ERROR("Unable to create pthread key: %s", strerror(ret));
			return (EXIT_FAILURE);
		}
		if ((ret = pthread_setspecific(tls_store_key, tls_store)) != 0) {
			ERROR("Unable to set thread-specific data: %s", strerror(ret));
			return (EXIT_FAILURE);
		}

		SSL_CTX_set_verify(tls_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT | SSL_VERIFY_CLIENT_ONCE, verify_callback);
	} else {
		/* CRL checking will be skipped */
		SSL_CTX_set_verify(tls_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT | SSL_VERIFY_CLIENT_ONCE, NULL);
	}

	/* get peer certificate */
	if (SSL_CTX_use_certificate_file(tls_ctx, peer_cert, SSL_FILETYPE_PEM) != 1) {
		ERROR("Loading a peer certificate from \'%s\' failed (%s).", peer_cert, ERR_reason_error_string(ERR_get_error()));
		return (EXIT_FAILURE);
	}

	if (key_ == NULL) {
		/*
		 * if the file with the private key is not specified, assume that the
		 * private key is stored together with the certificate
		 */
		key_ = peer_cert;
	}
	if (SSL_CTX_use_PrivateKey_file(tls_ctx, key_, SSL_FILETYPE_PEM) != 1) {
		ERROR("Loading the peer private key from \'%s\' failed (%s).", key_, ERR_reason_error_string(ERR_get_error()));
		return (EXIT_FAILURE);
	}

	if(! SSL_CTX_load_verify_locations(tls_ctx, CAfile, CApath))	{
		WARN("SSL_CTX_load_verify_locations() failed (%s).", ERR_reason_error_string(ERR_get_error()));
	}

	/* store TLS context for thread */
	if (destroy) {
		nc_tls_destroy();
	}
	pthread_setspecific(tls_ctx_key, tls_ctx);

	return (EXIT_SUCCESS);
}