Example #1
static void uct_cm_iface_handle_sidr_req(uct_cm_iface_t *iface,
                                         struct ib_cm_event *event)
{
    uct_cm_hdr_t *hdr = event->private_data;
    struct ib_cm_sidr_rep_param rep;
    ucs_status_t status;
    void *cm_desc, *desc;
    int ret;

    VALGRIND_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
    VALGRIND_MAKE_MEM_DEFINED(hdr + 1, hdr->length);

    uct_cm_iface_trace_data(iface, UCT_AM_TRACE_TYPE_RECV, hdr, "RX: SIDR_REQ");

    /* Allocate temporary buffer to serve as receive descriptor */
    cm_desc = ucs_malloc(iface->super.config.rx_payload_offset + hdr->length,
                         "cm_recv_desc");
    if (cm_desc == NULL) {
        ucs_error("failed to allocate cm receive descriptor");
        return;
    }

    /* Send reply */
    ucs_trace_data("TX: SIDR_REP");
    memset(&rep, 0, sizeof rep);
    rep.status = IB_SIDR_SUCCESS;
    ret = ib_cm_send_sidr_rep(event->cm_id, &rep);
    if (ret) {
        ucs_error("ib_cm_send_sidr_rep() failed: %m");
    }

    /* Call active message handler */
    desc = cm_desc + iface->super.config.rx_headroom_offset;
    uct_recv_desc_iface(desc) = &iface->super.super.super;
    status = uct_iface_invoke_am(&iface->super.super, hdr->am_id, hdr + 1,
                                 hdr->length, desc);
    if (status == UCS_OK) {
        ucs_free(cm_desc);
    }
}
Example #2
int transport_read_layer(rdpTransport* transport, BYTE* data, int bytes)
{
	int read = 0;
	int status = -1;

	if (!transport->frontBio)
	{
		transport->layer = TRANSPORT_LAYER_CLOSED;
		return -1;
	}

	while (read < bytes)
	{
		status = BIO_read(transport->frontBio, data + read, bytes - read);

		if (status <= 0)
		{
			if (!transport->frontBio || !BIO_should_retry(transport->frontBio))
			{
				/* something unexpected happened, let's close */
				if (!transport->frontBio)
				{
					WLog_ERR(TAG, "BIO_read: transport->frontBio null");
					return -1;
				}
				WLog_ERR_BIO(TAG, "BIO_read", transport->frontBio);
				transport->layer = TRANSPORT_LAYER_CLOSED;
				return -1;
			}

			/* non blocking will survive a partial read */
			if (!transport->blocking)
				return read;

			/* blocking means that we can't continue until we have read the number of requested bytes */
			if (BIO_wait_read(transport->frontBio, 100) < 0)
			{
				WLog_ERR_BIO(TAG, "BIO_wait_read", transport->frontBio);
				return -1;
			}

			continue;
		}

#ifdef HAVE_VALGRIND_MEMCHECK_H
		VALGRIND_MAKE_MEM_DEFINED(data + read, status); /* only the bytes just read */
#endif
		read += status;
	}

	return read;
}
Example #3
  void BakerGC::reset() {
    check_growth_finish();

    next->reset();
    eden->reset();

#ifdef HAVE_VALGRIND_H
    (void)VALGRIND_MAKE_MEM_NOACCESS(next->start().as_int(), next->size());
    (void)VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif
    mprotect(next->start(), next->size(), PROT_NONE);
    mprotect(current->start(), current->size(), PROT_READ | PROT_WRITE);
  }
Example #4
static Block* 
sec_block_create (size_t size,
                  const char *during_tag)
{
	Block *block;
	Cell *cell;

	ASSERT (during_tag);

	/* We can force all memory to be malloc'ed */
	if (getenv ("SECMEM_FORCE_FALLBACK"))
		return NULL;

	block = pool_alloc ();
	if (!block)
		return NULL;

	cell = pool_alloc ();
	if (!cell) {
		pool_free (block);
		return NULL;
	}

	/* The size above is a minimum, we're free to go bigger */
	if (size < DEFAULT_BLOCK_SIZE)
		size = DEFAULT_BLOCK_SIZE;
		
	block->words = sec_acquire_pages (&size, during_tag);
	block->n_words = size / sizeof (word_t);
	if (!block->words) {
		pool_free (block);
		pool_free (cell);
		return NULL;
	}
	
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (block->words, size);
#endif
	
	/* The first cell to allocate from */
	cell->words = block->words;
	cell->n_words = block->n_words;
	cell->requested = 0;
	sec_write_guards (cell);
	sec_insert_cell_ring (&block->unused_cells, cell);

	block->next = all_blocks;
	all_blocks = block;
	
	return block;
}
Example #5
int rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct ucma_abi_destroy_id *cmd;
	struct ucma_abi_destroy_id_resp *resp;
	struct cma_id_private *id_priv;
	struct cma_multicast *mc, **pos;
	void *msg;
	int ret, size, addrlen;
	
	addrlen = ucma_addrlen(addr);
	if (!addrlen)
		return ERR(EINVAL);

	id_priv = container_of(id, struct cma_id_private, id);
	pthread_mutex_lock(&id_priv->mut);
	for (pos = &id_priv->mc_list; *pos; pos = &(*pos)->next)
		if (!memcmp(&(*pos)->addr, addr, addrlen))
			break;

	mc = *pos;
	if (*pos)
		*pos = mc->next;
	pthread_mutex_unlock(&id_priv->mut);
	if (!mc)
		return ERR(EADDRNOTAVAIL);

	if (id->qp)
		ibv_detach_mcast(id->qp, &mc->mgid, mc->mlid);
	
	CMA_CREATE_MSG_CMD_RESP(msg, cmd, resp, UCMA_CMD_LEAVE_MCAST, size);
	cmd->id = mc->handle;

	ret = write(id->channel->fd, msg, size);
	if (ret != size) {
		ret = (ret >= 0) ? ERR(ECONNREFUSED) : -1;
		goto free;
	}

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	pthread_mutex_lock(&id_priv->mut);
	while (mc->events_completed < resp->events_reported)
		pthread_cond_wait(&mc->cond, &id_priv->mut);
	pthread_mutex_unlock(&id_priv->mut);

	ret = 0;
free:
	free(mc);
	return ret;
}
Example #6
int main(void)
{
   int *array, *array3;
   int x;

   array = custom_alloc(sizeof(int) * 10);
   array[8]  = 8;
   array[9]  = 8;
   array[10] = 10;      // invalid write (ok w/o MALLOCLIKE -- in superblock)

   VALGRIND_RESIZEINPLACE_BLOCK(array, sizeof(int) * 10, sizeof(int) * 5, RZ);
   array[4] = 7;
   array[5] = 9; // invalid write

   // Make the entire array defined again such that it can be verified whether
   // the red zone is marked properly when resizing in place.
   VALGRIND_MAKE_MEM_DEFINED(array, sizeof(int) * 10);

   VALGRIND_RESIZEINPLACE_BLOCK(array, sizeof(int) * 5, sizeof(int) * 7, RZ);
   if (array[5]) array[4]++; // uninitialized read of array[5]
   array[5]  = 11;
   array[6]  = 7;
   array[7] = 8; // invalid write

   // invalid realloc
   VALGRIND_RESIZEINPLACE_BLOCK(array+1, sizeof(int) * 7, sizeof(int) * 8, RZ);

   custom_free(array);  // ok

   custom_free((void*)0x1);  // invalid free

   array3 = malloc(sizeof(int) * 10);
   custom_free(array3); // mismatched free (ok without MALLOCLIKE)

   make_leak();
   x = array[0];        // use after free (ok without MALLOCLIKE/MAKE_MEM_NOACCESS)
                        // (nb: initialised because is_zeroed==1 above)
                        // unfortunately not identified as being in a free'd
                        // block because the freeing of the block and shadow
                        // chunk isn't postponed.

   // Bug 137073: passing 0 to MALLOCLIKE_BLOCK was causing an assertion
   // failure.  Test for this (and likewise for FREELIKE_BLOCK).
   VALGRIND_MALLOCLIKE_BLOCK(0,0,0,0);
   VALGRIND_FREELIKE_BLOCK(0,0);
   
   return x;

   // leak from make_leak()
}
Example #7
/*
 * Add a whole-catalog inval entry
 */
static void
AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
							  Oid dbId, Oid catId)
{
	SharedInvalidationMessage msg;

	msg.cat.id = SHAREDINVALCATALOG_ID;
	msg.cat.dbId = dbId;
	msg.cat.catId = catId;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	AddInvalidationMessage(&hdr->cclist, &msg);
}
Example #8
void
_cairo_freelist_fini (cairo_freelist_t *freelist)
{
    cairo_freelist_node_t *node = freelist->first_free_node;
    while (node) {
        cairo_freelist_node_t *next;

        VG (VALGRIND_MAKE_MEM_DEFINED (node, sizeof (node->next)));
        next = node->next;

        free (node);
        node = next;
    }
}
Example #9
/*
 * CacheInvalidateSmgr
 *		Register invalidation of smgr references to a physical relation.
 *
 * Sending this type of invalidation msg forces other backends to close open
 * smgr entries for the rel.  This should be done to flush dangling open-file
 * references when the physical rel is being dropped or truncated.  Because
 * these are nontransactional (i.e., not-rollback-able) operations, we just
 * send the inval message immediately without any queuing.
 *
 * Note: in most cases there will have been a relcache flush issued against
 * the rel at the logical level.  We need a separate smgr-level flush because
 * it is possible for backends to have open smgr entries for rels they don't
 * have a relcache entry for, e.g. because the only thing they ever did with
 * the rel is write out dirty shared buffers.
 *
 * Note: because these messages are nontransactional, they won't be captured
 * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
 * should happen in low-level smgr.c routines, which are executed while
 * replaying WAL as well as when creating it.
 *
 * Note: In order to avoid bloating SharedInvalidationMessage, we store only
 * three bytes of the backend ID using what would otherwise be padding space.
 * Thus, the maximum possible backend ID is 2^23-1.
 */
void
CacheInvalidateSmgr(RelFileNodeBackend rnode)
{
	SharedInvalidationMessage msg;

	msg.sm.id = SHAREDINVALSMGR_ID;
	msg.sm.backend_hi = rnode.backend >> 16;
	msg.sm.backend_lo = rnode.backend & 0xffff;
	msg.sm.rnode = rnode.node;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	SendSharedInvalidMessages(&msg, 1);
}
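The comment above describes packing the backend ID into what would otherwise be padding: a signed high byte plus an unsigned low 16-bit half, which caps the ID at 2^23-1. A sketch of how a receiver can put the ID back together, assuming int8/uint16 field widths; the helper and the local BackendId typedef are illustrative, not PostgreSQL's actual unpacking code:

#include <stdint.h>

typedef int BackendId;			/* stand-in for PostgreSQL's BackendId */

static BackendId
unpack_backend_id(int8_t backend_hi, uint16_t backend_lo)
{
	/* Equivalent to (backend_hi << 16) | backend_lo, written as arithmetic
	 * so a negative high byte (e.g. an invalid backend ID of -1) is rebuilt
	 * without left-shifting a negative value. */
	return (BackendId) backend_hi * 65536 + (BackendId) backend_lo;
}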
Example #10
    /// The connection manager event handler.
    void poll_cm_events()
    {
        int err;
        struct rdma_cm_event* event;
        struct rdma_cm_event event_copy;
        void* private_data_copy = nullptr;

        while ((err = rdma_get_cm_event(ec_, &event)) == 0) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
            VALGRIND_MAKE_MEM_DEFINED(event, sizeof(struct rdma_cm_event));
            memcpy(&event_copy, event, sizeof(struct rdma_cm_event));
            if (event_copy.param.conn.private_data) {
                VALGRIND_MAKE_MEM_DEFINED(
                    event_copy.param.conn.private_data,
                    event_copy.param.conn.private_data_len);
                private_data_copy =
                    malloc(event_copy.param.conn.private_data_len);
                if (!private_data_copy)
                    throw InfinibandException("malloc failed");
                memcpy(private_data_copy, event_copy.param.conn.private_data,
                       event_copy.param.conn.private_data_len);
                event_copy.param.conn.private_data = private_data_copy;
            }
#pragma GCC diagnostic pop
            rdma_ack_cm_event(event);
            on_cm_event(&event_copy);
            if (private_data_copy) {
                free(private_data_copy);
                private_data_copy = nullptr;
            }
        }
        if (err == -1 && errno == EAGAIN)
            return;
        if (err)
            throw InfinibandException("rdma_get_cm_event failed");
    }
Example #11
void dm_free_aux(void *p)
{
	char *ptr;
	size_t i;
	struct memblock *mb = ((struct memblock *) p) - 1;
	if (!p)
		return;

	dm_bounds_check();

	/* sanity check */
	assert(mb->magic == p);
#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_DEFINED(p, mb->length);
#endif
	/* check data at the far boundary */
	ptr = (char *) p + mb->length;
	for (i = 0; i < sizeof(unsigned long); i++)
		if (ptr[i] != (char) mb->id)
			assert(!"Damage at far end of block");

	/* have we freed this before ? */
	assert(mb->id != 0);

	/* unlink */
	if (mb->prev)
		mb->prev->next = mb->next;
	else
		_head = mb->next;

	if (mb->next)
		mb->next->prev = mb->prev;
	else
		_tail = mb->prev;

	mb->id = 0;

	/* stomp a different pattern across the memory */
	ptr = p;
	for (i = 0; i < mb->length; i++)
		ptr[i] = i & 1 ? (char) 0xde : (char) 0xad;

	assert(_mem_stats.blocks_allocated);
	_mem_stats.blocks_allocated--;
	_mem_stats.bytes -= mb->length;

	/* free the memory */
	free(mb);
}
Example #12
File: signal.c Project: yugui/ruby
static sighandler_t
ruby_signal(int signum, sighandler_t handler)
{
    struct sigaction sigact, old;

#if 0
    rb_trap_accept_nativethreads[signum] = 0;
#endif

    sigemptyset(&sigact.sa_mask);
#ifdef USE_SIGALTSTACK
    if (handler == SIG_IGN || handler == SIG_DFL) {
        sigact.sa_handler = handler;
        sigact.sa_flags = 0;
    }
    else {
        sigact.sa_sigaction = (ruby_sigaction_t*)handler;
        sigact.sa_flags = SA_SIGINFO;
    }
#else
    sigact.sa_handler = handler;
    sigact.sa_flags = 0;
#endif

    switch (signum) {
#ifdef SA_NOCLDWAIT
      case SIGCHLD:
	if (handler == SIG_IGN)
	    sigact.sa_flags |= SA_NOCLDWAIT;
	break;
#endif
#if defined(SA_ONSTACK) && defined(USE_SIGALTSTACK)
      case SIGSEGV:
#ifdef SIGBUS
      case SIGBUS:
#endif
	sigact.sa_flags |= SA_ONSTACK;
	break;
#endif
    }
    (void)VALGRIND_MAKE_MEM_DEFINED(&old, sizeof(old));
    if (sigaction(signum, &sigact, &old) < 0) {
	return SIG_ERR;
    }
    if (old.sa_flags & SA_SIGINFO)
	return (sighandler_t)old.sa_sigaction;
    else
	return old.sa_handler;
}
Example #13
static int ucma_query_route(struct rdma_cm_id *id)
{
	struct ucma_abi_query_route_resp *resp;
	struct ucma_abi_query_route *cmd;
	struct cma_id_private *id_priv;
	void *msg;
	int ret, size, i;
	
	CMA_CREATE_MSG_CMD_RESP(msg, cmd, resp, UCMA_CMD_QUERY_ROUTE, size);
	id_priv = container_of(id, struct cma_id_private, id);
	cmd->id = id_priv->handle;

	ret = write(id->channel->fd, msg, size);
	if (ret != size)
		return (ret >= 0) ? ERR(ECONNREFUSED) : -1;

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	if (resp->num_paths) {
		id->route.path_rec = malloc(sizeof *id->route.path_rec *
					    resp->num_paths);
		if (!id->route.path_rec)
			return ERR(ENOMEM);

		id->route.num_paths = resp->num_paths;
		for (i = 0; i < resp->num_paths; i++)
			ibv_copy_path_rec_from_kern(&id->route.path_rec[i],
						    &resp->ib_route[i]);
	}

	memcpy(id->route.addr.addr.ibaddr.sgid.raw, resp->ib_route[0].sgid,
	       sizeof id->route.addr.addr.ibaddr.sgid);
	memcpy(id->route.addr.addr.ibaddr.dgid.raw, resp->ib_route[0].dgid,
	       sizeof id->route.addr.addr.ibaddr.dgid);
	id->route.addr.addr.ibaddr.pkey = resp->ib_route[0].pkey;
	memcpy(&id->route.addr.src_addr, &resp->src_addr,
	       sizeof resp->src_addr);
	memcpy(&id->route.addr.dst_addr, &resp->dst_addr,
	       sizeof resp->dst_addr);

	if (!id_priv->cma_dev && resp->node_guid) {
		ret = ucma_get_device(id_priv, resp->node_guid);
		if (ret)
			return ret;
		id_priv->id.port_num = resp->port_num;
	}

	return 0;
}
Example #14
static void __recvpath
psmi_mq_req_copy(psm_mq_req_t req, psm_epaddr_t epaddr, const void *buf, 
		 uint32_t nbytes)
{
    // recv_msglen may be changed by unexpected receive buf.
    uint32_t msglen_left = req->recv_msglen - req->recv_msgoff;
    uint32_t msglen_this = min(msglen_left, nbytes);
    uint8_t *msgptr = (uint8_t *)req->buf + req->recv_msgoff;
    
    VALGRIND_MAKE_MEM_DEFINED(msgptr, msglen_this);
    psmi_mq_mtucpy(msgptr, buf, msglen_this);
    
    req->recv_msgoff += msglen_this;
    req->send_msgoff += nbytes;
    return;
}
Example #15
void *
_cairo_freelist_alloc (cairo_freelist_t *freelist)
{
    if (freelist->first_free_node) {
        cairo_freelist_node_t *node;

        node = freelist->first_free_node;
        VG (VALGRIND_MAKE_MEM_DEFINED (node, sizeof (node->next)));
        freelist->first_free_node = node->next;
        VG (VALGRIND_MAKE_MEM_UNDEFINED (node, freelist->nodesize));

        return node;
    }

    return malloc (freelist->nodesize);
}
Example #16
int transport_read_layer(rdpTransport* transport, BYTE* data, int bytes)
{
	int read = 0;
	int status = -1;

	if (!transport->frontBio)
	{
		transport->layer = TRANSPORT_LAYER_CLOSED;
		return -1;
	}

	while (read < bytes)
	{
		status = BIO_read(transport->frontBio, data + read, bytes - read);

		if (status <= 0)
		{
			if (!transport->frontBio || !BIO_should_retry(transport->frontBio))
			{
				/* something unexpected happened, let's close */
				transport->layer = TRANSPORT_LAYER_CLOSED;
				return -1;
			}

			/* non blocking will survive a partial read */
			if (!transport->blocking)
				return read;

			/* blocking means that we can't continue until we have read the number of
			 * requested bytes */
			if (transport_wait_for_read(transport) < 0)
			{
				fprintf(stderr, "%s: error when selecting for read\n", __FUNCTION__);
				return -1;
			}
			continue;
		}

#ifdef HAVE_VALGRIND_MEMCHECK_H
		VALGRIND_MAKE_MEM_DEFINED(data + read, status); /* only the bytes just read */
#endif

		read += status;
	}

	return read;
}
Example #17
static UCS_F_ALWAYS_INLINE 
ucs_status_t uct_ud_mlx5_iface_poll_rx(uct_ud_mlx5_iface_t *iface)
{
    struct mlx5_cqe64 *cqe;
    uint16_t ci;
    uct_ib_iface_recv_desc_t *desc;
    uint32_t len;
    void *packet;
    ucs_status_t status;

    ci     = iface->rx.wq.cq_wqe_counter & iface->rx.wq.mask;
    packet = (void *)ntohll(iface->rx.wq.wqes[ci].addr);
    ucs_prefetch(packet + UCT_IB_GRH_LEN);
    desc   = (uct_ib_iface_recv_desc_t *)(packet - iface->super.super.config.rx_hdr_offset);

    cqe = uct_ib_mlx5_get_cqe(&iface->rx.cq, UCT_IB_MLX5_CQE64_SIZE_LOG);
    if (cqe == NULL) {
        status = UCS_ERR_NO_PROGRESS;
        goto out;
    }
    uct_ib_mlx5_log_cqe(cqe);
    ucs_assert(0 == (cqe->op_own & 
               (MLX5_INLINE_SCATTER_32|MLX5_INLINE_SCATTER_64)));
    ucs_assert(ntohs(cqe->wqe_counter) == iface->rx.wq.cq_wqe_counter);

    iface->super.rx.available++;
    iface->rx.wq.cq_wqe_counter++;

    len = ntohl(cqe->byte_cnt);
    VALGRIND_MAKE_MEM_DEFINED(packet, len);

    uct_ud_ep_process_rx(&iface->super,
                         (uct_ud_neth_t *)(packet + UCT_IB_GRH_LEN),
                         len - UCT_IB_GRH_LEN,
                         (uct_ud_recv_skb_t *)desc);
    status = UCS_OK;

out:
    if (iface->super.rx.available >= iface->super.config.rx_max_batch) {
        /* We always need to try to post buffers; otherwise it is possible
         * to run out of RX WQEs if the receiver is slow and there are
         * always CQEs to process.
         */
        uct_ud_mlx5_iface_post_recv(iface);
    }
    return status;
}
Example #18
static ssize_t _tls_recv(int fs, void *buf, size_t count)
{
    ssize_t status = SSL_read(ssl, buf, count);

    /*
    ** Valgrind thinks buf still contains uninitialized
    ** bits after a call to SSL_read, whereas it's an
    ** expected behavior from OpenSSL. Tell Valgrind to
    ** forget about it.
    */
#ifdef VALGRIND_MAKE_MEM_DEFINED
    if (status > 0)
        VALGRIND_MAKE_MEM_DEFINED(buf, status);
#endif

    return status;
}
Example #19
File: signal.c Project: yugui/ruby
static int
signal_ignored(int sig)
{
    sighandler_t func;
#ifdef POSIX_SIGNAL
    struct sigaction old;
    (void)VALGRIND_MAKE_MEM_DEFINED(&old, sizeof(old));
    if (sigaction(sig, NULL, &old) < 0) return FALSE;
    func = old.sa_handler;
#else
    sighandler_t old = signal(sig, SIG_DFL);
    signal(sig, old);
    func = old;
#endif
    if (func == SIG_IGN) return 1;
    return func == sighandler ? 0 : -1;
}
Example #20
int rpc_out_channel_read(RpcOutChannel* outChannel, BYTE* data, int length)
{
	int status;
	status = BIO_read(outChannel->tls->bio, data, length);

	if (status > 0)
	{
#ifdef HAVE_VALGRIND_MEMCHECK_H
		VALGRIND_MAKE_MEM_DEFINED(data, status);
#endif
		return status;
	}

	if (BIO_should_retry(outChannel->tls->bio))
		return 0;

	return -1;
}
Example #21
/**
 *
 *
 *  @author FloSoft
 */
unsigned int VideoDriverWrapper::GenerateTexture()
{
    if(texture_pos >= texture_list.size())
    {
        fatal_error("texture-limit reached!!!!\n");
        return 0;
    }

    GLuint newTexture = 0;
    glGenTextures(1, &newTexture);
#if !defined(NDEBUG) && defined(HAVE_MEMCHECK_H)
    VALGRIND_MAKE_MEM_DEFINED(&newTexture, sizeof(newTexture));
#endif

    texture_list[texture_pos] = newTexture;

    return texture_list[texture_pos++];
}
Example #22
static int ucma_destroy_kern_id(int fd, uint32_t handle)
{
	struct ucma_abi_destroy_id_resp *resp;
	struct ucma_abi_destroy_id *cmd;
	void *msg;
	int ret, size;
	
	CMA_CREATE_MSG_CMD_RESP(msg, cmd, resp, UCMA_CMD_DESTROY_ID, size);
	cmd->id = handle;

	ret = write(fd, msg, size);
	if (ret != size)
		return (ret >= 0) ? ERR(ECONNREFUSED) : -1;

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	return resp->events_reported;
}
Example #23
void
psmi_mq_handle_rts_complete(psm_mq_req_t req) 
{
    psm_mq_t mq = req->mq;

    /* Stats on rendez-vous messages */
    psmi_mq_stats_rts_account(req);
    req->state = MQ_STATE_COMPLETE;
    mq_qq_append(&mq->completed_q, req);
#ifdef PSM_VALGRIND
    if (MQE_TYPE_IS_RECV(req->type))
	PSM_VALGRIND_DEFINE_MQ_RECV(req->buf, req->buf_len, req->recv_msglen);
    else
	VALGRIND_MAKE_MEM_DEFINED(req->buf, req->buf_len);
#endif
    _IPATH_VDBG("RTS complete, req=%p, recv_msglen = %d\n", 
		    req, req->recv_msglen);
    return;
}
Example #24
void _util_vec_delete(void *data, size_t line, const char *file) {
    char *ident = (char *)data - IDENT_SIZE;
    if (!strcmp(ident, IDENT_MEM)) {
        stat_mem_block_t *block = (stat_mem_block_t*)((char *)data - IDENT_MEM_TOP);
        VALGRIND_MAKE_MEM_DEFINED(block, sizeof(stat_mem_block_t));
        con_err("internal warning: invalid use of vec_free:\n");
        con_err("internal warning:    memory block last allocated (size: %u (bytes), at %s:%u)\n",
            (unsigned)block->size,
            block->file,
            (unsigned)block->line);
        con_err("internal warning:    released with with wrong routine at %s:%u\n", file, (unsigned)line);
        con_err("internal warning:    forwarding to mem_d, please fix it\n");
        VALGRIND_MAKE_MEM_NOACCESS(block, sizeof(stat_mem_block_t));
        mem_d(data);
        return;
    }
    /* forward */
    stat_mem_deallocate((void*)(ident - sizeof(vector_t)), line, file);
}
Example #25
void
_cairo_debug_check_image_surface_is_defined (const cairo_surface_t *surface)
{
    const cairo_image_surface_t *image = (cairo_image_surface_t *) surface;
    const uint8_t *bits;
    int row, width;

    if (surface == NULL)
	return;

    if (! RUNNING_ON_VALGRIND)
	return;

    bits = image->data;
    switch (image->format) {
    case CAIRO_FORMAT_A1:
	width = (image->width + 7)/8;
	break;
    case CAIRO_FORMAT_A8:
	width = image->width;
	break;
    case CAIRO_FORMAT_RGB16_565:
	width = image->width*2;
	break;
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_RGB30:
    case CAIRO_FORMAT_ARGB32:
	width = image->width*4;
	break;
    case CAIRO_FORMAT_INVALID:
    default:
	/* XXX compute width from pixman bpp */
	return;
    }

    for (row = 0; row < image->height; row++) {
	VALGRIND_CHECK_MEM_IS_DEFINED (bits, width);
	/* and then silence any future valgrind warnings */
	VALGRIND_MAKE_MEM_DEFINED (bits, width);
	bits += image->stride;
    }
}
Example #26
sparseset
sparseset_alloc (SPARSESET_ELT_TYPE n_elms)
{
  unsigned int n_bytes = sizeof (struct sparseset_def)
			 + ((n_elms - 1) * 2 * sizeof (SPARSESET_ELT_TYPE));

  sparseset set = XNEWVAR (struct sparseset_def, n_bytes);

  /* Mark the sparseset as defined to silence some valgrind uninitialized
     read errors when accessing set->sparse[n] when "n" is not, and never has
     been, in the set.  These uninitialized reads are expected, by design and
     harmless.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (set, n_bytes));

  set->dense = &(set->elms[0]);
  set->sparse = &(set->elms[n_elms]);
  set->size = n_elms;
  sparseset_clear (set);
  return set;
}
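The expected uninitialized reads mentioned in the comment come from the classic Briggs/Torczon sparse-set membership test: it consults sparse[e] even when e has never been inserted and relies on a cross-check against dense[] to reject whatever garbage it finds there. A minimal sketch of that test and the matching insert, reusing the field names from the snippet; the members cardinality field is assumed (as in GCC's sparseset.h) and the function names are illustrative:

/* May read an uninitialized sparse[] slot -- exactly the access the
   VALGRIND_MAKE_MEM_DEFINED call above is meant to silence.  */
static int
sketch_sparseset_member_p (sparseset s, SPARSESET_ELT_TYPE e)
{
  SPARSESET_ELT_TYPE idx = s->sparse[e];

  /* Garbage in sparse[e] is harmless: it either indexes past the live
     prefix of dense[] or names a slot that does not map back to e.  */
  return idx < s->members && s->dense[idx] == e;
}

static void
sketch_sparseset_insert (sparseset s, SPARSESET_ELT_TYPE e)
{
  if (!sketch_sparseset_member_p (s, e))
    {
      s->sparse[e] = s->members;        /* remember where e sits in dense[] */
      s->dense[s->members++] = e;
    }
}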
Example #27
static UCS_F_ALWAYS_INLINE ucs_status_t
uct_ud_verbs_iface_poll_rx(uct_ud_verbs_iface_t *iface)
{
    uct_ib_iface_recv_desc_t *desc;
    struct ibv_wc wc[UCT_IB_MAX_WC];
    int i, ret;
    char *packet;
    ucs_status_t status;


    ret = ibv_poll_cq(iface->super.super.recv_cq, UCT_IB_MAX_WC, wc);
    if (ret == 0) {
        status = UCS_ERR_NO_PROGRESS;
        goto out;
    } 
    if (ucs_unlikely(ret < 0)) {
        ucs_fatal("Failed to poll receive CQ");
    }

    for (i = 0; i < ret; ++i) {
        if (ucs_unlikely(wc[i].status != IBV_WC_SUCCESS)) {
            ucs_fatal("Receive completion with error: %s", 
                      ibv_wc_status_str(wc[i].status));
        }

        desc = (void*)wc[i].wr_id;
        ucs_trace_data("pkt rcvd: buf=%p len=%d", desc, wc[i].byte_len);
        packet = uct_ib_iface_recv_desc_hdr(&iface->super.super, desc);
        VALGRIND_MAKE_MEM_DEFINED(packet, wc[i].byte_len);

        uct_ud_ep_process_rx(&iface->super, 
                             (uct_ud_neth_t *)(packet + UCT_IB_GRH_LEN),
                             wc[i].byte_len - UCT_IB_GRH_LEN,
                             (uct_ud_recv_skb_t *)desc); 
    }
    iface->super.rx.available += ret;
    status = UCS_OK;
out:
    uct_ud_verbs_iface_post_recv(iface);
    return status;
}
Example #28
static ucs_status_t uct_xmpem_reg(void *address, size_t size, uct_mm_id_t *mmid_p)
{
    xpmem_segid_t segid;
    void *start, *end;

    start = ucs_align_down_pow2_ptr(address, ucs_get_page_size());
    end   = ucs_align_up_pow2_ptr(address + size, ucs_get_page_size());
    ucs_assert_always(start <= end);

    segid = xpmem_make(start, end - start, XPMEM_PERMIT_MODE, (void*)0666);
    VALGRIND_MAKE_MEM_DEFINED(&segid, sizeof(segid));
    if (segid < 0) {
        ucs_error("Failed to register %p..%p with xpmem: %m",
                  start, end);
        return UCS_ERR_IO_ERROR;
    }

    ucs_trace("xpmem registered %p..%p segment 0x%llx", start, end, segid);
    *mmid_p = segid;
    return UCS_OK;
}
Example #29
static void
readwrap(exa_ringbuf_t *rng, char *buf, size_t nbytes)
{
  char *data = rng->data;
  int n, p;

  EXA_ASSERT(nbytes < rng->size);

#ifdef HAVE_VALGRIND_MEMCHECK_H
#  ifdef VALGRIND_MAKE_MEM_DEFINED /* Valgrind >= 3.2 */
      VALGRIND_MAKE_MEM_DEFINED(rng->data, rng->size);
#  else /* Valgrind < 3.2 */
      VALGRIND_MAKE_READABLE(rng->data, rng->size);
#  endif
#endif

  p = rng->pRd;

  if (p + nbytes >= rng->size)
    {
      /* wrap around */
      n = rng->size - p;
      if (buf)
	{
	  memcpy(buf, data + p, n);
	  buf += n;
	}
      nbytes -= n;
      p = 0;
    }

  /* direct read */
  if (buf)
    memcpy(buf, data + p, nbytes);
  rng->pRd = p + nbytes;

  EXA_ASSERT(rng->pRd < rng->size);
  EXA_ASSERT(rng->pWr < rng->size);
}
Example #30
/*
 * Add a snapshot inval entry
 */
static void
AddSnapshotInvalidationMessage(InvalidationListHeader *hdr,
							   Oid dbId, Oid relId)
{
	SharedInvalidationMessage msg;

	/* Don't add a duplicate item */
	/* We assume dbId need not be checked because it will never change */
	ProcessMessageList(hdr->rclist,
					   if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
						   msg->sn.relId == relId)
					   return);

	/* OK, add the item */
	msg.sn.id = SHAREDINVALSNAPSHOT_ID;
	msg.sn.dbId = dbId;
	msg.sn.relId = relId;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	AddInvalidationMessage(&hdr->rclist, &msg);
}