static INLINE void
_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
                      struct fenced_buffer *fenced_buf)
{
   struct pipe_winsys *winsys = fenced_list->winsys;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);
   
   winsys->fence_reference(winsys, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
   
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
#ifdef DEBUG
   fenced_buf->head.prev = NULL;
   fenced_buf->head.next = NULL;
#endif
   
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;
   
   if (!fenced_buf->base.base.refcount)
      _fenced_buffer_destroy(fenced_buf);
}
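Every example on this page hinges on LIST_DEL, but the macro is defined per codebase. In the Mesa-derived examples (this one, svga_buffer_upload_flush, debug_free, the pb_cache and fenced-buffer helpers), it unlinks a node from an intrusive circular doubly-linked list. A minimal sketch of that variant, assuming the prev/next field names used above; this is not the verbatim definition from any one of these projects:

/* Sketch: unlink a node from a circular doubly-linked list.
 * The node's own prev/next are left dangling afterwards, which is
 * why several examples NULL them out under #ifdef DEBUG. */
struct list_head {
   struct list_head *prev;
   struct list_head *next;
};

#define LIST_DEL(item)                       \
   do {                                      \
      (item)->prev->next = (item)->next;     \
      (item)->next->prev = (item)->prev;     \
   } while (0)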
Example #2
/**
 * Delete a session and its associated queues.
 * @param   ses         session
 */
static void unvme_session_delete(unvme_session_t* ses)
{
    if (ses->id > 0) {
        pthread_spin_lock(&ses->iomem.lock);
        if (ses->iomem.size) {
            int i;
            for (i = 0; i < ses->iomem.count; i++) {
                (void) vfio_dma_free(ses->iomem.map[i]);
            }
            ses->iomem.size = ses->iomem.count = 0;
            free(ses->iomem.map);
        }
        pthread_spin_unlock(&ses->iomem.lock);
        pthread_spin_destroy(&ses->iomem.lock);
    }

    if (ses == ses->next) {
        DEBUG_FN("%x: adminq", unvme_dev.vfiodev->pci);
        unvme_adminq_delete(ses->queues);
    } else {
        DEBUG_FN("%x: q=%d-%d", unvme_dev.vfiodev->pci, ses->id,
                                ses->id + ses->qcount - 1);
        while (--ses->qcount >= 0) {
            unvme_queue_t* ioq = &ses->queues[ses->qcount];
            if (ioq->ses) unvme_ioq_delete(ioq);
        }
    }
    LIST_DEL(unvme_dev.ses, ses);
    free(ses);
}
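Note that unvme's LIST_DEL has a different shape: it takes the list head as its first argument, because removing the head node must advance the head and removing the last node must clear it, or the while ((desc = ioq->desclist) != NULL) loops in unvme_ioq_delete below would never terminate. A hypothetical sketch consistent with that usage, not unvme's verbatim macro:

/* Hypothetical head-tracking delete for a circular list, matching the
 * LIST_DEL(head, node) call shape used by the unvme examples. */
#define LIST_DEL(head, node)                        \
    do {                                            \
        if ((node)->next != (node)) {               \
            (node)->next->prev = (node)->prev;      \
            (node)->prev->next = (node)->next;      \
            if ((head) == (node))                   \
                (head) = (node)->next;              \
        } else {                                    \
            (head) = NULL; /* list is now empty */  \
        }                                           \
    } while (0)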
Example #3
int
usb_close(usb_dev_handle * udev)
{
	struct usb_device *dev;
	int err;

	err = libusb20_dev_close((void *)udev);

	if (err)
		return (-1);

	if (usb_backend != NULL) {
		/*
		 * Enqueue USB device to backend queue so that it gets freed
		 * when the backend is re-scanned:
		 */
		libusb20_be_enqueue_device(usb_backend, (void *)udev);
	} else {
		/*
		 * The backend is gone. Free device data so that we
		 * don't start leaking memory!
		 */
		dev = usb_device(udev);
		libusb20_dev_free((void *)udev);
		LIST_DEL(usb_global_bus.devices, dev);
		free(dev);
	}
	return (0);
}
Example #4
/* This function tries to resume a temporarily disabled listener. Paused, full,
 * limited and disabled listeners are handled, which means that this function
 * may replace enable_listener(). The resulting state will either be LI_READY
 * or LI_FULL. 0 is returned in case of failure to resume (e.g. a dead socket).
 */
int resume_listener(struct listener *l)
{
	if (l->state < LI_PAUSED)
		return 0;

	if (l->proto->sock_prot == IPPROTO_TCP &&
	    l->state == LI_PAUSED &&
	    listen(l->fd, l->backlog ? l->backlog : l->maxconn) != 0)
		return 0;

	if (l->state == LI_READY)
		return 1;

	if (l->state == LI_LIMITED)
		LIST_DEL(&l->wait_queue);

	if (l->nbconn >= l->maxconn) {
		l->state = LI_FULL;
		return 1;
	}

	fd_want_recv(l->fd);
	l->state = LI_READY;
	return 1;
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}
static void _ies_task_job_handle_queue(void)
{
    /*----------------------------------------------------------------*/
    /* Local Variables                                                */
    /*----------------------------------------------------------------*/
    srv_ies_job             *pJob;
    SRV_IES_JOB_STATE_ENUM  state;

    /*----------------------------------------------------------------*/
    /* Code Body                                                      */
    /*----------------------------------------------------------------*/
    pJob = NULL;

    if (LIST_EMPTY(&(g_ies_task_context.normal)))
    {
        if (!LIST_EMPTY(&(g_ies_task_context.lowest)))
        {
            pJob = (srv_ies_job*)(g_ies_task_context.lowest.pNext);
            LIST_DEL((srv_ies_list_head_struct*)pJob);
        }
    }
    else
    {
        pJob = (srv_ies_job*)(g_ies_task_context.normal.pNext);
        LIST_DEL(((srv_ies_list_head_struct*)pJob));
    }

    if (pJob)
    {
        kal_take_mutex(g_srv_ies_job_mutex);
        state = pJob->state;
        kal_give_mutex(g_srv_ies_job_mutex);

        ASSERT(SRV_IES_JOB_STATE_FINISHED != state); /* check the snapshot taken under the mutex */

        if (SRV_IES_JOB_STATE_CANCELLED != state)
        {
            g_ies_task_context.pJob = pJob;
            if (_ies_task_job_handle_start(g_ies_task_context.pJob))
            {
                g_ies_task_context.pJob = NULL;
            }
        }
    }
}
Example #7
static void *gp_worker_main(void *pvt)
{
    struct gp_thread *t = (struct gp_thread *)pvt;
    struct gp_query *q = NULL;
    char dummy = 0;
    int ret;

    while (!t->pool->shutdown) {

        /* ======> COND_MUTEX */
        pthread_mutex_lock(&t->cond_mutex);
        while (t->query == NULL) {
            /* wait for next query */
            pthread_cond_wait(&t->cond_wakeup, &t->cond_mutex);
            if (t->pool->shutdown) {
                pthread_exit(NULL);
            }
        }

        /* grab the query off the shared pointer */
        q = t->query;
        t->query = NULL;

        /* <====== COND_MUTEX */
        pthread_mutex_unlock(&t->cond_mutex);

        /* handle the client request */
        gp_handle_query(t->pool, q);

        /* now get lock on main queue, to play with the reply list */
        /* ======> POOL LOCK */
        pthread_mutex_lock(&t->pool->lock);

        /* put back query so that dispatcher can send reply */
        q->next = t->pool->reply_list;
        t->pool->reply_list = q;

        /* add us back to the free list but only if we are not
         * shutting down */
        if (!t->pool->shutdown) {
            LIST_DEL(t->pool->busy_list, t);
            LIST_ADD(t->pool->free_list, t);
        }

        /* <====== POOL LOCK */
        pthread_mutex_unlock(&t->pool->lock);

        /* and wake up dispatcher so it will handle it */
        ret = write(t->pool->sig_pipe[1], &dummy, 1);
        if (ret == -1) {
            GPERROR("Failed to signal dispatcher!");
        }
    }

    pthread_exit(NULL);
}
Example #8
void *mm_request_bf(int n, int *steps)
{
    void *p, *prev, *next;
    void *best;
    unsigned int best_size;
    unsigned int size;
    void *new_p;
    if (steps != NULL) *steps = 0;
    if (n <= 0) return NULL;
    best = NULL;
    best_size = INT_MAX;
    /* The free list is presumably kept sorted by ascending size (see
     * list_add_bf below), so the first block that fits is the best fit
     * and the scan can stop there. */
    LIST_FOR_EACH(FREE_LIST_HEAD, p) {
        if (steps != NULL) (*steps)++;
        size = GET_SIZE_FRONT(p);
        if (size >= (unsigned int)n) {
            best_size = size;
            best = p;
            break;
        }
    }
    if (best == NULL) return NULL;
    if (best_size >= (unsigned int)n && best_size < n + HEAD_SIZE + FOOT_SIZE + 1) { // exact fit, or the surplus is too small to hold a header + footer + 1 byte: give the whole block to the user
        LIST_DEL(best);
        SET_TAG_FRONT(best, TAG_USED);
        return GET_USER_START_FRONT(best);
    } else if (best_size >= n + HEAD_SIZE + FOOT_SIZE + 1) { // large enough to split in two: one part for the user, the remainder stays free
        prev = GET_PREV_FRONT(best);
        next = GET_NEXT_FRONT(best);
        SET_SIZE_FRONT(best, n);
        SET_TAG_FRONT(best, TAG_USED);
        new_p = GET_BACK_FRONT(best);
        SET_SIZE_FRONT(new_p, best_size - n - FOOT_SIZE - HEAD_SIZE);
        SET_TAG_FRONT(new_p, TAG_FREE);
        SET_NEXT_FRONT(prev, new_p);
        SET_PREV_FRONT(new_p, prev);
        SET_PREV_FRONT(next, new_p);
        SET_NEXT_FRONT(new_p, next);
        LIST_DEL(new_p);
        list_add_bf(new_p, FREE_LIST_HEAD);
        return GET_USER_START_FRONT(best);
    }
    return NULL;
}
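The split branch deserves a worked example: carving n bytes out of a free block costs an extra header/footer pair for the new remainder block, which is exactly why the first branch hands over the whole block when the surplus is smaller than HEAD_SIZE + FOOT_SIZE + 1. Illustrative arithmetic with assumed sizes (HEAD_SIZE = 8 and FOOT_SIZE = 4 are made up for the example):

/* before: [head|          100 bytes free           |foot]
 * after:  [head| 40 used |foot][head| 48 free |foot]
 *
 * remainder = best_size - n - HEAD_SIZE - FOOT_SIZE
 *           = 100 - 40 - 8 - 4 = 48 usable bytes,
 * so a split only happens when
 * best_size >= n + HEAD_SIZE + FOOT_SIZE + 1 (at least one usable byte). */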
Example #9
/* Marks a ready listener as full so that the session code tries to re-enable
 * it upon next close() using resume_listener().
 */
void listener_full(struct listener *l)
{
	if (l->state >= LI_READY) {
		if (l->state == LI_LIMITED)
			LIST_DEL(&l->wait_queue);

		EV_FD_CLR(l->fd, DIR_RD);
		l->state = LI_FULL;
	}
}
Example #10
/* This function removes the specified listener's file descriptor from the
 * polling lists if it is in the LI_READY or in the LI_FULL state. The listener
 * enters LI_LISTEN.
 */
void disable_listener(struct listener *listener)
{
	if (listener->state < LI_READY)
		return;
	if (listener->state == LI_READY)
		EV_FD_CLR(listener->fd, DIR_RD);
	if (listener->state == LI_LIMITED)
		LIST_DEL(&listener->wait_queue);
	listener->state = LI_LISTEN;
}
Example #11
 ~ipcwbuf_s()
 {
     while (first_a)
     {
         ipcwspace *spc = first_a;
         LIST_DEL(spc, first_a, last_a, prev, next);
         spc->~ipcwspace();
         ph_allocator::mf(spc);
     }
 }
Example #12
/* Marks a ready listener as full so that the session code tries to re-enable
 * it upon next close() using resume_listener().
 */
void listener_full(struct listener *l)
{
	if (l->state >= LI_READY) {
		if (l->state == LI_LIMITED)
			LIST_DEL(&l->wait_queue);

		fd_stop_recv(l->fd);
		l->state = LI_FULL;
	}
}
Example #13
/* This function removes the specified listener's file descriptor from the
 * polling lists if it is in the LI_READY or in the LI_FULL state. The listener
 * enters LI_LISTEN.
 */
void disable_listener(struct listener *listener)
{
	if (listener->state < LI_READY)
		return;
	if (listener->state == LI_READY)
		fd_stop_recv(listener->fd);
	if (listener->state == LI_LIMITED)
		LIST_DEL(&listener->wait_queue);
	listener->state = LI_LISTEN;
}
Example #14
/*
 * Detaches pending connection <p>, decreases the pending count, and frees
 * the pending connection. The connection might have been queued to a specific
 * server as well as to the proxy. The session also gets marked unqueued.
 */
void pendconn_free(struct pendconn *p)
{
    LIST_DEL(&p->list);
    p->sess->pend_pos = NULL;
    if (p->srv)
        p->srv->nbpend--;
    else
        p->sess->be->nbpend--;
    p->sess->be->totpend--;
    pool_free2(pool2_pendconn, p);
}
Example #15
void
req_acl_free(struct list *r) {
	struct req_acl_rule *tr, *pr;

	list_for_each_entry_safe(pr, tr, r, list) {
		LIST_DEL(&pr->list);
		if (pr->action == PR_REQ_ACL_ACT_HTTP_AUTH)
			free(pr->http_auth.realm);

		free(pr);
	}
}
Example #16
/**
 * Delete an I/O queue.
 * @param   ioq         io queue
 */
static void unvme_ioq_delete(unvme_queue_t* ioq)
{
    DEBUG_FN("%x: q=%d", unvme_dev.vfiodev->pci, ioq->id);
    if (ioq->nvq) (void) nvme_delete_ioq(ioq->nvq);
    if (ioq->prplist) (void) vfio_dma_free(ioq->prplist);
    if (ioq->cqdma) (void) vfio_dma_free(ioq->cqdma);
    if (ioq->sqdma) (void) vfio_dma_free(ioq->sqdma);
    if (ioq->cidmask) free(ioq->cidmask);

    unvme_desc_t* desc;
    while ((desc = ioq->desclist) != NULL) {
        LIST_DEL(ioq->desclist, desc);
        free(desc);
    }
    while ((desc = ioq->descfree) != NULL) {
        LIST_DEL(ioq->descfree, desc);
        free(desc);
    }

    unvme_dev.numioqs--;
}
Example #17
void account_list_free(struct list_head* list)
{
  struct list_head* get = NULL;
  struct list_head* n = NULL;

  list_iterate_safe(get, n, list)
  {
    struct account_desc* tmp = list_get(get, struct account_desc, list);
    LIST_DEL(&tmp->list);
    account_desc_free(&tmp);
  }
}
/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   assert(!pipe_is_referenced(&buf->base.base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
void
MXUserRemoveFromList(MXUserHeader *header)  // IN/OUT:
{
    MXRecLock *listLock = MXUserInternalSingleton(&mxLockMemPtr);

    /* Tolerate a failure. This is too low down to log */
    if (listLock) {
        MXRecLockAcquire(listLock,
                         NULL);  // non-stats
        LIST_DEL(&header->item, &mxUserLockList);
        MXRecLockRelease(listLock);
    }
}
static void
nv30_query_object_del(struct nv30_screen *screen, struct nv30_query_object **po)
{
   struct nv30_query_object *qo = *po;
   *po = NULL;
   if (qo) {
      volatile uint32_t *ntfy = nv30_ntfy(screen, qo);
      /* busy-wait until the hardware clears the notifier status */
      while (ntfy[3] & 0xff000000) {
      }
      nouveau_heap_free(&qo->hw);
      LIST_DEL(&qo->list);
      FREE(qo);
   }
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle); 
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);
   
   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL; 
#endif
   sbuf->dma.pending = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count */
   pipe_reference(&(sbuf->b.b.reference), NULL);
   sbuf = NULL;
}
Example #22
/**
 * Actually destroy the buffer.
 */
static void
destroy_buffer_locked(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;

   assert(!pipe_is_referenced(&entry->buffer->reference));
   if (entry->head.next) {
      LIST_DEL(&entry->head);
      assert(mgr->num_buffers);
      --mgr->num_buffers;
      mgr->cache_size -= entry->buffer->size;
   }
   entry->mgr->destroy_buffer(entry->buffer);
}
Example #23
void applet_run_active()
{
	struct appctx *curr;
	struct stream_interface *si;

	if (LIST_ISEMPTY(&applet_active_queue))
		return;

	/* move active queue to run queue */
	applet_active_queue.n->p = &applet_cur_queue;
	applet_active_queue.p->n = &applet_cur_queue;

	applet_cur_queue = applet_active_queue;
	LIST_INIT(&applet_active_queue);

	/* The list is only scanned from the head. This guarantees that if any
	 * applet removes another one, there is no side effect while walking
	 * through the list.
	 */
	while (!LIST_ISEMPTY(&applet_cur_queue)) {
		curr = LIST_ELEM(applet_cur_queue.n, typeof(curr), runq);
		si = curr->owner;

		/* Now we'll try to allocate the input buffer. We wake up the
		 * applet in all cases. So this is the applet responsibility to
		 * check if this buffer was allocated or not. This let a chance
		 * for applets to do some other processing if needed. */
		if (!channel_alloc_buffer(si_ic(si), &curr->buffer_wait))
			si_applet_cant_put(si);

		/* We always pretend the applet can't get and doesn't want to
		 * put, it's up to it to change this if needed. This ensures
		 * that one applet which ignores any event will not spin.
		 */
		si_applet_cant_get(si);
		si_applet_stop_put(si);

		curr->applet->fct(curr);
		si_applet_wake_cb(si);
		channel_release_buffer(si_ic(si), &curr->buffer_wait);

		if (applet_cur_queue.n == &curr->runq) {
			/* curr was left in the list, move it back to the active list */
			LIST_DEL(&curr->runq);
			LIST_ADDQ(&applet_active_queue, &curr->runq);
		}
	}
}
Example #24
/**
 * Put a descriptor entry back by moving it from the use to the free list.
 * @param   desc    descriptor
 */
static void unvme_put_desc(unvme_desc_t* desc)
{
    unvme_queue_t* ioq = desc->ioq;

    if (ioq->descnext == desc) {
        if (desc != desc->next) ioq->descnext = desc->next;
        else ioq->descnext = NULL;
    }

    LIST_DEL(ioq->desclist, desc);
    memset(desc, 0, sizeof(unvme_desc_t) + ioq->ses->masksize);
    desc->ioq = ioq;
    LIST_ADD(ioq->descfree, desc);

    ioq->desccount--;
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
Example #26
void
debug_free(const char *file, unsigned line, const char *function,
           void *ptr) 
{
   struct debug_memory_header *hdr;
   struct debug_memory_footer *ftr;
   
   if (!ptr)
      return;
   
   hdr = header_from_data(ptr);
   if (hdr->magic != DEBUG_MEMORY_MAGIC) {
      debug_printf("%s:%u:%s: freeing bad or corrupted memory %p\n",
                   file, line, function,
                   ptr);
      debug_assert(0);
      return;
   }

   ftr = footer_from_header(hdr);
   if (ftr->magic != DEBUG_MEMORY_MAGIC) {
      debug_printf("%s:%u:%s: buffer overflow %p\n",
                   hdr->file, hdr->line, hdr->function,
                   ptr);
      debug_assert(0);
   }

#if DEBUG_FREED_MEMORY
   /* Check for double-free */
   assert(!hdr->freed);
   /* Mark the block as freed but don't really free it */
   hdr->freed = TRUE;
   /* Save file/line where freed */
   hdr->file = file;
   hdr->line = line;
   /* set freed memory to special value */
   memset(ptr, DEBUG_FREED_BYTE, hdr->size);
#else
   pipe_mutex_lock(list_mutex);
   LIST_DEL(&hdr->head);
   pipe_mutex_unlock(list_mutex);
   hdr->magic = 0;
   ftr->magic = 0;
   
   os_free(hdr);
#endif
}
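debug_free leans on header_from_data and footer_from_header to find the canaries around the user block. A minimal sketch of how such helpers fit the calls above, assuming a [header | user data | footer] layout and reusing the list_head sketch from the top of the page; field and struct shapes are illustrative, not Mesa's exact definitions:

/* Illustrative canary layout for debug_free():
 *   [ debug_memory_header | user data (hdr->size bytes) | footer ] */
struct debug_memory_header {
   struct list_head head;   /* links all live allocations (list_mutex) */
   unsigned long magic;     /* DEBUG_MEMORY_MAGIC while allocated */
   const char *file;
   unsigned line;
   const char *function;
   size_t size;             /* user-visible allocation size */
   unsigned freed;          /* set by the DEBUG_FREED_MEMORY path */
};

struct debug_memory_footer {
   unsigned long magic;     /* overwritten by buffer overflows */
};

static inline struct debug_memory_header *
header_from_data(void *data)
{
   return (struct debug_memory_header *)((char *)data
          - sizeof(struct debug_memory_header));
}

static inline struct debug_memory_footer *
footer_from_header(struct debug_memory_header *hdr)
{
   return (struct debug_memory_footer *)((char *)(hdr + 1) + hdr->size);
}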
Example #27
static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}
Example #29
/* This function tries to resume a temporarily disabled listener. Paused, full,
 * limited and disabled listeners are handled, which means that this function
 * may replace enable_listener(). The resulting state will either be LI_READY
 * or LI_FULL. 0 is returned in case of failure to resume (e.g. a dead socket).
 * Listeners bound to a different process are not woken up unless we're in
 * foreground mode. If the listener was only in the assigned state, it's totally
 * rebound. This can happen if a pause() has completely stopped it. If the
 * resume fails, 0 is returned and an error might be displayed.
 */
int resume_listener(struct listener *l)
{
	if (l->state == LI_ASSIGNED) {
		char msg[100];
		int err;

		err = l->proto->bind(l, msg, sizeof(msg));
		if (err & ERR_ALERT)
			Alert("Resuming listener: %s\n", msg);
		else if (err & ERR_WARN)
			Warning("Resuming listener: %s\n", msg);

		if (err & (ERR_FATAL | ERR_ABORT))
			return 0;
	}

	if (l->state < LI_PAUSED)
		return 0;

	if ((global.mode & (MODE_DAEMON | MODE_SYSTEMD)) &&
	    l->bind_conf->bind_proc &&
	    !(l->bind_conf->bind_proc & (1UL << (relative_pid - 1))))
		return 0;

	if (l->proto->sock_prot == IPPROTO_TCP &&
	    l->state == LI_PAUSED &&
	    listen(l->fd, l->backlog ? l->backlog : l->maxconn) != 0)
		return 0;

	if (l->state == LI_READY)
		return 1;

	if (l->state == LI_LIMITED)
		LIST_DEL(&l->wait_queue);

	if (l->nbconn >= l->maxconn) {
		l->state = LI_FULL;
		return 1;
	}

	fd_want_recv(l->fd);
	l->state = LI_READY;
	return 1;
}
Example #30
/*
 * Parse the log_format string and fill a linked list.
 * Variable names are preceded by % and composed of characters [a-zA-Z0-9]*: %varname
 * You can set arguments using { } : %{many arguments}varname.
 * The curproxy->conf.args.ctx must be set by the caller.
 *
 *  fmt: the string to parse
 *  curproxy: the proxy affected
 *  list_format: the destination list
 *  options: LOG_OPT_* to force on every node
 *  cap: all SMP_VAL_* flags supported by the consumer
 */
void parse_logformat_string(const char *fmt, struct proxy *curproxy, struct list *list_format, int options, int cap)
{
	char *sp, *str, *backfmt; /* start pointer for text parts */
	char *arg = NULL; /* start pointer for args */
	char *var = NULL; /* start pointer for vars */
	int arg_len = 0;
	int var_len = 0;
	int cformat; /* current token format */
	int pformat; /* previous token format */
	struct logformat_node *tmplf, *back;

	sp = str = backfmt = strdup(fmt);
	curproxy->to_log |= LW_INIT;

	/* flush the list first. */
	list_for_each_entry_safe(tmplf, back, list_format, list) {
		LIST_DEL(&tmplf->list);
		free(tmplf);
	}