Example #1
int swRingQueue_push(swRingQueue *queue, void * ele)
{
	if (!(queue->num < queue->size))
	{
		return -1;
	}
	int cur_tail_index = queue->tail;
	char * cur_tail_flag_index = queue->flags + cur_tail_index;
	//TODO Scheld
	while (!sw_atomic_cmp_set(cur_tail_flag_index, 0, 1))
	{
		cur_tail_index = queue->tail;
		cur_tail_flag_index = queue->flags + cur_tail_index;
	}

	// synchronization between the two enqueue threads
	//TODO: the modulo operation could be optimized
	int update_tail_index = (cur_tail_index + 1) % queue->size;

	// if another thread has already updated tail, no update is needed;
	// otherwise, update it to (cur_tail_index + 1) % size;
	sw_atomic_cmp_set(&queue->tail, cur_tail_index, update_tail_index);

	// a usable storage slot has been claimed
	*(queue->data + cur_tail_index) = ele;

	sw_atomic_fetch_add(cur_tail_flag_index, 1);
	sw_atomic_fetch_add(&queue->num, 1);
	return 0;
}
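
The push above claims a slot by CAS-ing its per-slot flag from 0 (writable) to 1 (being written), advances tail with a second CAS so only one producer moves it, and only then publishes the element and increments num. A minimal self-contained sketch of the same claim-then-publish pattern, written directly against the GCC __sync builtins that the sw_atomic_* macros wrap; the ring_t type, flag values, and ring_push name are illustrative, not Swoole's:

#include <stdint.h>

#define QSIZE 1024

typedef struct
{
    void *data[QSIZE];
    volatile char flags[QSIZE];   /* 0 = writable, 1 = being written, 2 = readable */
    volatile uint32_t tail;
    volatile uint32_t num;
} ring_t;

/* illustrative push mirroring swRingQueue_push: 0 on success, -1 when full */
static int ring_push(ring_t *q, void *ele)
{
    if (q->num >= QSIZE)
    {
        return -1;  /* best-effort fullness check, as in the original */
    }
    uint32_t tail = q->tail;
    /* claim the slot: flag 0 -> 1; on failure, re-read tail and retry */
    while (!__sync_bool_compare_and_swap(&q->flags[tail], 0, 1))
    {
        tail = q->tail;
    }
    /* advance tail unless another producer already did (then this CAS is a no-op) */
    __sync_bool_compare_and_swap(&q->tail, tail, (tail + 1) % QSIZE);
    q->data[tail] = ele;
    __sync_fetch_and_add(&q->flags[tail], 1);  /* 1 -> 2: element is published */
    __sync_fetch_and_add(&q->num, 1);
    return 0;
}
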
Example #2
static void* swRingBuffer_alloc(swMemoryPool *pool, uint32_t size)
{
    assert(size > 0);

    swRingBuffer *object = pool->object;
    swRingBuffer_item *item;
    uint32_t capacity;

    uint32_t alloc_size = size + sizeof(swRingBuffer_item);

    if (object->free_count > 0)
    {
        swRingBuffer_collect(object);
    }

    if (object->status == 0)
    {
        if (object->alloc_offset + alloc_size >= object->size)
        {
            uint32_t skip_n = object->size - object->alloc_offset;

            item = object->memory + object->alloc_offset;
            item->lock = 0;
            item->length = skip_n - sizeof(swRingBuffer_item);

            sw_atomic_t *free_count = &object->free_count;
            sw_atomic_fetch_add(free_count, 1);

            object->alloc_offset = 0;
            object->status = 1;

            capacity = object->collect_offset - object->alloc_offset;
        }
        else
        {
            capacity = object->size - object->alloc_offset;
        }
    }
    else
    {
        capacity = object->collect_offset - object->alloc_offset;
    }

    if (capacity < alloc_size)
    {
        return NULL;
    }

    item = object->memory + object->alloc_offset;
    item->lock = 1;
    item->length = size;
    item->index = object->alloc_count;

    object->alloc_offset += alloc_size;
    object->alloc_count ++;

    swDebug("alloc: ptr=%d", (void *)item->data - object->memory);

    return item->data;
}
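
The allocations handed out here are returned through swRingBuffer_free (Example #10 below). A hedged usage sketch of the pairing, assuming the swRingBuffer_new(size, shared) constructor and the swMemoryPool alloc/free/destroy function-pointer interface found in the Swoole tree these excerpts come from:

#include <string.h>
#include "swoole.h"  /* assumption: provides swMemoryPool and swRingBuffer_new */

void ring_buffer_demo(void)
{
    /* 1 MB ring buffer, not shared across processes (exact signature is an assumption) */
    swMemoryPool *pool = swRingBuffer_new(1024 * 1024, 0);
    if (pool == NULL)
    {
        return;
    }
    void *buf = pool->alloc(pool, 256);   /* dispatches to swRingBuffer_alloc */
    if (buf != NULL)
    {
        memset(buf, 0, 256);              /* use the 256 usable bytes */
        pool->free(pool, buf);            /* dispatches to swRingBuffer_free */
    }
    pool->destroy(pool);
}
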
Example #3
/**
 * dispatch data to worker
 */
int swProcessPool_dispatch(swProcessPool *pool, swEventData *data, int *dst_worker_id)
{
    int ret = 0;
    swWorker *worker;

    if (*dst_worker_id < 0)
    {
        *dst_worker_id = swProcessPool_schedule(pool);
    }

    *dst_worker_id += pool->start_id;
    worker = swProcessPool_get_worker(pool, *dst_worker_id);

    int sendn = sizeof(data->info) + data->info.len;
    ret = swWorker_send2worker(worker, data, sendn, SW_PIPE_MASTER | SW_PIPE_NONBLOCK);

    if (ret >= 0)
    {
        sw_atomic_fetch_add(&worker->tasking_num, 1);
    }
    else
    {
        swWarn("send %d bytes to worker#%d failed.", sendn, *dst_worker_id);
    }

    return ret;
}
Example #4
PHP_METHOD(swoole_atomic, add)
{
    long add_value = 1;
    sw_atomic_t *atomic = swoole_get_object(getThis());

    if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &add_value) == FAILURE)
    {
        RETURN_FALSE;
    }

    RETURN_LONG(sw_atomic_fetch_add(atomic, (uint32_t) add_value));
}
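
This method parses an optional increment (default 1) and returns the counter's value from before the addition, since sw_atomic_fetch_add is a fetch-then-add. In the Swoole headers it is, as far as I can tell, a thin macro over GCC's __sync_fetch_and_add; a stand-alone sketch of that behavior:

#include <stdio.h>
#include <stdint.h>

typedef volatile uint32_t sw_atomic_t;

/* mirrors the macro in Swoole's atomic.h (treat the exact definition as an assumption):
 * #define sw_atomic_fetch_add(value, add) __sync_fetch_and_add(value, add) */
#define sw_atomic_fetch_add(value, add) __sync_fetch_and_add(value, add)

int main(void)
{
    sw_atomic_t counter = 10;
    uint32_t before = sw_atomic_fetch_add(&counter, 5);
    /* prints "before=10 after=15": the return value predates the addition */
    printf("before=%u after=%u\n", before, (uint32_t) counter);
    return 0;
}
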
Example #5
SWINLINE static int swFactoryProcess_schedule(swFactoryProcess *object, swEventData *data)
{
	swServer *serv = SwooleG.serv;
	int target_worker_id = 0;

	//round-robin
	if (serv->dispatch_mode == SW_DISPATCH_ROUND)
	{
		target_worker_id = (object->worker_pti++) % serv->worker_num;
	}
	//hash by fd modulo
	else if (serv->dispatch_mode == SW_DISPATCH_FDMOD)
	{
		//Fixed #48. swapped the order of the checks
		//udp use remote port
		if (data->info.type == SW_EVENT_UDP || data->info.type == SW_EVENT_UDP6 || data->info.type == SW_EVENT_UNIX_DGRAM)
		{
			target_worker_id = ((uint16_t) data->info.from_id) % serv->worker_num;
		}
		else
		{
			target_worker_id = data->info.fd % serv->worker_num;
		}
	}
	//assign via a preemptive queue (IPC message queue)
	else
	{
		if (serv->ipc_mode == SW_IPC_MSGQUEUE)
		{
			//the msgsnd mtype must be > 0
			//the correct mtype in the worker process is pti + 1
			target_worker_id = serv->worker_num;
		}
		else
		{
			int i;
			atomic_t *round = &SwooleTG.worker_round_i;
			for (i = 0; i < serv->worker_num; i++)
			{
				sw_atomic_fetch_add(round, 1);
				target_worker_id = (*round) % serv->worker_num;

				if (object->workers_status[target_worker_id] == SW_WORKER_IDLE)
				{
					break;
				}
			}
			swTrace("schedule=%d|round=%d\n", target_worker_id, *round);
		}
	}
	return target_worker_id;
}
Example #6
static sw_inline int swProcessPool_schedule(swProcessPool *pool)
{
    int i, target_worker_id = 0;
    int run_worker_num = pool->run_worker_num;

    for (i = 0; i < run_worker_num + 1; i++)
    {
        target_worker_id = sw_atomic_fetch_add(&pool->round_id, 1) % run_worker_num;
        if (pool->workers[target_worker_id].status == SW_WORKER_IDLE)
        {
            break;
        }
    }
    return target_worker_id;
}
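
The loop probes at most run_worker_num + 1 tickets, preferring an idle worker but always terminating with some assignment. The atomic fetch-add on round_id is what keeps concurrent dispatchers from drawing the same ticket; a plain round_id++ is a racy read-modify-write. A small demonstration of the difference (hypothetical names, compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static volatile uint32_t plain_round = 0;
static volatile uint32_t atomic_round = 0;

static void *dispatcher(void *arg)
{
    (void) arg;
    for (int i = 0; i < 1000000; i++)
    {
        plain_round++;                             /* racy read-modify-write */
        __sync_fetch_and_add(&atomic_round, 1);    /* unique ticket, as in the pool */
    }
    return NULL;
}

int main(void)
{
    pthread_t t[4];
    for (int i = 0; i < 4; i++) pthread_create(&t[i], NULL, dispatcher, NULL);
    for (int i = 0; i < 4; i++) pthread_join(t[i], NULL);
    /* plain_round typically ends below 4000000 (lost updates); atomic_round is exact */
    printf("plain=%u atomic=%u\n", plain_round, atomic_round);
    return 0;
}
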
Example #7
/**
 * dispatch data to worker
 */
int swProcessPool_dispatch(swProcessPool *pool, swEventData *data, int *dst_worker_id)
{
    int ret = 0;
    swWorker *worker;

    if (pool->use_socket)
    {
        swStream *stream = swStream_new(pool->stream->socket_file, 0, SW_SOCK_UNIX_STREAM);
        if (stream == NULL)
        {
            return SW_ERR;
        }
        stream->response = NULL;
        stream->session_id = 0;
        if (swStream_send(stream, (char*) data, sizeof(data->info) + data->info.len) < 0)
        {
            stream->cancel = 1;
            return SW_ERR;
        }
        return SW_OK;
    }

    if (*dst_worker_id < 0)
    {
        *dst_worker_id = swProcessPool_schedule(pool);
    }

    *dst_worker_id += pool->start_id;
    worker = swProcessPool_get_worker(pool, *dst_worker_id);

    int sendn = sizeof(data->info) + data->info.len;
    ret = swWorker_send2worker(worker, data, sendn, SW_PIPE_MASTER | SW_PIPE_NONBLOCK);

    if (ret >= 0)
    {
        sw_atomic_fetch_add(&worker->tasking_num, 1);
    }
    else
    {
        swWarn("send %d bytes to worker#%d failed.", sendn, *dst_worker_id);
    }

    return ret;
}
Example #8
/**
 * dispatch data to worker
 */
int swProcessPool_dispatch_blocking(swProcessPool *pool, swEventData *data, int *dst_worker_id)
{
    int ret = 0;
    int sendn = sizeof(data->info) + data->info.len;

    if (pool->use_socket)
    {
        swClient _socket;
        if (swClient_create(&_socket, SW_SOCK_UNIX_STREAM, SW_SOCK_SYNC) < 0)
        {
            return SW_ERR;
        }
        if (_socket.connect(&_socket, pool->stream->socket_file, 0, -1, 0) < 0)
        {
            return SW_ERR;
        }
        if (_socket.send(&_socket, (void*) data, sendn, 0) < 0)
        {
            return SW_ERR;
        }
        _socket.close(&_socket);
        return SW_OK;
    }

    if (*dst_worker_id < 0)
    {
        *dst_worker_id = swProcessPool_schedule(pool);
    }

    *dst_worker_id += pool->start_id;
    swWorker *worker = swProcessPool_get_worker(pool, *dst_worker_id);

    ret = swWorker_send2worker(worker, data, sendn, SW_PIPE_MASTER);
    if (ret < 0)
    {
        swWarn("send %d bytes to worker#%d failed.", sendn, *dst_worker_id);
    }
    else
    {
        sw_atomic_fetch_add(&worker->tasking_num, 1);
    }

    return ret;
}
Example #9
static sw_inline uint32_t swServer_worker_schedule(swServer *serv, uint32_t schedule_key)
{
    uint32_t target_worker_id = 0;

    //round-robin mode
    if (serv->dispatch_mode == SW_DISPATCH_ROUND)
    {
        target_worker_id = (serv->worker_round_id++) % serv->worker_num;
    }
    //hash by fd modulo
    else if (serv->dispatch_mode == SW_DISPATCH_FDMOD)
    {
        target_worker_id = schedule_key % serv->worker_num;
    }
    //Preemptive distribution
    else
    {
        if (serv->ipc_mode == SW_IPC_MSGQUEUE)
        {
            //the msgsnd mtype must be > 0
            //the correct mtype in the worker process is pti + 1
            target_worker_id = serv->worker_num;
        }
        else
        {
            int i;
            sw_atomic_t *round = &SwooleTG.worker_round_i;
            for (i = 0; i < serv->worker_num; i++)
            {
                sw_atomic_fetch_add(round, 1);
                target_worker_id = (*round) % serv->worker_num;

                if (serv->workers[target_worker_id].status == SW_WORKER_IDLE)
                {
                    break;
                }
            }
            swTrace("schedule=%d|round=%d\n", target_worker_id, *round);
        }
    }
    return target_worker_id;
}
Example #10
static void swRingBuffer_free(swMemoryPool *pool, void *ptr)
{
    swRingBuffer *object = pool->object;
    swRingBuffer_item *item = (swRingBuffer_item *) ((char *) ptr - sizeof(swRingBuffer_item));

    assert(ptr >= object->memory);
    assert((char* )ptr <= (char * ) object->memory + object->size);
    assert(item->lock == 1);

    if (item->lock != 1)
    {
        swDebug("invalid free: index=%d, ptr=%p", item->index,  (void * )((void * )item->data - object->memory));
    }
    else
    {
        item->lock = 0;
    }

    swDebug("free: ptr=%p", (void * )((void * )item->data - object->memory));

    sw_atomic_t *free_count = &object->free_count;
    sw_atomic_fetch_add(free_count, 1);
}
Example #11
int swThreadPool_dispatch(swThreadPool *pool, void *task, int task_len)
{
    int ret;

    pool->cond.lock(&pool->cond);
#ifdef SW_THREADPOOL_USE_CHANNEL
    ret = swChannel_in(pool->chan, task, task_len);
#else
    ret = swRingQueue_push(&pool->queue, task);
#endif
    pool->cond.unlock(&pool->cond);

    if (ret < 0)
    {
        SwooleG.error = EAGAIN;
        return SW_ERR;
    }

    sw_atomic_t *task_num = &pool->task_num;
    sw_atomic_fetch_add(task_num, 1);

    return pool->cond.notify(&pool->cond);
}
Example #12
swTableRow* swTableRow_set(swTable *table, char *key, int keylen, sw_atomic_t **rowlock)
{
    if (keylen > SW_TABLE_KEY_SIZE)
    {
        keylen = SW_TABLE_KEY_SIZE;
    }

    swTableRow *row = swTable_hash(table, key, keylen);
    sw_atomic_t *lock = &row->lock;
    sw_spinlock(lock);
    *rowlock = lock;

    if (row->active)
    {
        for (;;)
        {
            if (strncmp(row->key, key, keylen) == 0)
            {
                break;
            }
            else if (row->next == NULL)
            {
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);

#ifdef SW_TABLE_DEBUG
                conflict_count ++;
#endif
                table->lock.unlock(&table->lock);

                if (!new_row)
                {
                    return NULL;
                }
                //add row_num
                bzero(new_row, sizeof(swTableRow));
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
            }
        }
    }
    else
    {
#ifdef SW_TABLE_DEBUG
        insert_count ++;
#endif

        sw_atomic_fetch_add(&(table->row_num), 1);

        // when the root node becomes active, we may need to compress the jump table
        if (table->list_n >= table->size - 1)
        {
            swTable_compress_list(table);
        }

        table->lock.lock(&table->lock);
        table->rows_list[table->list_n] = row;
        table->lock.unlock(&table->lock);

        row->list_index = table->list_n;
        sw_atomic_fetch_add(&table->list_n, 1);
    }

    memcpy(row->key, key, keylen);
    row->active = 1;
    return row;
}
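
Example #12 serializes access to a hash bucket with sw_spinlock(&row->lock) and hands the still-held lock back through *rowlock; Examples #18 and #19 below release it themselves. For reference, Swoole's spinlock is built from the same CAS primitive used throughout these examples. A minimal sketch of that shape with illustrative names (the real version adds CPU-pause/usleep backoff, so treat this as an approximation):

#include <sched.h>
#include <stdint.h>

typedef volatile uint32_t sw_atomic_t;

#define sw_atomic_cmp_set(lock, cmp, set) __sync_bool_compare_and_swap(lock, cmp, set)

/* spin until the 0 -> 1 transition succeeds; yield the CPU between attempts */
static void spinlock_acquire(sw_atomic_t *lock)
{
    while (!sw_atomic_cmp_set(lock, 0, 1))
    {
        sched_yield();
    }
}

/* atomically store 0, releasing the lock */
static void spinlock_release(sw_atomic_t *lock)
{
    __sync_lock_release(lock);
}
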
Example #13
swTableRow* swTableRow_set(swTable *table, char *key, int keylen, swTableRow **rowlock)
{
    if (keylen > SW_TABLE_KEY_SIZE)
    {
        keylen = SW_TABLE_KEY_SIZE;
    }

    swTableRow *row = swTable_hash(table, key, keylen);
    *rowlock = row;
    swTableRow_lock(row);

#ifdef SW_TABLE_DEBUG
    int _conflict_level = 0;
#endif

    if (row->active)
    {
        for (;;)
        {
            if (strncmp(row->key, key, keylen) == 0)
            {
                break;
            }
            else if (row->next == NULL)
            {
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);

#ifdef SW_TABLE_DEBUG
                conflict_count ++;
                if (_conflict_level > conflict_max_level)
                {
                    conflict_max_level = _conflict_level;
                }

#endif
                table->lock.unlock(&table->lock);

                if (!new_row)
                {
                    return NULL;
                }
                //add row_num
                bzero(new_row, sizeof(swTableRow));
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
#ifdef SW_TABLE_DEBUG
                _conflict_level++;
#endif
            }
        }
    }
    else
    {
#ifdef SW_TABLE_DEBUG
        insert_count ++;
#endif
        sw_atomic_fetch_add(&(table->row_num), 1);
    }

    memcpy(row->key, key, keylen);
    row->active = 1;
    return row;
}
Example #14
/**
 * Send the task result to worker
 */
int swTaskWorker_finish(swServer *serv, char *data, int data_len, int flags)
{
    swEventData buf;
    if (!current_task)
    {
        swWarn("cannot use finish in worker");
        return SW_ERR;
    }
    if (serv->task_worker_num < 1)
    {
        swWarn("cannot use task/finish, because no set serv->task_worker_num.");
        return SW_ERR;
    }
    if (current_task->info.type == SW_EVENT_PIPE_MESSAGE)
    {
        swWarn("task/finish is not supported in onPipeMessage callback.");
        return SW_ERR;
    }

    uint16_t source_worker_id = current_task->info.from_id;
    swWorker *worker = swServer_get_worker(serv, source_worker_id);

    if (worker == NULL)
    {
        swWarn("invalid worker_id[%d].", source_worker_id);
        return SW_ERR;
    }

    int ret;
    //for swoole_server_task
    if (swTask_type(current_task) & SW_TASK_NONBLOCK)
    {
        buf.info.type = SW_EVENT_FINISH;
        buf.info.fd = current_task->info.fd;
        //callback function
        if (swTask_type(current_task) & SW_TASK_CALLBACK)
        {
            flags |= SW_TASK_CALLBACK;
        }
        else if (swTask_type(current_task) & SW_TASK_COROUTINE)
        {
            flags |= SW_TASK_COROUTINE;
        }
        swTask_type(&buf) = flags;

        //write to file
        if (data_len >= SW_IPC_MAX_SIZE - sizeof(buf.info))
        {
            if (swTaskWorker_large_pack(&buf, data, data_len) < 0 )
            {
                swWarn("large task pack failed()");
                return SW_ERR;
            }
        }
        else
        {
            memcpy(buf.data, data, data_len);
            buf.info.len = data_len;
        }

        if (worker->pool->use_socket && worker->pool->stream->last_connection > 0)
        {
            int32_t _len = htonl(data_len);
            ret = swSocket_write_blocking(worker->pool->stream->last_connection, (void *) &_len, sizeof(_len));
            if (ret > 0)
            {
                ret = swSocket_write_blocking(worker->pool->stream->last_connection, data, data_len);
            }
        }
        else
        {
            ret = swWorker_send2worker(worker, &buf, sizeof(buf.info) + buf.info.len, SW_PIPE_MASTER);
        }
    }
    else
    {
        uint64_t flag = 1;

        /**
         * use the worker's shared memory to store the result
         */
        swEventData *result = &(serv->task_result[source_worker_id]);
        swPipe *task_notify_pipe = &(serv->task_notify[source_worker_id]);

        //lock worker
        worker->lock.lock(&worker->lock);

        if (swTask_type(current_task) & SW_TASK_WAITALL)
        {
            sw_atomic_t *finish_count = (sw_atomic_t*) result->data;
            char *_tmpfile = result->data + 4;
            int fd = open(_tmpfile, O_APPEND | O_WRONLY);
            if (fd >= 0)
            {
                buf.info.type = SW_EVENT_FINISH;
                buf.info.fd = current_task->info.fd;
                swTask_type(&buf) = flags;
                //result pack
                if (data_len >= SW_IPC_MAX_SIZE - sizeof(buf.info))
                {
                    if (swTaskWorker_large_pack(&buf, data, data_len) < 0)
                    {
                        swWarn("large task pack failed()");
                        buf.info.len = 0;
                    }
                }
                else
                {
                    buf.info.len = data_len;
                    memcpy(buf.data, data, data_len);
                }
                //write to tmpfile
                if (swoole_sync_writefile(fd, &buf, sizeof(buf.info) + buf.info.len) < 0)
                {
                    swSysError("write(%s, %ld) failed.", _tmpfile, sizeof(buf.info) + buf.info.len);
                }
                sw_atomic_fetch_add(finish_count, 1);
                close(fd);
            }
        }
        else
        {
            result->info.type = SW_EVENT_FINISH;
            result->info.fd = current_task->info.fd;
            swTask_type(result) = flags;

            if (data_len >= SW_IPC_MAX_SIZE - sizeof(buf.info))
            {
                if (swTaskWorker_large_pack(result, data, data_len) < 0)
                {
                    //unlock worker
                    worker->lock.unlock(&worker->lock);
                    swWarn("large task pack failed()");
                    return SW_ERR;
                }
            }
            else
            {
                memcpy(result->data, data, data_len);
                result->info.len = data_len;
            }
        }

        //unlock worker
        worker->lock.unlock(&worker->lock);

        while (1)
        {
            ret = task_notify_pipe->write(task_notify_pipe, &flag, sizeof(flag));
#ifdef HAVE_KQUEUE
            if (ret < 0 && (errno == EAGAIN || errno == ENOBUFS))
#else
            if (ret < 0 && errno == EAGAIN)
#endif
            {
                if (swSocket_wait(task_notify_pipe->getFd(task_notify_pipe, 1), -1, SW_EVENT_WRITE) == 0)
                {
                    continue;
                }
            }
            break;
        }
    }
    if (ret < 0)
    {
        swWarn("TaskWorker: send result to worker failed. Error: %s[%d]", strerror(errno), errno);
    }
    return ret;
}
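
In the SW_TASK_WAITALL branch above, each task worker appends its packed result to a shared tmpfile and then bumps the atomic finish_count stored at the head of the result buffer, so the caller can tell when all tasks have reported. A minimal sketch of that atomic completion-counter pattern with pthreads (names are illustrative; Swoole waits on a notify pipe rather than spinning):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define N_TASKS 8

/* plays the role of the sw_atomic_t finish counter at the head of result->data */
static volatile uint32_t finish_count = 0;

static void *task_worker(void *arg)
{
    (void) arg;
    usleep(1000);                            /* stand-in for doing and writing a result */
    __sync_fetch_and_add(&finish_count, 1);  /* report one more finished task */
    return NULL;
}

int main(void)
{
    pthread_t workers[N_TASKS];
    for (int i = 0; i < N_TASKS; i++) pthread_create(&workers[i], NULL, task_worker, NULL);
    while (finish_count < N_TASKS)           /* Swoole blocks on a pipe instead of polling */
    {
        usleep(100);
    }
    for (int i = 0; i < N_TASKS; i++) pthread_join(workers[i], NULL);
    printf("all %u tasks finished\n", (uint32_t) finish_count);
    return 0;
}
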
Example #15
int swWorker_onTask(swFactory *factory, swEventData *task)
{
    swServer *serv = factory->ptr;
    swString *package = NULL;

    factory->last_from_id = task->info.from_id;
    //worker busy
    serv->workers[SwooleWG.id].status = SW_WORKER_BUSY;

    switch (task->info.type)
    {
    //no buffer
    case SW_EVENT_TCP:
    //ringbuffer shm package
    case SW_EVENT_PACKAGE:
        //discard data
        if (swWorker_discard_data(serv, task) == SW_TRUE)
        {
            break;
        }
        do_task:
        serv->onReceive(serv, task);
        SwooleWG.request_count++;
        sw_atomic_fetch_add(&SwooleStats->request_count, 1);
        if (task->info.type == SW_EVENT_PACKAGE_END)
        {
            package->length = 0;
        }
        break;

    //chunk package
    case SW_EVENT_PACKAGE_START:
    case SW_EVENT_PACKAGE_END:
        //discard data
        if (swWorker_discard_data(serv, task) == SW_TRUE)
        {
            break;
        }
        //input buffer
        package = SwooleWG.buffer_input[task->info.from_id];
        //merge data to package buffer
        memcpy(package->str + package->length, task->data, task->info.len);
        package->length += task->info.len;

        //package end
        if (task->info.type == SW_EVENT_PACKAGE_END)
        {
            goto do_task;
        }
        break;

    case SW_EVENT_UDP:
    case SW_EVENT_UDP6:
    case SW_EVENT_UNIX_DGRAM:
        SwooleWG.request_count++;
        sw_atomic_fetch_add(&SwooleStats->request_count, 1);
        serv->onPacket(serv, task);
        break;

    case SW_EVENT_CLOSE:
        factory->end(factory, task->info.fd);
        break;

    case SW_EVENT_CONNECT:
        serv->onConnect(serv, task->info.fd, task->info.from_id);
        break;

    case SW_EVENT_FINISH:
        serv->onFinish(serv, task);
        break;

    case SW_EVENT_PIPE_MESSAGE:
        serv->onPipeMessage(serv, task);
        break;

    default:
        swWarn("[Worker] error event[type=%d]", (int )task->info.type);
        break;
    }

    //worker idle
    serv->workers[SwooleWG.id].status = SW_WORKER_IDLE;

    //the process exits after reaching the maximum number of requests
    if (!SwooleWG.run_always && SwooleWG.request_count > SwooleWG.max_request)
    {
        SwooleG.running = 0;
    }
    return SW_OK;
}
Example #16
/**
 * The master process sends data to a worker process
 * @param worker_id deliver to the specified worker process
 */
int swFactoryProcess_send2worker(swFactory *factory, swEventData *data, int worker_id)
{
	swFactoryProcess *object = factory->object;
	swServer *serv = factory->ptr;
	int pti = 0;
	int ret;
	int send_len = sizeof(data->info) + data->info.len;

	if (worker_id < 0)
	{
		//round-robin
		if (serv->dispatch_mode == SW_DISPATCH_ROUND)
		{
			pti = (object->worker_pti++) % object->worker_num;
		}
		//hash by fd modulo
		else if (serv->dispatch_mode == SW_DISPATCH_FDMOD)
		{
			//Fixed #48. swapped the order of the checks
			//udp use remote port
			if (data->info.type == SW_EVENT_UDP)
			{
				pti = ((uint16_t) data->info.from_id) % object->worker_num;
			}
			else
			{
				pti = data->info.fd % object->worker_num;
			}
		}
		//assign via a preemptive queue (IPC message queue)
		else
		{
#if SW_WORKER_IPC_MODE == 2
			//the msgsnd mtype must be > 0
			//the correct mtype in the worker process is pti + 1
			pti = object->worker_num;
#else
			int i;
			atomic_t *round = &SwooleWG.worker_pti;
			for (i = 0; i < serv->worker_num; i++)
			{
				sw_atomic_fetch_add(round, 1);
				pti = (*round) % serv->worker_num;
				if (object->workers_status[pti] == SW_WORKER_IDLE)
				{
					break;
				}
			}
#endif
		}
	}
	//a specific worker_id was given
	else
	{
		pti = worker_id;
	}

#if SW_WORKER_IPC_MODE == 2
	//insert to msg queue
	swQueue_data *in_data = (swQueue_data *)((void *)data - sizeof(long));

	//add 1 so the worker with id 0 does not fail (msgsnd mtype must be > 0)
	in_data->mtype = pti + 1;

	swDataHead *info = (swDataHead *)in_data->mdata;
	ret = object->rd_queue.in(&object->rd_queue, in_data, send_len);
	swTrace("[Master]rd_queue[%ld]->in: fd=%d|type=%d|len=%d", in_data->mtype, info->fd, info->type, info->len);
#else
	//swWarn("pti=%d|from_id=%d", pti, data->info.from_id);
	//send to unix sock
	ret = swWrite(object->workers[pti].pipe_master, (char *) data, send_len);
#endif
	return ret;
}
Example #17
int swWorker_onTask(swFactory *factory, swEventData *task)
{
    swServer *serv = factory->ptr;
    swString *package = NULL;
    swDgramPacket *header;

#ifdef SW_USE_OPENSSL
    swConnection *conn;
#endif

    factory->last_from_id = task->info.from_id;
    //worker busy
    serv->workers[SwooleWG.id].status = SW_WORKER_BUSY;

    switch (task->info.type)
    {
    //no buffer
    case SW_EVENT_TCP:
    //ringbuffer shm package
    case SW_EVENT_PACKAGE:
        //discard data
        if (swWorker_discard_data(serv, task) == SW_TRUE)
        {
            break;
        }
        do_task:
        {
            serv->onReceive(serv, task);
            SwooleWG.request_count++;
            sw_atomic_fetch_add(&SwooleStats->request_count, 1);
        }
        if (task->info.type == SW_EVENT_PACKAGE_END)
        {
            package->length = 0;
        }
        break;

    //chunk package
    case SW_EVENT_PACKAGE_START:
    case SW_EVENT_PACKAGE_END:
        //discard data
        if (swWorker_discard_data(serv, task) == SW_TRUE)
        {
            break;
        }
        package = swWorker_get_buffer(serv, task->info.from_id);
        //merge data to package buffer
        memcpy(package->str + package->length, task->data, task->info.len);
        package->length += task->info.len;

        //package end
        if (task->info.type == SW_EVENT_PACKAGE_END)
        {
            goto do_task;
        }
        break;

    case SW_EVENT_UDP:
    case SW_EVENT_UDP6:
    case SW_EVENT_UNIX_DGRAM:
        package = swWorker_get_buffer(serv, task->info.from_id);
        swString_append_ptr(package, task->data, task->info.len);

        if (package->offset == 0)
        {
            header = (swDgramPacket *) package->str;
            package->offset = header->length;
        }

        //one packet
        if (package->offset == package->length - sizeof(swDgramPacket))
        {
            SwooleWG.request_count++;
            sw_atomic_fetch_add(&SwooleStats->request_count, 1);
            serv->onPacket(serv, task);
            swString_clear(package);
        }
        break;

    case SW_EVENT_CLOSE:
#ifdef SW_USE_OPENSSL
        conn = swServer_connection_verify(serv, task->info.fd);
        if (conn && conn->ssl_client_cert.length)
        {
            free(conn->ssl_client_cert.str);
            bzero(&conn->ssl_client_cert, sizeof(conn->ssl_client_cert));
        }
#endif
        factory->end(factory, task->info.fd);
        break;

    case SW_EVENT_CONNECT:
#ifdef SW_USE_OPENSSL
        //SSL client certificate
        if (task->info.len > 0)
        {
            conn = swServer_connection_verify(serv, task->info.fd);
            conn->ssl_client_cert.str = strndup(task->data, task->info.len);
            conn->ssl_client_cert.size = conn->ssl_client_cert.length = task->info.len;
        }
#endif
        if (serv->onConnect)
        {
            serv->onConnect(serv, &task->info);
        }
        break;

    case SW_EVENT_FINISH:
        serv->onFinish(serv, task);
        break;

    case SW_EVENT_PIPE_MESSAGE:
        serv->onPipeMessage(serv, task);
        break;

    default:
        swWarn("[Worker] error event[type=%d]", (int )task->info.type);
        break;
    }

    //worker idle
    serv->workers[SwooleWG.id].status = SW_WORKER_IDLE;

    //the process exits after reaching the maximum number of requests
    if (!SwooleWG.run_always && SwooleWG.request_count >= SwooleWG.max_request)
    {
        SwooleG.running = 0;
        SwooleG.main_reactor->running = 0;
    }
    return SW_OK;
}
Example #18
swTableRow* swTableRow_set(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;

    sw_spinlock(lock);
    if (row->active)
    {
        for (;;)
        {
            if (row->crc32 == crc32)
            {
                break;
            }
            else if (row->next == NULL)
            {
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);

#ifdef SW_TABLE_DEBUG
                conflict_count ++;
#endif
                table->lock.unlock(&table->lock);

                if (!new_row)
                {
                    sw_spinlock_release(lock);
                    return NULL;
                }
                //add row_num
                bzero(new_row, sizeof(swTableRow));
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
            }
        }
    }
    else
    {
#ifdef SW_TABLE_DEBUG
        insert_count ++;
#endif

        sw_atomic_fetch_add(&(table->row_num), 1);

        // when the root node becomes active, we may need to compress the jump table
        if (table->list_n >= table->size - 1)
        {
            swTable_compress_list(table);
        }

        table->rows_list[table->list_n] = row;
        row->list_index = table->list_n;
        sw_atomic_fetch_add(&table->list_n, 1);
    }

    row->crc32 = crc32;
    row->active = 1;

    swTrace("row=%p, crc32=%u, key=%s\n", row, crc32, key);
    sw_spinlock_release(lock);

    return row;
}
Example #19
swTableRow* swTableRow_set(swTable *table, char *key, int keylen)
{
    swTableRow *row = swTable_hash(table, key, keylen);
    uint32_t crc32 = swoole_crc32(key, keylen);
    sw_atomic_t *lock = &row->lock;

    sw_spinlock(lock);
    if (row->active)
    {
        for (;;)
        {
            if (row->crc32 == crc32)
            {
                break;
            }
            else if (row->next == NULL)
            {
                table->lock.lock(&table->lock);
                swTableRow *new_row = table->pool->alloc(table->pool, 0);
                table->lock.unlock(&table->lock);

                if (!new_row)
                {
                    sw_spinlock_release(lock);
                    return NULL;
                }
                //add row_num
                sw_atomic_fetch_add(&(table->row_num), 1);
                row->next = new_row;
                row = new_row;
                break;
            }
            else
            {
                row = row->next;
            }
        }
    }
    else
    {
        sw_atomic_fetch_add(&(table->row_num), 1);
    }

#ifdef SW_TABLE_USE_LINKED_LIST
    if (!row->active)
    {
        row->list_next = NULL;
        if (table->head)
        {
            row->list_prev = table->tail;
            table->tail->list_next = row;
            table->tail = row;
        }
        else
        {
            table->head = table->tail = row;
            row->list_prev = NULL;
            table->iterator->tmp_row = row;
        }
    }
#endif

    row->crc32 = crc32;
    row->active = 1;

    swTrace("row=%p, crc32=%u, key=%s\n", row, crc32, key);
    sw_spinlock_release(lock);
    return row;
}