Example #1
int pipe_send_cache_reply( ECM_DATA *ecm, struct cardserver_data *cs)
{
	if ( !cache_check_request(ecm->ecm[0], ecm->sid, cs->onid, ecm->caid, ecm->hash) ) return 0;
	uchar buf[64]; // 32 by default
	buf[0] = PIPE_CACHE_REPLY;
	buf[2] = ecm->ecm[0];
	buf[3] = (ecm->sid)>>8;
	buf[4] = (ecm->sid)&0xff;
	buf[5] = (cs->onid)>>8;
	buf[6] = (cs->onid)&0xff;
	buf[7] = (ecm->caid)>>8;
	buf[8] = (ecm->caid)&0xff;
	buf[9] = ecm->hash>>24;
	buf[10] = ecm->hash>>16;
	buf[11] = ecm->hash>>8;
	buf[12] = ecm->hash & 0xff;

	if (ecm->dcwstatus==STAT_DCW_SUCCESS) {
		memcpy(buf+13, ecm->cw, 16);
		buf[1] = 11+16; // payload length: total (29 bytes) minus the 2-byte header
		pipe_send( srvsocks[0], buf, 13+16);
	}
	else {
		buf[1] = 11; // no control word available: header fields only
		pipe_send( srvsocks[0], buf, 13);
	}
	return 1;
}
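The assignments above define a fixed 13-byte big-endian header (opcode, payload length, ECM tag, sid, onid, caid, 32-bit hash), optionally followed by the 16-byte control word. A minimal sketch of a helper that packs that shared header; the helper name is hypothetical and not part of the source:

static int pack_cache_header(uchar *buf, uchar opcode, uchar tag,
                             unsigned short sid, unsigned short onid,
                             unsigned short caid, unsigned int hash)
{
	buf[0] = opcode;
	/* buf[1] (payload length) is filled in by the caller */
	buf[2] = tag;
	buf[3] = sid>>8;   buf[4] = sid&0xff;
	buf[5] = onid>>8;  buf[6] = onid&0xff;
	buf[7] = caid>>8;  buf[8] = caid&0xff;
	buf[9] = hash>>24; buf[10] = hash>>16; buf[11] = hash>>8; buf[12] = hash&0xff;
	return 13; /* header bytes written */
}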
Example #2
DWORD WINAPI PipeReadThread(LPVOID param)
{
	DWORD numread, br;
	char buffer[IRCLINE];

	int threadnum = (int)(INT_PTR)param; // cast via INT_PTR to avoid pointer truncation on Win64

	while (1) {
		BOOL eol = FALSE;
		DWORD State;

		memset(buffer,0,sizeof(buffer));
		if (!PeekNamedPipe(pipe_read,buffer,IRCLINE,&br,NULL,NULL)) {
			pipe_send(pipesock,pipe_chan,"[CMD]: Could not read data from proccess\r\n");
			clearthread(threadnum);

			ExitThread(1);
		}

		if (br == 0) { //nothing to read 
			if (GetExitCodeProcess(pipe_Hproc,&State)) { 
				if (State != STILL_ACTIVE) {
					Close_Handles(); 
					pipe_send(pipesock,pipe_chan,"[CMD]: Proccess has terminated.\r\n");
					clearthread(threadnum);

					ExitThread (0);
				}
			}
			Sleep(10); // pause, then poll the process again
			continue;
		}

		DWORD cbyte;
		for (cbyte=0;cbyte<br;cbyte++) {
			if (buffer[cbyte] == '\n') {
				eol = TRUE;
				break;
			}
		}
		if (eol)
			br = cbyte + 1;
		else
			br = sizeof(buffer) - 1; // read at most sizeof-1 so the trailing NUL survives the memset
		memset(buffer,0,sizeof(buffer));
		if (!ReadFile(pipe_read, buffer, br, &numread, NULL))
			break;
		pipe_send(pipesock,pipe_chan,buffer);

	}
	pipe_send(pipesock,pipe_chan,"[CMD]: Could not read data from proccess.\r\n");
	clearthread(threadnum);

	ExitThread (0);
}
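For context, pipe_read and pipe_Hproc are presumably created with the standard CreatePipe/CreateProcess stdout-redirection pattern before this thread starts. A hedged sketch, assuming that setup (the variable names match the thread above; the setup code itself is not in the source):

#include <windows.h>
#include <string.h>

HANDLE pipe_read, pipe_write, pipe_Hproc;

void StartPipedProcess(void)
{
	SECURITY_ATTRIBUTES sa = { sizeof(sa), NULL, TRUE }; /* inheritable handles */
	STARTUPINFO si;
	PROCESS_INFORMATION pi;
	char cmdline[] = "cmd.exe"; /* CreateProcessA may modify the command line */

	CreatePipe(&pipe_read, &pipe_write, &sa, 0);
	memset(&si, 0, sizeof(si));
	si.cb = sizeof(si);
	si.dwFlags = STARTF_USESTDHANDLES;
	si.hStdOutput = pipe_write; /* child's stdout feeds pipe_read above */
	si.hStdError  = pipe_write;
	si.hStdInput  = GetStdHandle(STD_INPUT_HANDLE);
	if (CreateProcess(NULL, cmdline, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi))
		pipe_Hproc = pi.hProcess; /* polled by GetExitCodeProcess in the thread */
}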
Example #3
/*
 * The thread start routine for pipe stage threads.
 * Each will wait for a data item passed from the
 * caller or the previous stage, modify the data
 * and pass it along to the next (or final) stage.
 */
void *pipe_stage (void *arg)
{
    stage_t *stage = (stage_t*)arg;
    stage_t *next_stage = stage->next;
    int status;

    status = pthread_mutex_lock (&stage->mutex);
    if (status != 0)
        err_abort (status, "Lock pipe stage");
    while (1) {
        while (stage->data_ready != 1) {
            status = pthread_cond_wait (&stage->avail, &stage->mutex);
            if (status != 0)
                err_abort (status, "Wait for previous stage");
        }
        pipe_send (next_stage, stage->data + 1);
        stage->data_ready = 0;
        status = pthread_cond_signal (&stage->ready);
        if (status != 0)
            err_abort (status, "Wake next stage");
    }
    /*
     * Notice that the routine never unlocks the stage->mutex.
     * The call to pthread_cond_wait implicitly unlocks the
     * mutex while the thread is waiting, allowing other threads
     * to make progress. Because the loop never terminates, this
     * function has no need to unlock the mutex explicitly.
     */
    return NULL;    /* not reached: the loop above never exits */
}
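For reference, a minimal sketch of the stage_t structure this routine operates on; the field names are taken from the code above, but the exact layout is an assumption:

typedef struct stage_tag {
    pthread_mutex_t  mutex;       /* protects data and data_ready */
    pthread_cond_t   avail;       /* signaled when data is ready for this stage */
    pthread_cond_t   ready;       /* signaled when this stage can accept new data */
    int              data_ready;  /* 1 while data holds an unprocessed value */
    long             data;        /* the value passing through the stage */
    pthread_t        thread;      /* this stage's worker thread */
    struct stage_tag *next;       /* next stage (the last one collects results) */
} stage_t;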
Example #4
void Sync(ArgStruct *p)
{
    int bogus;

    /* Parent */
    if (p->tr) {
        pipe_send(pipe_to_child, &bogus, sizeof(bogus));
        pipe_recv(pipe_to_parent, &bogus, sizeof(bogus));
    } 

    /* Child */
    else {
        pipe_recv(pipe_to_child, &bogus, sizeof(bogus));
        pipe_send(pipe_to_parent, &bogus, sizeof(bogus));
    }
}
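pipe_send and pipe_recv here behave like blocking, length-exact transfers over pipe file descriptors. A minimal sketch of such wrappers, assuming pipe_to_child and pipe_to_parent are plain fds (only the call shape comes from the source; the bodies are an assumption):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void pipe_send(int fd, void *buf, int len)
{
    char *p = buf;
    while (len > 0) { /* write() may transfer less than requested */
        ssize_t n = write(fd, p, len);
        if (n < 0) { perror("pipe_send"); exit(1); }
        p += n; len -= n;
    }
}

static void pipe_recv(int fd, void *buf, int len)
{
    char *p = buf;
    while (len > 0) { /* keep reading until the full message arrives */
        ssize_t n = read(fd, p, len);
        if (n <= 0) { perror("pipe_recv"); exit(1); }
        p += n; len -= n;
    }
}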
Example #5
int
pipe_start(pipe_t *pipe, int value) {

	PCHECK(pthread_mutex_lock(&pipe->mutex));
	pipe->active++;
	PCHECK(pthread_mutex_unlock(&pipe->mutex));
	// the value enters the pipeline at the head stage first
	pipe_send(pipe->head, value);
	return 0;
}
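PCHECK presumably wraps a pthreads call and aborts on a nonzero status (pthreads functions return the error code rather than setting errno). A minimal sketch of such a macro; the exact definition is an assumption:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PCHECK(call) do {                                          \
		int _status = (call);                                      \
		if (_status != 0) {                                        \
			fprintf(stderr, "%s: %s\n", #call, strerror(_status)); \
			abort();                                               \
		}                                                          \
	} while (0)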
Example #6
File: pipe.c Project: Ace-Tang/APUE
void pipe_start(pipe_t *pipe, long value)
{
	int status;

	status = pthread_mutex_lock(&pipe->mutex);
	pipe->actives++;
	status = pthread_mutex_unlock(&pipe->mutex);
	pipe_send(pipe->head, value);
}
Example #7
/*
 * External interface to start a pipeline by passing
 * data to the first stage. The routine returns while
 * the pipeline processes in parallel. Call pipe_result
 * to collect the final stage values
 * (note that the pipe will stall when each stage fills,
 * until the result is collected).
 */
int pipe_start (pipe_t *pipe, long value)
{
    int status;

    status = pthread_mutex_lock (&pipe->mutex);
    if (status != 0)
        err_abort (status, "Lock pipe mutex");
    pipe->active++;
    status = pthread_mutex_unlock (&pipe->mutex);
    if (status != 0)
        err_abort (status, "Unlock pipe mutex");
    pipe_send (pipe->head, value);
    return 0;
}
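A hedged usage sketch of the interface the comment describes; pipe_create and pipe_result are assumptions modeled on that comment and on Programming with POSIX Threads-style pipelines, not shown in this listing:

#include <stdio.h>

int main(void)
{
    pipe_t my_pipe;
    long value, result;

    pipe_create(&my_pipe, 10);              /* assumed: build a 10-stage pipeline */
    while (scanf("%ld", &value) == 1) {
        pipe_start(&my_pipe, value);        /* feed the first stage; returns at once */
        if (pipe_result(&my_pipe, &result)) /* assumed: nonzero when a value emerged */
            printf("result: %ld\n", result);
    }
    return 0;
}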
Example #8
static int _impl_sync(rh_aout_api_itf self, rh_asmp_itf sample) {

    int e = -1;

    struct sles_api_instance * instance = (struct sles_api_instance *)self;

    const struct io_command_struct cmd = { SYNC_COMMAND, sample };

    if( sample ) {
        // create a reference for the command pipe
        (*sample)->addref(sample);
        if((e = pipe_send(self, &cmd))!=0)
            (*sample)->close(&sample);
    }

    return e;
}
Example #9
int pipe_send_cache_find( ECM_DATA *ecm, struct cardserver_data *cs)
{
	if ( !cache_check_request(ecm->ecm[0], ecm->sid, cs->onid, ecm->caid, ecm->hash) ) return 0;
	//send pipe to cache
	uchar buf[64]; // 32 by default
	buf[0] = PIPE_CACHE_FIND;
	buf[1] = 14; // payload length: total (16 bytes) minus the 2-byte header
	buf[2] = ecm->ecm[0];
	buf[3] = (ecm->sid)>>8; buf[4] = (ecm->sid)&0xff;
	buf[5] = (cs->onid)>>8; buf[6] = (cs->onid)&0xff;
	buf[7] = (ecm->caid)>>8; buf[8] = (ecm->caid)&0xff;
	buf[9] = ecm->hash>>24; buf[10] = ecm->hash>>16; buf[11] = ecm->hash>>8; buf[12] = ecm->hash & 0xff;
	buf[13] = ecm->provid>>16; buf[14] = ecm->provid>>8; buf[15] = ecm->provid & 0xff;
	//debugf(" Pipe Ecm->Cache: PIPE_CACHE_FIND %04x:%04x:%08x\n",ecm->caid, ecm->sid, ecm->hash);
	pipe_send( srvsocks[0], buf, 16);
	return 1;
}
Example #10
File: pipe.c Project: Ace-Tang/APUE
void *stage_deal(void *arg)
{
	int status;

	stage_t *stage = (stage_t *)arg;
	stage_t *stage_next = stage->next;
	
	status = pthread_mutex_lock(&stage->mutex);
	while (1)
	{
		while (!stage->data_ready)
			status = pthread_cond_wait(&stage->avail, &stage->mutex);
		pipe_send(stage_next, stage->data + 1);
		stage->data_ready = 0;
		status = pthread_cond_signal(&stage->ready);
	}
}
Example #11
int _impl_consumed_buffer(rh_aout_api_itf self, rh_asmp_itf sample) {

    int e = -1;

    struct sles_api_instance * instance = (struct sles_api_instance *)self;

    const struct io_command_struct cmd = { CONSUMED_BUFFER, sample };

    if( sample ) {
        // create a reference for the command pipe
        (*sample)->addref(sample);
        if((e = pipe_send(self, &cmd))!=0)
            (*sample)->close(&sample);
    }

    return e;
}
Example #12
static inline void
_server_pipe_write(void)
{
    ASSERT(pipe_c != NULL);

    ssize_t status = pipe_send(pipe_c, "", 1);

    if (status == 0 || status == CC_EAGAIN) {
        /* retry write */
        log_verb("server core: retry send on pipe");
        event_add_write(ctx->evb, pipe_write_id(pipe_c), NULL);
    } else if (status == CC_ERROR) {
        /* the write failed for some other reason */
        log_error("could not write to pipe - %s", strerror(pipe_c->err));
    }

    /* else, pipe write succeeded and no action needs to be taken */
}
Example #13
/*
 * External interface to start a pipeline by passing data to the first stage.
 * The routine returns while the pipeline processes in parallel. Call
 * pipe_result to collect the final stage values (note that the pipeline
 * will stall when each stage fills, until the result is collected).
 */
int pipe_start(pipe_t *pipe, long value)
{
    int status;
    
    status = pthread_mutex_lock(&pipe->mutex);
    status_check(status, "Lock pipe mutex");
    if (pipe->active >= pipe->stages) {
        fprintf(stdout, "Pipeline is fully, please try later.\n");
        status = pthread_mutex_unlock(&pipe->mutex);
        status_check(status, "Unlock pipe mutex");
        return 1;
    }
    pipe->active++;
    status = pthread_mutex_unlock(&pipe->mutex);
    status_check(status, "Unlock pipe mutex");
    pipe_send(pipe->head, value);
    
    return 0;
}
Example #14
static int _impl_shutdown(rh_aout_api_itf * itf) {

    struct sles_api_instance * instance;

    if(!itf)
        return -1;

    instance = (struct sles_api_instance *)*itf;

    if(instance) {

        if( instance->outputMix )
            (*instance->outputMix)->Destroy(instance->outputMix);

        if( instance->engineObject )
            (*instance->engineObject)->Destroy(instance->engineObject);

        if(instance->thread) {
            const struct io_command_struct cmd = { EXIT_COMMAND, NULL };
            if( pipe_send( *itf, &cmd ) == 0)
                while(instance->thread)
                    sched_yield();
        }

        close_all_channels(*itf);

        bucket_free(instance->aout_itf_bucket);
        instance->aout_itf_bucket = NULL;

        close(instance->cmd_pipe.write);
        instance->cmd_pipe.write = 0;

        close(instance->cmd_pipe.read);
        instance->cmd_pipe.read = 0;

        free(instance->interface);
        free(instance);
    }

    *itf = NULL;

    return 0;
}
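The shutdown path above relies on a handshake: an EXIT_COMMAND is queued on the command pipe, then the caller spins on sched_yield() until the worker clears instance->thread. A hedged sketch of the worker side of that contract (the loop shape, the cmd.id field name, and the pipe_recv helper are assumptions; only the "clear instance->thread to release the spin" contract comes from the code):

static void * api_thread_main(void * arg)
{
    struct sles_api_instance * instance = (struct sles_api_instance *)arg;
    struct io_command_struct cmd;

    for (;;) {
        if (pipe_recv(instance, &cmd) != 0) /* hypothetical blocking receive */
            continue;
        if (cmd.id == EXIT_COMMAND)
            break;
        /* ... dispatch SYNC_COMMAND, CONSUMED_BUFFER, etc. ... */
    }

    instance->thread = 0; /* releases the sched_yield() spin in _impl_shutdown */
    return NULL;
}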
Example #15
// thread handle 
void *
pipe_stage(void *arg) {
	stage_t *this= (stage_t *)arg;
	stage_t *next = this->next;

	PCHECK(pthread_mutex_lock(&this->mutex));

	while (1) { // process forever
		while (this->data_ready != 1) {
			printf("thread:%u waiting for data\n", (unsigned)pthread_self());
			PCHECK(pthread_cond_wait(&this->avail, &this->mutex));
		}

		printf("thread:%u data available\n", (unsigned)pthread_self());
		sleep(1);
		pipe_send(next, this->data + 1);
		this->data_ready = 0; // clear our own data_ready flag
		PCHECK(pthread_cond_signal(&this->ready));
		// PCHECK(pthread_cond_signal(&next->ready));
	}
}
Example #16
void SendTime(ArgStruct *p, double *t)
{
    /* Only child calls SendTime */
    pipe_send(pipe_to_parent, t, sizeof(*t));
}
Example #17
void SendRepeat(ArgStruct *p, int rpt)
{
    /* Only parent calls SendRepeat */
    pipe_send(pipe_to_child, &rpt, sizeof(rpt));
}
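The matching receive-side helpers implied by this pair would look like the following; the names RecvTime and RecvRepeat are assumptions modeled on the pattern above, not shown in the listing:

void RecvTime(ArgStruct *p, double *t)
{
    /* Only parent calls RecvTime */
    pipe_recv(pipe_to_parent, t, sizeof(*t));
}

void RecvRepeat(ArgStruct *p, int *rpt)
{
    /* Only child calls RecvRepeat */
    pipe_recv(pipe_to_child, rpt, sizeof(*rpt));
}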
Example #18
void cache_recvmsg()
{
	unsigned int recv_ip;
	unsigned short recv_port;
	unsigned char buf[1024];
	struct sockaddr_in si_other;
	socklen_t slen=sizeof(si_other);
	uint ticks = GetTickCount();
	struct cs_cachepeer_data *peer;

	int received = recvfrom( cfg.cachesock, buf, sizeof(buf), 0, (struct sockaddr*)&si_other, &slen);
	memcpy( &recv_ip, &si_other.sin_addr, 4);
	recv_port = ntohs(si_other.sin_port);

	if (received>0) {
		if (flag_debugnet) {
			debugf(" cache: recv data (%d) from address (%s:%d)\n", received, ip2string(recv_ip), recv_port );
			debughex(buf,received);
		}
		// Store Data
		struct cache_data req;

		switch(buf[0]) {
				case TYPE_REQUEST:
					// Check Peer
					peer = getpeerbyaddr(recv_ip,recv_port);
					if (!peer) {
						peer = getpeerbyip(recv_ip);
						if (!peer) break;
					}
					peer->lastactivity = ticks;
					//peer->totreq++;
					// Ignore requests from old MultiCS versions (r63 .. r85)
					if ( !strcmp("MultiCS",peer->program) ) {
						static const char *oldvers[] = {
							"r63","r64","r65","r66","r67","r68","r69","r70",
							"r71","r72","r73","r74","r75","r76","r77","r78",
							"r79","r80","r81","r82","r83","r84","r85" };
						int v, old = 0;
						for (v = 0; v < (int)(sizeof(oldvers)/sizeof(oldvers[0])); v++)
							if (!strcmp(oldvers[v],peer->version)) { old = 1; break; }
						if (old) break; // leaves the switch case
					}
					// Check CSP
					if (received==20) { // arbiter number
						strcpy(peer->program,"CSP");
						break;
					}
					// Check Status
					if (peer->disabled) break;
					// Get DATA
					req.tag = buf[1];
					req.sid = (buf[2]<<8) | buf[3];
					req.onid = (buf[4]<<8) | buf[5];
					req.caid = (buf[6]<<8) | buf[7];
					req.hash = (buf[8]<<24) | (buf[9]<<16) | (buf[10]<<8) |buf[11];
					// Check Cache Request
					if (!cache_check(&req)) break;
					//
					peer->reqnb++;
					// ADD CACHE
					struct cache_data *pcache = cache_fetch( &req );
					if (pcache==NULL) {
						//*debugf(" [CACHE] << Cache Request from %s %04x:%04x:%08x\n", peer->host->name, req.caid, req.sid, req.hash);
						pcache = cache_new( &req );
						if (cfg.cache.trackermode) {
							// Send REQUEST to all Peers
							struct cs_cachepeer_data *p = cfg.cachepeer;
							while (p) {
								if (!p->disabled)
								if (p->host->ip && p->port)
								if ( (p->lastactivity+75000)>ticks )
								if ( !p->fblock0onid || pcache->onid )
									cache_send_request(pcache,p);
								p = p->next;
							}
							pcache->sendcache = 1;
							cfg.cachereq++;
						}
					}
					else if (!cfg.cache.trackermode) {
						if ( (pcache->status==CACHE_STAT_DCW)&&(pcache->sendcache!=2) ) {
							//debugf(" [CACHE] << Request Reply >> to peer %s %04x:%04x:%08x\n", peer->host->name, req.caid, req.sid, req.hash);
							peer->ihitfwd++;
							peer->hitfwd++;
							cache_send_reply(pcache,peer);
						}
					}
					break;


				case TYPE_REPLY:
					// Check Peer
					peer = getpeerbyaddr(recv_ip,recv_port);
					if (!peer) {
						peer = getpeerbyip(recv_ip);
						if (!peer) break;
					}
					peer->lastactivity = ticks;

					//peer->totrep++;
					// Ignore replies from old MultiCS versions (r63 .. r85)
					if ( !strcmp("MultiCS",peer->program) ) {
						static const char *oldvers[] = {
							"r63","r64","r65","r66","r67","r68","r69","r70",
							"r71","r72","r73","r74","r75","r76","r77","r78",
							"r79","r80","r81","r82","r83","r84","r85" };
						int v, old = 0;
						for (v = 0; v < (int)(sizeof(oldvers)/sizeof(oldvers[0])); v++)
							if (!strcmp(oldvers[v],peer->version)) { old = 1; break; }
						if (old) break; // leaves the switch case
					}

					// Check Status
					if (peer->disabled) break;

					// 02 80 00CD 0001 0500 8D1DB359 80  // failed
					// 02 80 00CD 0001 0500 8D1DB359 80 00CD 0000 0500  63339F359A663232B73158405A255DDC  // OLD
					// 02 80 001F 0001 0100 9A3BA1C1 80 BC02DB99DE3D526D5702D42D4C249505  0005 6361726431 // NEW
					if (buf[12]!=buf[1]) {
						//peer->rep_badheader++;
						break;
					}
					req.tag = buf[1];
					req.sid = (buf[2]<<8) | buf[3];
					req.onid = (buf[4]<<8) | buf[5];
					req.caid = (buf[6]<<8) | buf[7];
					req.hash = (buf[8]<<24) | (buf[9]<<16) | (buf[10]<<8) |buf[11];
					// Check Cache Request
					if (!cache_check(&req)) {
						//peer->rep_badfields++;
						break;
					}
					//
					if (received==13) { // FAILED
						//peer->rep_failed++;
						//*debugf(" [CACHE] <| Failed Cache Reply from %s (CAID:%04x SID:%04x ONID:%04x)\n", peer->host->name, req.caid, req.sid, req.onid);
						// NOTHING TO DO
						break;
					}
					else if (received>=29) {
						// 02 80 001F 0001 0100 9A3BA1C1  80  BC02DB99DE3D526D5702D42D4C249505  0005 6361726431 // NEW
						if ( !acceptDCW(buf+13) ) {
							//peer->rep_baddcw++;
							break;
						}
						//*debugf(" [CACHE] << Good Cache Reply from %s %04x:%04x:%08x (ONID:%04x)\n", peer->host->name, req.caid, req.sid, req.hash, req.onid);
						peer->repok++; // Request+Reply

						// Search for Cache data
						struct cache_data *pcache = cache_fetch( &req );
						if (pcache==NULL) pcache = cache_new( &req );

						if (pcache->status!=CACHE_STAT_DCW) {
							//*debugf(" [CACHE] Update Cache DCW %04x:%04x:%08x\n", pcache->caid, pcache->sid, pcache->hash);
							pcache->peerid = peer->id;
							memcpy(pcache->cw, buf+13, 16);
							pcache->status = CACHE_STAT_DCW;
							if (pcache->sendpipe) {
								uchar buf[128]; // 32 by default; note: shadows the outer recv buffer
								buf[0] = PIPE_CACHE_FIND_SUCCESS;
								buf[1] = 11+2+16; // Data length
								buf[2] = pcache->tag;
								buf[3] = pcache->sid>>8; buf[4] = pcache->sid&0xff;
								buf[5] = pcache->onid>>8; buf[6] = pcache->onid&0xff;
								buf[7] = pcache->caid>>8; buf[8] = pcache->caid&0xff;
								buf[9] = pcache->hash>>24; buf[10] = pcache->hash>>16; buf[11] = pcache->hash>>8; buf[12] = pcache->hash & 0xff;
								buf[13] = peer->id>>8; buf[14] = peer->id&0xff;
								memcpy( buf+15, pcache->cw, 16);
								//*debugf(" pipe Cache->Ecm: PIPE_CACHE_FIND_SUCCESS %04x:%04x:%08x\n",pcache->caid, pcache->sid, pcache->hash); // debughex(buf, 13+16);
								pipe_send( srvsocks[1], buf, 13+2+16);
								//pcache->sendpipe = 0;
							}

							if (cfg.cache.trackermode) {
								// Send REQUEST to all Peers
								struct cs_cachepeer_data *p = cfg.cachepeer;
								while (p) {
									if (!p->disabled)
									if (p->host->ip && p->port)
									if ( (p->lastactivity+75000)>ticks )
									if ( !p->fblock0onid || pcache->onid )
										cache_send_reply(pcache,p);
									p = p->next;
								}
								pcache->sendcache = 2;
								cfg.cacherep++;
							}

						}
						else if ( pcache->sendpipe && memcmp(pcache->cw, buf+13, 16) ) {
							// resend to server
							pcache->peerid = peer->id;
							memcpy(pcache->cw, buf+13, 16);
							pcache->status = CACHE_STAT_DCW;

							uchar buf[128]; // 32 by default; note: shadows the outer recv buffer
							buf[0] = PIPE_CACHE_FIND_SUCCESS;
							buf[1] = 11+2+16; // Data length
							buf[2] = pcache->tag;
							buf[3] = pcache->sid>>8; buf[4] = pcache->sid&0xff;
							buf[5] = pcache->onid>>8; buf[6] = pcache->onid&0xff;
							buf[7] = pcache->caid>>8; buf[8] = pcache->caid&0xff;
							buf[9] = pcache->hash>>24; buf[10] = pcache->hash>>16; buf[11] = pcache->hash>>8; buf[12] = pcache->hash & 0xff;
							buf[13] = peer->id>>8; buf[14] = peer->id&0xff;
							memcpy( buf+15, pcache->cw, 16);
							pipe_send( srvsocks[1], buf, 13+2+16);
						}
Example #19
/* Send data over a socket, possibly including Mach ports.  */
error_t
S_socket_send (struct sock_user *user, struct addr *dest_addr, int flags,
               char *data, size_t data_len,
               mach_port_t *ports, size_t num_ports,
               char *control, size_t control_len,
               size_t *amount)
{
    error_t err = 0;
    struct pipe *pipe;
    struct sock *sock, *dest_sock;
    struct addr *source_addr;

    if (!user)
        return EOPNOTSUPP;

    sock = user->sock;

    if (flags & MSG_OOB)
        /* BSD local sockets don't support OOB data.  */
        return EOPNOTSUPP;

    if (dest_addr)
    {
        err = addr_get_sock (dest_addr, &dest_sock);
        if (err == EADDRNOTAVAIL)
            /* The server went away.  */
            err = ECONNREFUSED;
        if (err)
            return err;
        if (sock->pipe_class != dest_sock->pipe_class)
            /* Sending to a different type of socket!  */
            err = EINVAL;		/* ? XXX */
    }
    else
        dest_sock = 0;

    /* We could provide a source address for all writes, but we
       only do so for connectionless sockets because that's the
       only place it's required, and it's more efficient not to.  */
    if (!err && sock->pipe_class->flags & PIPE_CLASS_CONNECTIONLESS)
        err = sock_get_addr (sock, &source_addr);
    else
        source_addr = NULL;

    if (!err)
    {
        if (dest_sock)
            /* Grab the destination socket's read pipe directly, and stuff data
               into it.  This is not quite the usage sock_acquire_read_pipe was
               intended for, but it will work, as the only inappropriate errors
               occur on a broken pipe, which shouldn't be possible with the sort of
               sockets with which we can use socket_send...  XXXX */
            err = sock_acquire_read_pipe (dest_sock, &pipe);
        else
            /* No address, must be a connected socket...  */
            err = sock_acquire_write_pipe (sock, &pipe);

        if (!err)
        {
            err = pipe_send (pipe, sock->flags & PFLOCAL_SOCK_NONBLOCK,
                             source_addr, data, data_len,
                             control, control_len, ports, num_ports,
                             amount);
            if (dest_sock)
                pipe_release_reader (pipe);
            else
                pipe_release_writer (pipe);
        }

        if (err)
            /* The send failed, so free any resources it would have consumed
               (mig gets rid of memory, but we have to do everything else). */
        {
            if (source_addr)
                ports_port_deref (source_addr);
            while (num_ports-- > 0)
                mach_port_deallocate (mach_task_self (), *ports++);
        }
    }

    if (dest_sock)
        sock_deref (dest_sock);

    return err;
}