Example no. 1
0
/***
 *  rt_udp_close
 */
int rt_udp_close(struct rtdm_dev_context *context, int call_flags)
{
    struct rtsocket *sock = (struct rtsocket *)&context->dev_private;
    struct rtskb    *del;
    int             port;
    unsigned long   flags;


    rtos_spin_lock_irqsave(&udp_socket_base_lock, flags);

    sock->prot.inet.state = TCP_CLOSE;

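    /* release the port reserved in the global port bitmap */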
    if (sock->prot.inet.reg_index >= 0) {
        port = sock->prot.inet.reg_index;
        clear_bit(port % 32, &port_bitmap[port / 32]);

        sock->prot.inet.reg_index = -1;
    }

    rtos_spin_unlock_irqrestore(&udp_socket_base_lock, flags);

    /* cleanup already collected fragments */
    rt_ip_frag_invalidate_socket(sock);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&sock->incoming)) != NULL)
        kfree_rtskb(del);

    return rt_socket_cleanup(context);
}
Example no. 2
0
/***
 *  rt_udp_close
 */
int rt_udp_close(struct rtdm_dev_context *sockctx,
                 rtdm_user_info_t *user_info)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtskb    *del;
    int             port;
    rtdm_lockctx_t  context;


    rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

    sock->prot.inet.state = TCP_CLOSE;

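    /* release the reserved port and account for it as free again */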
    if (sock->prot.inet.reg_index >= 0) {
        port = sock->prot.inet.reg_index;
        clear_bit(port % 32, &port_bitmap[port / 32]);

        free_ports++;

        sock->prot.inet.reg_index = -1;
    }

    rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);

    /* cleanup already collected fragments */
    rt_ip_frag_invalidate_socket(sock);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&sock->incoming)) != NULL)
        kfree_rtskb(del);

    return rt_socket_cleanup(sockctx);
}
Example no. 3
0
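/***
 *  rt_icmp_cleanup_echo_requests - abort pending echo requests
 */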
void rt_icmp_cleanup_echo_requests(void)
{
    unsigned long       flags;
    struct list_head    *entry;
    struct list_head    *next;


    rtos_spin_lock_irqsave(&echo_calls_lock, flags);
    entry = echo_calls.next;
    INIT_LIST_HEAD(&echo_calls);
    rtos_spin_unlock_irqrestore(&echo_calls_lock, flags);

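    /* abort every pending echo request collected above */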
    while (entry != &echo_calls) {
        next = entry->next;
        rtpc_complete_call_nrt((struct rt_proc_call *)entry, -EINTR);
        entry = next;
    }

    /* purge any pending ICMP fragments */
    rt_ip_frag_invalidate_socket(&icmp_socket);
}
Example no. 4
0
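/***
 *  rt_icmp_cleanup_echo_requests - abort pending echo requests
 */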
void rt_icmp_cleanup_echo_requests(void)
{
    rtdm_lockctx_t      context;
    struct list_head    *entry;
    struct list_head    *next;


    rtdm_lock_get_irqsave(&echo_calls_lock, context);
    entry = echo_calls.next;
    INIT_LIST_HEAD(&echo_calls);
    rtdm_lock_put_irqrestore(&echo_calls_lock, context);

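    /* abort every pending echo request collected above */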
    while (entry != &echo_calls) {
        next = entry->next;
        rtpc_complete_call_nrt((struct rt_proc_call *)entry, -EINTR);
        entry = next;
    }

    /* purge any pending ICMP fragments */
    rt_ip_frag_invalidate_socket(icmp_socket);
}
Example no. 5
0
/***
 *  rt_udp_close
 */
int rt_udp_close(struct rtsocket *s)
{
    unsigned long flags;
    struct rtsocket *prev;
    struct rtsocket *next;
    struct rtskb *del;


    s->state = TCP_CLOSE;

    rtos_spin_lock_irqsave(&udp_socket_base_lock, flags);

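    /* unlink the socket from the global UDP socket list */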
    prev = s->prev;
    next = s->next;
    if (prev != NULL)
        prev->next = next;
    if (next != NULL)
        next->prev = prev;

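    /* advance the list head if this socket was the first entry */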
    if (s == udp_sockets)
        udp_sockets = next;

    rtos_spin_unlock_irqrestore(&udp_socket_base_lock, flags);

    s->next = NULL;
    s->prev = NULL;

    /* cleanup already collected fragments */
    rt_ip_frag_invalidate_socket(s);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&s->incoming)) != NULL)
        kfree_rtskb(del);

    return 0;
}