void IRC_Session::on_msgKicked(const QString& origin, const QString& channel, const QString& nick, const QString& message) { if(ChanList.contains(channel)) { ChanList[channel]->append("KICK",origin,message); if(nick == nick()) emit event_kicked(this,origin,channel); else emit event_kick(this,channel); } }
// Queue one packet of 'len' bytes onto the outlet's tube ring and notify the
// peer domain through the corresponding event channel. Returns 0 both on
// success and when the ring is full (the packet is silently dropped).
// NOTE(review): 'reply_to' is unused here — presumably consumed by a caller
// or sibling path; confirm.
static int ol_tube_send(outlet_t *ol, int len, term_t reply_to)
{
	assert(ol->tube != 0);
	tube_t *tb = ol->tube;
	// The accepting side produces into the shared page's rx ring; the
	// connecting side produces into the tx ring.
	tube_ring_t *ring = (tb->accepting) ?&tb->page->rx :&tb->page->tx;
	int tail = ring->tail;
	int tail1 = tube_ring_next(tail);
	if (tail1 == ring->head)
		return 0; // packet dropped
	// Payload may have been staged in temp_send_buffer by the caller; copy it
	// into the grant-mapped slot buffer matching this ring side. Otherwise the
	// data is assumed to be in the slot buffer already.
	if (tb->temp_buffer_used)
	{
		uint8_t *buf = (tb->accepting) ? tb->rx_buffers[tail] :tb->tx_buffers[tail];
		memcpy(buf, tb->temp_send_buffer, len);
		tb->temp_buffer_used = 0;
	}
	ring->slots[tail].len = len;
	// NOTE(review): no write barrier between filling the slot and publishing
	// the new tail below; looks like it relies on platform ordering — confirm.
	ring->tail = tail1;
	// Kick the event channel the peer listens on for this direction.
	if (tb->accepting)
		event_kick(tb->evtchn_rx);
	else
		event_kick(tb->evtchn_tx);
	return 0;
}
// Drain every pending slot from the tube ring, delivering each packet to the
// outlet, then publish the new head position and kick the peer's event
// channel so it can reuse the consumed slots. Does nothing (and sends no
// notification) when the ring is empty.
static void incoming(tube_ring_t *ring, uint8_t *bufs[TUBE_SLOTS], uint32_t kickme, outlet_t *ol)
{
	int slot = ring->head;
	if (slot == ring->tail)
		return;		// nothing pending
	// Consume slots up to the producer's tail; tail is re-read each pass so
	// packets queued while we drain are picked up too.
	while (slot != ring->tail)
	{
		outlet_new_data(ol, bufs[slot], ring->slots[slot].len);
		slot = tube_ring_next(slot);
	}
	ring->head = slot;	// publish consumption
	event_kick(kickme);
}
// Transmit one Ethernet frame through the Xen netfront tx ring. The frame is
// copied into a granted tx buffer taken from the free list; if no buffer is
// available the frame is dropped and accounted in the link stats. The backend
// is notified only when the ring macros say a kick is required.
void netfe_output(netfe_t *fe, uint8_t *packet, int pack_len)
{
	assert(pack_len <= ETH_MTU +ETH_HDR_LEN +ETH_CSUM_LEN);
	assert(pack_len <= PAGE_SIZE);

#ifdef EXP_LINC_LATENCY
	// see comment above
	if (pack_len >= 6 +6 +2 +20 && packet[6 +6 +2 +1] == 42)
		linc_output(fe->index);
#endif // EXP_LINC_LATENCY

	// No free tx buffer — drop the frame and count it
	if (fe->free_tx_head == NO_TX_BUFFER)
	{
		LINK_STATS_INC(link.drop);
		return;
	}

	// Pop a buffer off the free list and copy the frame into it
	int slot = fe->free_tx_head;
	fe->free_tx_head = fe->free_tx_bufs[slot];
	memcpy(fe->tx_buffers[slot], packet, pack_len);

	// Fill in the next private tx request and advance the private producer
	RING_IDX idx = fe->tx_ring.req_prod_pvt;
	netif_tx_request_t *txreq = RING_GET_REQUEST(&fe->tx_ring, idx);
	txreq->gref = fe->tx_buf_refs[slot];
	txreq->id = slot;
	txreq->offset = 0;
	txreq->flags = 0;
	txreq->size = pack_len;
	fe->tx_ring.req_prod_pvt = idx +1;

	wmb();	// dark

	int do_notify;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&fe->tx_ring, do_notify);
	if (do_notify)
		event_kick(fe->evtchn);

	// Reclaim buffers for any responses the backend has already produced
	netfe_tx_buf_gc(fe);
}
// Write 'len' bytes of 'msg' to the Xen console output ring, inserting a '\r'
// before each bare '\n' (terminal-style line endings). Spins until every byte
// has been queued, kicking the console event channel after each batch.
// 'was_cr' persists across calls so an explicit "\r\n" pair split between two
// calls does not receive a second CR.
void console_write(const char *msg, int len)
{
	static int was_cr = 0;
	int sent = 0;
	while (sent < len)
	{
		XENCONS_RING_IDX cons, prod;
		cons = console.intf->out_cons;
		rmb();	// read consumer index before producing past it
		prod = console.intf->out_prod;

		// while ((sent < len) && (prod - cons < sizeof(console.intf->out)))
		// 	console.intf->out[MASK_XENCONS_IDX(prod++, console.intf->out)] = msg[sent++];
		//
		// It may be possible to use stty or ESC sequence instead of this nastiness
		//
		while ((sent < len) && (prod - cons < sizeof(console.intf->out)))
		{
			// Bare LF: emit a CR first, unless the previous byte was already CR
			if (msg[sent] == '\n' && !was_cr)
			{
				int idx = MASK_XENCONS_IDX(prod, console.intf->out);
				console.intf->out[idx] = '\r';
				prod++;
				// NOTE(review): if the ring fills right here, the retry from
				// the outer loop will insert a second CR before the same LF
				// (was_cr is still stale) — harmless for terminals, but confirm.
				if (prod - cons >= sizeof(console.intf->out))
					break;
			}
			was_cr = (msg[sent] == '\r');
			int idx = MASK_XENCONS_IDX(prod, console.intf->out);
			console.intf->out[idx] = msg[sent++];
			prod++;
		}
		// Publish the new producer index, then notify the console backend
		console.intf->out_prod = prod;
		wmb();
		event_kick(console.chan);
	}
	ssa(SYS_STATS_IO_OUTPUT, len);
}
// Event-channel interrupt handler for a network frontend: reclaims completed
// tx buffers, drains all rx responses (reassembling multi-fragment packets),
// then replenishes rx requests back up to EXT_RX_BUFFERS outstanding.
static void netfe_int(uint32_t port, void *data)
{
	netfe_t *fe = (netfe_t *)data;
	assert(fe != 0);

	// Free tx buffers for requests the backend has finished with
	netfe_tx_buf_gc(fe);

	// A reponse may have NETRXF_more_data flag set. Such responses are buffered
	// instead of passing it to upper layer immediately.
	//
	// NOTE(review): static storage makes this handler non-reentrant and shares
	// the reassembly buffer across all frontends — presumably interrupts are
	// serialized here; confirm.
	static uint8_t chained_data_buffer[CHAINED_DATA_SIZE];
	static int chained_data_offset = 0;		// buffer is empty

	RING_IDX prod, cons;

try_harder:
	prod = fe->rx_ring.sring->rsp_prod;
	rmb();	// magic
	cons = fe->rx_ring.rsp_cons;

	while (cons != prod)
	{
		netif_rx_response_t *rsp = RING_GET_RESPONSE(&fe->rx_ring, cons);
		//assert(rsp->id == (cons & (NR_RX_BUFFERS -1)));
		assert(rsp->status > 0);	// positive status == fragment length
		//assert(rsp->offset == 0);
		assert((rsp->flags & NETRXF_extra_info) == 0);

		uint8_t *data = fe->rx_buffers[rsp->id];
		int data_len = rsp->status;

		// Fragment of a chained packet (or continuation of one already begun):
		// accumulate it into the static reassembly buffer
		if (chained_data_offset > 0 || (rsp->flags & NETRXF_more_data))
		{
			assert(chained_data_offset +data_len <= CHAINED_DATA_SIZE);
			memcpy(chained_data_buffer +chained_data_offset, data, data_len);
			chained_data_offset += data_len;
		}

		// Last (or only) fragment — deliver the complete packet upstream
		if ((rsp->flags & NETRXF_more_data) == 0)
		{
			if (chained_data_offset > 0)
			{
				netfe_incoming(fe, chained_data_buffer, chained_data_offset);
				chained_data_offset = 0;
			}
			else
				netfe_incoming(fe, data, data_len);
		}

		cons++;
	}
	fe->rx_ring.rsp_cons = cons;

	// Responses may have raced in while we were draining — go around again
	int more;
	RING_FINAL_CHECK_FOR_RESPONSES(&fe->rx_ring, more);
	if (more)
		goto try_harder;

	// Top the rx ring back up to EXT_RX_BUFFERS outstanding requests
	int add_reqs = EXT_RX_BUFFERS - (fe->rx_ring.req_prod_pvt -fe->rx_ring.rsp_cons);
	//assert(add_reqs >= 0);

	RING_IDX req_prod = fe->rx_ring.req_prod_pvt;
	for (int i = 0; i < add_reqs; i++)
	{
		netif_rx_request_t *req = RING_GET_REQUEST(&fe->rx_ring, req_prod +i);
		// Buffer id cycles through NR_RX_BUFFERS in ring order
		req->id = (req_prod +i) & (NR_RX_BUFFERS -1);
		req->gref = fe->rx_buf_refs[req->id];
	}

	wmb();	// dark
	fe->rx_ring.req_prod_pvt = req_prod +add_reqs;

	int notify;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&fe->rx_ring, notify);
	if (notify)
		event_kick(fe->evtchn);
}
// Probe XenStore for all vif (network) frontends, and for each one: allocate
// and grant the shared tx/rx rings and packet buffers, bind the interrupt,
// publish connection parameters to XenStore, switch the device to
// XenbusStateConnected, and pre-post EXT_RX_BUFFERS receive requests.
// Builds the net_front_ends linked list and sets num_net_front_ends.
void netfe_init(void)
{
	int index = 0;
	netfe_t **link = &net_front_ends;

	// Iterate device/vif/0, device/vif/1, ... until a backend-id key is missing
	while (1)
	{
		int n;
		char xs_key[256];
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/backend-id", index);
		int rs = xenstore_read_int(&n, xs_key);
		if (rs != 0)
			break;

		// FE/(index) is present
		domid_t backend_id = (domid_t)n;

		netfe_t *fe = (netfe_t *)mm_alloc_pages(PSIZE(sizeof(netfe_t)));
		memset(fe, 0, sizeof(*fe));

		// setup shared rings
		fe->rxs = (netif_rx_sring_t *)mm_alloc_page();
		assert(fe->rxs != 0);
		fe->txs = (netif_tx_sring_t *)mm_alloc_page();
		assert(fe->txs != 0);

		SHARED_RING_INIT(fe->rxs);
		SHARED_RING_INIT(fe->txs);

		FRONT_RING_INIT(&fe->rx_ring, fe->rxs, PAGE_SIZE);
		FRONT_RING_INIT(&fe->tx_ring, fe->txs, PAGE_SIZE);

		// grant the backend access to both ring pages
		grants_allow_access(&fe->rx_ref, backend_id, virt_to_mfn(fe->rxs));
		grants_allow_access(&fe->tx_ref, backend_id, virt_to_mfn(fe->txs));

		// set up receive buffers (one granted page each)
		for (int i = 0; i < NR_RX_BUFFERS; i++)
		{
			fe->rx_buffers[i] = mm_alloc_page();
			assert(fe->rx_buffers[i] != 0);
			unsigned long mfn = virt_to_mfn(fe->rx_buffers[i]);
			grants_allow_access(&fe->rx_buf_refs[i], backend_id, mfn);
		}

		// set up send buffers; the free list is threaded through free_tx_bufs
		// with free_tx_head as the list head (NO_TX_BUFFER terminates it)
		fe->free_tx_head = NO_TX_BUFFER;
		for (int i = 0; i < NR_TX_BUFFERS; i++)
		{
			fe->tx_buffers[i] = mm_alloc_page();
			assert(fe->tx_buffers[i] != 0);
			unsigned long mfn = virt_to_mfn(fe->tx_buffers[i]);
			grants_allow_access(&fe->tx_buf_refs[i], backend_id, mfn);
			fe->free_tx_bufs[i] = fe->free_tx_head;
			fe->free_tx_head = i;
		}

		// set up interrupt
		fe->evtchn = event_alloc_unbound(backend_id);
		event_bind(fe->evtchn, netfe_int, (void *)fe);

		// publish ring references and event channel to XenStore
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/rx-ring-ref", index);
		rs = xenstore_write_uint(xs_key, fe->rx_ref);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/tx-ring-ref", index);
		rs = xenstore_write_uint(xs_key, fe->tx_ref);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/event-channel", index);
		rs = xenstore_write_uint(xs_key, fe->evtchn);
		assert(rs == 0);
		// advertise features: rx-copy mode, no checksum offload, rx notify
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/request-rx-copy", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/feature-no-csum-offload", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/feature-rx-notify", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/state", index);
		rs = xenstore_write(xs_key, "4");	// XenbusStateConnected
		assert(rs == 0);

		// read MAC address
		char buf[64];
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/mac", index);
		rs = xenstore_read(xs_key, buf, sizeof(buf));
		assert(rs == 0);
		rs = parse_mac(buf, fe->mac);
		assert(rs == 0);
		fe->mac_len = ETH_ALEN;

		printk("\reth%d: MAC %02x:%02x:%02x:%02x:%02x:%02x\r\n", index,
					fe->mac[0], fe->mac[1], fe->mac[2],
					fe->mac[3], fe->mac[4], fe->mac[5]);

		//
		// Publish EXT_RX_BUFFERS requests only and replenish then to this number
		// during each interrupt handler invocation.
		//
		for (int i = 0; i < EXT_RX_BUFFERS; i++)
		{
			netif_rx_request_t *req = RING_GET_REQUEST(&fe->rx_ring, fe->rx_ring.req_prod_pvt);
			req->id = i; //rx_id++;
			req->gref = fe->rx_buf_refs[i];
			fe->rx_ring.req_prod_pvt++;
		}

		RING_PUSH_REQUESTS(&fe->rx_ring);
		event_kick(fe->evtchn);

		fe->index = index++;

		//fe->next = 0;
		//fe->attached_lwip_netif = 0;
		//fe->attached_outlet = 0;

		// add to net_front_ends list (appended in discovery order)
		*link = fe;
		link = &fe->next;
	}

	num_net_front_ends = index;
}