/* ----------------------------------------------------------------------
 * remove the slot with the given address from the given non-wrap fifo.
 * intended for out-of-order operations
 *
 * the given address must belong to a slot between the read and write
 * slots, and be in the 'ready' state
 *
 * returns 0 on success, non-0 otherwise
 * -------------------------------------------------------------------- */
int32_t
non_wrap_fifo_remove( VCHI_NWFIFO_T *_fifo, void *address )
{
   NON_WRAP_FIFO_HANDLE_T *fifo = (NON_WRAP_FIFO_HANDLE_T *)_fifo;
   int32_t success = -1;
   uint32_t slot;

   os_semaphore_obtain( &fifo->sem );

   slot = fifo->read_slot;
   os_assert( fifo->base_address );

   while( slot != fifo->write_slot ) {

      if ( fifo->slot_info[slot].address == address && fifo->slot_info[slot].state == NON_WRAP_READY ) {
         // mark this as no longer being in use
         fifo->slot_info[slot].state = NON_WRAP_NOT_IN_USE;
         success = 0;
         break;
      }
      // increment and wrap our slot number
      slot = next_slot( fifo, slot );
   }

   // if the entry to be removed was the current read slot then we need to move on
   if ( slot == fifo->read_slot && success == 0 )
      fifo->read_slot = next_read_slot( fifo, slot );

   os_semaphore_release( &fifo->sem );
   return success;
}
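A minimal usage sketch (the wrapper name is hypothetical): because removal only succeeds for a READY slot lying between the read and write slots, the caller must check the return value.

/* hypothetical wrapper: cancel a message that was queued but not yet read */
static int32_t cancel_queued_message( VCHI_NWFIFO_T *fifo, void *msg )
{
   /* returns non-0 if 'msg' is not the address of a READY slot
      between the read and write slots */
   return non_wrap_fifo_remove( fifo, msg );
}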
Example #2
void MainWindow::setup_shortcuts(){
	static struct Pair{
		const char *command;
		const char *slot;
	} pairs[] = {
#define SETUP_SHORTCUT(command, slot) { command, SLOT(slot)},
		SETUP_SHORTCUT(quit_command, quit_slot())
		SETUP_SHORTCUT(quit2_command, quit2_slot())
		SETUP_SHORTCUT(next_command, next_slot())
		SETUP_SHORTCUT(back_command, back_slot())
		SETUP_SHORTCUT(background_swap_command, background_swap_slot())
		SETUP_SHORTCUT(close_command, close_slot())
		SETUP_SHORTCUT(zoom_in_command, zoom_in_slot())
		SETUP_SHORTCUT(zoom_out_command, zoom_out_slot())
		SETUP_SHORTCUT(reset_zoom_command, reset_zoom_slot())
		SETUP_SHORTCUT(up_command, up_slot())
		SETUP_SHORTCUT(down_command, down_slot())
		SETUP_SHORTCUT(left_command, left_slot())
		SETUP_SHORTCUT(right_command, right_slot())
		SETUP_SHORTCUT(up_big_command, up_big_slot())
		SETUP_SHORTCUT(down_big_command, down_big_slot())
		SETUP_SHORTCUT(left_big_command, left_big_slot())
		SETUP_SHORTCUT(right_big_command, right_big_slot())
		SETUP_SHORTCUT(cycle_zoom_mode_command, cycle_zoom_mode_slot())
		SETUP_SHORTCUT(toggle_lock_zoom_command, toggle_lock_zoom_slot())
		SETUP_SHORTCUT(go_to_start_command, go_to_start())
		SETUP_SHORTCUT(go_to_end_command, go_to_end())
		SETUP_SHORTCUT(toggle_fullscreen_command, toggle_fullscreen())
		SETUP_SHORTCUT(rotate_left_command, rotate_left())
		SETUP_SHORTCUT(rotate_right_command, rotate_right())
		SETUP_SHORTCUT(rotate_left_fine_command, rotate_left_fine())
		SETUP_SHORTCUT(rotate_right_fine_command, rotate_right_fine())
		SETUP_SHORTCUT(flip_h_command, flip_h())
		SETUP_SHORTCUT(flip_v_command, flip_v())
		SETUP_SHORTCUT(minimize_command, minimize_slot())
		SETUP_SHORTCUT(minimize_all_command, minimize_all_slot())
		SETUP_SHORTCUT(show_options_command, show_options_dialog())
	};

	for (auto &c : this->connections)
		this->disconnect(c);
	
	this->connections.clear();
	this->shortcuts.clear();

	auto &shortcuts = this->app->get_shortcuts();
	for (auto &p : pairs){
		auto setting = shortcuts.get_shortcut_setting(p.command);
		if (!setting)
			continue;
		for (auto &seq : setting->sequences)
			this->setup_shortcut(seq, p.slot);
	}
}
Example #3
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
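request_slot() never checks for space at runtime; the B43_WARN_ONs only document its preconditions. A hedged sketch of a guarded caller follows (the helper name and errno-style return are assumptions, not driver API):

/* assumed helper: reserve the two slots a TX frame needs, or fail */
static int try_request_frame_slots(struct b43_dmaring *ring)
{
	if (free_slots(ring) < 2)
		return -ENOSPC;		/* would otherwise trip the WARN_ON */
	request_slot(ring);		/* header slot */
	return request_slot(ring);	/* payload slot index, or reuse */
}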
Example #4
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
Example #5
File: assembler.cpp  Project: jmgc/pyston
void Assembler::emitBatchPop(int scratch_rbp_offset, int scratch_size, const std::vector<GenericRegister>& to_push) {
    int offset = 0;

    for (const GenericRegister& r : to_push) {
        assert(scratch_size >= offset + 8);
        Indirect next_slot(RBP, offset + scratch_rbp_offset);

        if (r.type == GenericRegister::GP) {
            Register gp = r.gp;
            assert(gp.regnum >= 0 && gp.regnum < 16);
            movq(next_slot, gp);
            offset += 8;
        } else if (r.type == GenericRegister::XMM) {
            XMMRegister reg = r.xmm;
            movsd(next_slot, reg);
            offset += 8;
        } else {
            RELEASE_ASSERT(0, "%d", r.type);
        }
    }
}
Example #6
void Assembler::emitBatchPush(StackInfo stack_info, const std::vector<GenericRegister>& to_push) {
    assert(stack_info.has_scratch);
    int offset = 0;

    for (const GenericRegister& r : to_push) {
        assert(stack_info.scratch_bytes >= offset + 8);
        Indirect next_slot(RBP, offset + stack_info.scratch_rbp_offset);

        if (r.type == GenericRegister::GP) {
            Register gp = r.gp;
            assert(gp.regnum >= 0 && gp.regnum < 16);
            mov(gp, next_slot);
            offset += 8;
        } else if (r.type == GenericRegister::XMM) {
            XMMRegister reg = r.xmm;
            movsd(reg, next_slot);
            offset += 8;
        } else {
            RELEASE_ASSERT(0, "%d", r.type);
        }
    }
}
Example #7
/* ----------------------------------------------------------------------
 * used in conjunction with non_wrap_fifo_request_write_address, to
 * signify completion
 *
 * NOTE: this routine will unlock the fifo!
 *
 * returns 0 on success, non-0 otherwise
 * -------------------------------------------------------------------- */
int32_t
non_wrap_fifo_write_complete( VCHI_NWFIFO_T *_fifo, void *address )
{
   NON_WRAP_FIFO_HANDLE_T *fifo = (NON_WRAP_FIFO_HANDLE_T *)_fifo;
   int32_t success = -1;

   os_assert( fifo->base_address );

   //os_semaphore_obtain( &fifo->sem );
   // it should be the case that the writes will complete sequentially
   // but we will verify it
   if ( fifo->slot_info[fifo->write_slot].address == address ) {
      // mark that this message has been written
      fifo->slot_info[fifo->write_slot].state = NON_WRAP_READY;
      // point to the next message
      fifo->write_slot = next_slot( fifo, fifo->write_slot );
      // success!
      success = 0;
   }
   os_assert( success == 0 );

   os_semaphore_release( &fifo->sem );
   return success;
}
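Per the comment above, this pairs with non_wrap_fifo_request_write_address; that function's signature is not shown in this listing, so the call below is an assumption and the sketch is illustrative only.

/* assumed flow: the request call locks the fifo and returns a slot address */
void *addr = non_wrap_fifo_request_write_address( fifo, len ); /* assumed signature */
if ( addr != NULL ) {
   memcpy( addr, msg, len );                    /* fill the slot */
   non_wrap_fifo_write_complete( fifo, addr );  /* marks READY, unlocks */
}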
Example #8
static void enter_mode_pcap_to_tx(struct mode *mode)
{
	int irq, ifindex, fd = 0, ret;
	unsigned int size, it = 0;
	struct ring tx_ring;
	struct frame_map *hdr;
	struct sock_fprog bpf_ops;
	struct tx_stats stats;
	uint8_t *out = NULL;

	if (!device_up_and_running(mode->device_out))
		panic("Device not up and running!\n");

	set_memcpy();
	tx_sock = pf_socket();

	if (!pcap_ops[mode->pcap])
		panic("pcap group not supported!\n");
	fd = open_or_die(mode->device_in, O_RDONLY | O_LARGEFILE | O_NOATIME);
	ret = pcap_ops[mode->pcap]->pull_file_header(fd);
	if (ret)
		panic("error reading pcap header!\n");
	if (pcap_ops[mode->pcap]->prepare_reading_pcap) {
		ret = pcap_ops[mode->pcap]->prepare_reading_pcap(fd);
		if (ret)
			panic("error prepare reading pcap!\n");
	}

	memset(&tx_ring, 0, sizeof(tx_ring));
	memset(&bpf_ops, 0, sizeof(bpf_ops));
	memset(&stats, 0, sizeof(stats));

	ifindex = device_ifindex(mode->device_out);
	size = ring_size(mode->device_out, mode->reserve_size);

	bpf_parse_rules(mode->filter, &bpf_ops);

	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size, mode->jumbo_support);
	create_tx_ring(tx_sock, &tx_ring);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(&tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex);

	dissector_init_all(mode->print_mode);

	if (mode->cpu >= 0 && ifindex > 0) {
		irq = device_irq_number(mode->device_out);
		device_bind_irq_to_cpu(mode->cpu, irq);
		printf("IRQ: %s:%d > CPU%d\n", mode->device_out, irq, 
		       mode->cpu);
	}

	if (mode->kpull)
		interval = mode->kpull;

	itimer.it_interval.tv_sec = 0;
	itimer.it_interval.tv_usec = interval;
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = interval;
	setitimer(ITIMER_REAL, &itimer, NULL); 

	printf("BPF:\n");
	bpf_dump_all(&bpf_ops);
	printf("MD: TX %luus %s\n\n", interval, pcap_ops[mode->pcap]->name);

	while (likely(sigint == 0)) {
		while (user_may_pull_from_tx(tx_ring.frames[it].iov_base)) {
			struct pcap_pkthdr phdr;
			hdr = tx_ring.frames[it].iov_base;
			/* Kernel assumes: data = ph.raw + po->tp_hdrlen -
			 * sizeof(struct sockaddr_ll); */
			out = ((uint8_t *) hdr) + TPACKET_HDRLEN -
			      sizeof(struct sockaddr_ll);

			do {
				ret = pcap_ops[mode->pcap]->read_pcap_pkt(fd, &phdr,
						out, ring_frame_size(&tx_ring));
				if (unlikely(ret <= 0))
					goto out;
			} while (mode->filter && !bpf_run_filter(&bpf_ops, out, phdr.len));
			pcap_pkthdr_to_tpacket_hdr(&phdr, &hdr->tp_h);

			stats.tx_bytes += hdr->tp_h.tp_len;
			stats.tx_packets++;

			show_frame_hdr(hdr, mode->print_mode, RING_MODE_EGRESS);
			dissector_entry_point(out, hdr->tp_h.tp_snaplen,
					      mode->link_type);

			kernel_may_pull_from_tx(&hdr->tp_h);
			next_slot(&it, &tx_ring);

			if (unlikely(sigint == 1))
				break;
			if (frame_cnt_max != 0 &&
			    stats.tx_packets >= frame_cnt_max) {
				sigint = 1;
				break;
			}
		}
	}
out:
	fflush(stdout);
	printf("\n");
	printf("\r%12lu frames outgoing\n", stats.tx_packets);
	printf("\r%12lu bytes outgoing\n", stats.tx_bytes);

	dissector_cleanup_all();
	destroy_tx_ring(tx_sock, &tx_ring);

	close(tx_sock);
	if (pcap_ops[mode->pcap]->prepare_close_pcap)
		pcap_ops[mode->pcap]->prepare_close_pcap(fd, PCAP_MODE_READ);
	close(fd);
}
Example #9
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
Example #10
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
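	/* Worked example (illustrative values): with nr_slots = 256,
	 * current_slot = 3 and used_slots = 6, firstused = 3 - 6 + 1 = -2,
	 * which wraps to 256 - 2 = 254, i.e. the oldest in-flight slot
	 * sits just before the end of the ring. */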

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CPTCFG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
Example #11
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
Example #13
int main(int argc, char** argv) {
  QApplication tea(argc, argv);

  daemon::Player* daemon_player = new daemon::Player(argc, argv);

  face::Player* face_player = new face::Player(argc, argv);
  face::Playlist* face_playlist = new face::Playlist(argc, argv);
  face::Talk* face_talk = new face::Talk(argc, argv);

  QObject::connect(daemon_player, SIGNAL(thumb_up_signal(int)),
		   face_player, SLOT(thumb_up_slot(int)));
  QObject::connect(daemon_player, SIGNAL(thumb_down_signal(int)),
		   face_player, SLOT(thumb_down_slot(int)));
  QObject::connect(daemon_player, SIGNAL(search_signal(const QUrl&)),
		   face_playlist, SLOT(search_slot(const QUrl&)));
  QObject::connect(daemon_player, SIGNAL(done_signal(bool)),
		   face_playlist, SLOT(done_slot(bool)));
  QObject::connect(daemon_player, SIGNAL(duration_signal(int)),
		   face_player, SLOT(duration_slot(int)));
  QObject::connect(daemon_player, SIGNAL(tick_signal(int)),
		   face_player, SLOT(tick_slot(int)));
  QObject::connect(daemon_player, SIGNAL(title_signal(const QString&)),
		   face_player, SLOT(title_slot(const QString&)));
  QObject::connect(daemon_player, SIGNAL(favorite_signal(bool)),
		   face_player, SLOT(favorite_slot(bool)));
  QObject::connect(daemon_player, SIGNAL(title_signal(const QString&)),
		   face_talk, SLOT(title_slot(const QString&)));
  QObject::connect(daemon_player, SIGNAL(comments_signal(const QStringList&, const QStringList&)),
		   face_talk, SLOT(comments_slot(const QStringList&, const QStringList&)));

  QObject::connect(face_player, SIGNAL(open_signal(const QUrl&, bool)),
 		   daemon_player, SLOT(open_slot(const QUrl&, bool)));
  QObject::connect(face_player, SIGNAL(open_signal(const QUrl&, bool)),
		   face_playlist, SLOT(open_slot(const QUrl&, bool)));
  QObject::connect(face_player, SIGNAL(thumb_up_signal()), 
		   daemon_player, SLOT(thumb_up_slot()));
  QObject::connect(face_player, SIGNAL(thumb_down_signal()), 
		   daemon_player, SLOT(thumb_down_slot()));
  QObject::connect(face_player, SIGNAL(talk_signal(bool, int, int)), 
		   face_talk, SLOT(show_it(bool, int, int)));
  QObject::connect(face_player, SIGNAL(search_signal(const QString&)), 
		   daemon_player, SLOT(search_slot(const QString&)));
  QObject::connect(face_player, SIGNAL(playlist_signal(bool, int, int)), 
		   face_playlist, SLOT(show_it(bool, int, int)));
  QObject::connect(face_player, SIGNAL(play_signal()),
		   daemon_player, SLOT(play_slot()));
  QObject::connect(face_player, SIGNAL(pause_signal()),
		   daemon_player, SLOT(pause_slot()));
  QObject::connect(face_player, SIGNAL(volume_signal(int)),
		   daemon_player, SLOT(volume_slot(int)));
  QObject::connect(face_player, SIGNAL(previous_signal()),
		   face_playlist, SLOT(previous_slot()));
  QObject::connect(face_player, SIGNAL(next_signal()),
		   face_playlist, SLOT(next_slot()));
  QObject::connect(face_player, SIGNAL(normal_signal()),
		   face_playlist, SLOT(normal_slot())); 
  QObject::connect(face_player, SIGNAL(repeat_signal()),
		   face_playlist, SLOT(repeat_slot())); 
  QObject::connect(face_player, SIGNAL(shuffle_signal()),
		   face_playlist, SLOT(shuffle_slot())); 
  QObject::connect(face_player, SIGNAL(position_signal(int)),
		   daemon_player, SLOT(position_slot(int))); 
  QObject::connect(face_player, SIGNAL(favorite_signal()),
		   daemon_player, SLOT(favorite_slot())); 
  QObject::connect(face_player, SIGNAL(complete_signal(const QString, QStringList&)),
		   daemon_player, SLOT(complete_slot(const QString, QStringList&)),
		   Qt::DirectConnection);

  QObject::connect(face_playlist, SIGNAL(play_signal(const QString&)),
		   face_player, SLOT(play_slot(const QString&)));
  QObject::connect(face_playlist, SIGNAL(clos()),
		   face_player, SLOT(playlist_closed()));

  QObject::connect(face_talk, SIGNAL(comment_signal(const QString&, const QString&)),
		   daemon_player, SLOT(comment_slot(const QString&, const QString&)));
  QObject::connect(face_talk, SIGNAL(clos()),
		   face_player, SLOT(talk_closed()));


  face_player->show();

  return tea.exec();
}
Example #15
/* If netsniff-ng's in device is on a tap, it can efficiently filter out
 * some interesting packets and hand them to the out device, e.g. for
 * testing or debugging. */
static void enter_mode_rx_to_tx(struct mode *mode)
{
	int rx_sock, ifindex_in, ifindex_out;
	unsigned int size_in, size_out, it_in = 0, it_out = 0;
	unsigned long fcnt = 0;
	uint8_t *in, *out;
	short ifflags = 0;
	struct frame_map *hdr_in, *hdr_out;
	struct ring tx_ring;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct sock_fprog bpf_ops;

	if (!strncmp(mode->device_in, mode->device_out,
		     strlen(mode->device_in)))
		panic("Ingress/egress devices must be different!\n");
	if (!device_up_and_running(mode->device_out))
		panic("Egress device not up and running!\n");
	if (!device_up_and_running(mode->device_in))
		panic("Ingress device not up and running!\n");

	set_memcpy();
	rx_sock = pf_socket();
	tx_sock = pf_socket();

	memset(&tx_ring, 0, sizeof(tx_ring));
	memset(&rx_ring, 0, sizeof(rx_ring));
	memset(&rx_poll, 0, sizeof(rx_poll));
	memset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex_in = device_ifindex(mode->device_in);
	size_in = ring_size(mode->device_in, mode->reserve_size);

	ifindex_out = device_ifindex(mode->device_out);
	size_out = ring_size(mode->device_out, mode->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(mode->filter, &bpf_ops);
	bpf_attach_to_sock(rx_sock, &bpf_ops);

	setup_rx_ring_layout(rx_sock, &rx_ring, size_in, mode->jumbo_support);
	create_rx_ring(rx_sock, &rx_ring);
	mmap_rx_ring(rx_sock, &rx_ring);
	alloc_rx_ring_frames(&rx_ring);
	bind_rx_ring(rx_sock, &rx_ring, ifindex_in);
	prepare_polling(rx_sock, &rx_poll);

	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size_out, mode->jumbo_support);
	create_tx_ring(tx_sock, &tx_ring);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(&tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex_out);

	mt_init_by_seed_time();
	dissector_init_all(mode->print_mode);

	if (mode->promiscuous == true) {
		ifflags = enter_promiscuous_mode(mode->device_in);
		printf("PROMISC\n");
	}

	if (mode->kpull)
		interval = mode->kpull;

	itimer.it_interval.tv_sec = 0;
	itimer.it_interval.tv_usec = interval;
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = interval;
	setitimer(ITIMER_REAL, &itimer, NULL);

	printf("BPF:\n");
	bpf_dump_all(&bpf_ops);
	printf("MD: RXTX %luus\n\n", interval);
	printf("Running! Hang up with ^C!\n\n");

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx(rx_ring.frames[it_in].iov_base)) {
			hdr_in = rx_ring.frames[it_in].iov_base;
			in = ((uint8_t *) hdr_in) + hdr_in->tp_h.tp_mac;
			fcnt++;
			if (mode->packet_type != PACKET_ALL)
				if (mode->packet_type != hdr_in->s_ll.sll_pkttype)
					goto next;

			hdr_out = tx_ring.frames[it_out].iov_base;
			out = ((uint8_t *) hdr_out) + TPACKET_HDRLEN -
			      sizeof(struct sockaddr_ll);

			/* If we cannot pull, look for a different slot. */
			for (; !user_may_pull_from_tx(tx_ring.frames[it_out].iov_base) &&
			       likely(!sigint);) {
				if (mode->randomize)
					next_rnd_slot(&it_out, &tx_ring);
				else
					next_slot(&it_out, &tx_ring);
				hdr_out = tx_ring.frames[it_out].iov_base;
				out = ((uint8_t *) hdr_out) + TPACKET_HDRLEN -
				      sizeof(struct sockaddr_ll);
			}

			tpacket_hdr_clone(&hdr_out->tp_h, &hdr_in->tp_h);
			__memcpy(out, in, hdr_in->tp_h.tp_len);

			kernel_may_pull_from_tx(&hdr_out->tp_h);
			if (mode->randomize)
				next_rnd_slot(&it_out, &tx_ring);
			else
				next_slot(&it_out, &tx_ring);

			/* Should actually be avoided ... */
			show_frame_hdr(hdr_in, mode->print_mode, RING_MODE_INGRESS);
			dissector_entry_point(in, hdr_in->tp_h.tp_snaplen,
					      mode->link_type);

			if (frame_cnt_max != 0 && fcnt >= frame_cnt_max) {
				sigint = 1;
				break;
			}
next:
			kernel_may_pull_from_rx(&hdr_in->tp_h);
			next_slot(&it_in, &rx_ring);

			if (unlikely(sigint == 1))
				goto out;
		}

		poll(&rx_poll, 1, -1);
		poll_error_maybe_die(rx_sock, &rx_poll);
	}
out:
	sock_print_net_stats(rx_sock);

	dissector_cleanup_all();
	destroy_tx_ring(tx_sock, &tx_ring);
	destroy_rx_ring(rx_sock, &rx_ring);

	if (mode->promiscuous == true)
		leave_promiscuous_mode(mode->device_in, ifflags);

	close(tx_sock);
	close(rx_sock);
}
Example #16
static void enter_mode_rx_only_or_dump(struct mode *mode)
{
	int sock, irq, ifindex, fd = 0, ret;
	unsigned int size, it = 0;
	unsigned long fcnt = 0;
	short ifflags = 0;
	uint8_t *packet;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct frame_map *hdr;
	struct sock_fprog bpf_ops;

	if (!device_up_and_running(mode->device_in))
		panic("Device not up and running!\n");

	set_memcpy();
	sock = pf_socket();

	if (mode->dump) {
		struct stat tmp;
		memset(&tmp, 0, sizeof(tmp));
		ret = stat(mode->device_out, &tmp);
		if (ret < 0) {
			mode->dump_dir = 0;
			goto try_file;
		}
		mode->dump_dir = !!S_ISDIR(tmp.st_mode);
		if (mode->dump_dir) {
			fd = begin_multi_pcap_file(mode);
		} else {
try_file:
			fd = begin_single_pcap_file(mode);
		}
	}

	memset(&rx_ring, 0, sizeof(rx_ring));
	memset(&rx_poll, 0, sizeof(rx_poll));
	memset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex = device_ifindex(mode->device_in);
	size = ring_size(mode->device_in, mode->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(mode->filter, &bpf_ops);
	bpf_attach_to_sock(sock, &bpf_ops);

	setup_rx_ring_layout(sock, &rx_ring, size, mode->jumbo_support);
	create_rx_ring(sock, &rx_ring);
	mmap_rx_ring(sock, &rx_ring);
	alloc_rx_ring_frames(&rx_ring);
	bind_rx_ring(sock, &rx_ring, ifindex);

	prepare_polling(sock, &rx_poll);
	dissector_init_all(mode->print_mode);

	if (mode->cpu >= 0 && ifindex > 0) {
		irq = device_irq_number(mode->device_in);
		device_bind_irq_to_cpu(mode->cpu, irq);
		printf("IRQ: %s:%d > CPU%d\n", mode->device_in, irq, 
		       mode->cpu);
	}

	if (mode->promiscuous == true) {
		ifflags = enter_promiscuous_mode(mode->device_in);
		printf("PROMISC\n");
	}

	printf("BPF:\n");
	bpf_dump_all(&bpf_ops);
	printf("MD: RX %s\n\n", mode->dump ? pcap_ops[mode->pcap]->name : "");

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx(rx_ring.frames[it].iov_base)) {
			hdr = rx_ring.frames[it].iov_base;
			packet = ((uint8_t *) hdr) + hdr->tp_h.tp_mac;
			fcnt++;

			if (mode->packet_type != PACKET_ALL)
				if (mode->packet_type != hdr->s_ll.sll_pkttype)
					goto next;
			if (unlikely(rx_ring.layout.tp_frame_size <
				     hdr->tp_h.tp_snaplen)) {
				fprintf(stderr, "Skipping too large packet! "
					"No jumbo support selected?\n");
				fflush(stderr);
				goto next;
			}
			if (mode->dump) {
				struct pcap_pkthdr phdr;
				tpacket_hdr_to_pcap_pkthdr(&hdr->tp_h, &phdr);
				ret = pcap_ops[mode->pcap]->write_pcap_pkt(fd, &phdr,
									   packet, phdr.len);
				if (unlikely(ret != sizeof(phdr) + phdr.len))
					panic("Write error to pcap!\n");
			}

			show_frame_hdr(hdr, mode->print_mode, RING_MODE_INGRESS);
			dissector_entry_point(packet, hdr->tp_h.tp_snaplen,
					      mode->link_type);

			if (frame_cnt_max != 0 && fcnt >= frame_cnt_max) {
				sigint = 1;
				break;
			}
next:
			kernel_may_pull_from_rx(&hdr->tp_h);
			next_slot(&it, &rx_ring);

			if (unlikely(sigint == 1))
				break;
			if (mode->dump && next_dump) {
				struct tpacket_stats kstats;
				socklen_t slen = sizeof(kstats);
				memset(&kstats, 0, sizeof(kstats));
				getsockopt(sock, SOL_PACKET, PACKET_STATISTICS,
					   &kstats, &slen);
				fd = next_multi_pcap_file(mode, fd);
				next_dump = false;
				if (mode->print_mode == FNTTYPE_PRINT_NONE) {
					printf(".(+%u/-%u)", kstats.tp_packets - kstats.tp_drops,
					       kstats.tp_drops);
					fflush(stdout);
				}
			}
		}

		poll(&rx_poll, 1, -1);
		poll_error_maybe_die(sock, &rx_poll);
	}

	if (!(mode->dump_dir && mode->print_mode == FNTTYPE_PRINT_NONE))
		sock_print_net_stats(sock);
	else {
		printf("\n\n");
		fflush(stdout);
	}
	dissector_cleanup_all();
	destroy_rx_ring(sock, &rx_ring);
	if (mode->promiscuous == true)
		leave_promiscuous_mode(mode->device_in, ifflags);
	close(sock);
	if (mode->dump) {
		if (mode->dump_dir)
			finish_multi_pcap_file(mode, fd);
		else
			finish_single_pcap_file(mode, fd);
	}
}
Example #17
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}