/** * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits * @link: Pointer to SMC link used for sending LLC control message. * @wr_buf: Out variable returning pointer to work request payload buffer. * @pend: Out variable returning pointer to private pending WR tracking. * It's the context the transmit complete handler will get. * * Reserves and pre-fills an entry for a pending work request send/tx. * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx. * Can sleep due to smc_get_ctrl_buf (if not in softirq context). * * Return: 0 on success, otherwise an error value. */ static int smc_llc_add_pending_send(struct smc_link *link, struct smc_wr_buf **wr_buf, struct smc_wr_tx_pend_priv **pend) { int rc; rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL, pend); if (rc < 0) return rc; BUILD_BUG_ON_MSG( sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE, "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)"); BUILD_BUG_ON_MSG( sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE, "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); BUILD_BUG_ON_MSG( sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)"); return 0; }
/*
 * Allocate and install the (radix) partition table.
 *
 * The table is allocated from memblock at its natural alignment, cleared,
 * and its physical base plus encoded size is programmed into PTCR. The
 * same value is propagated to the nest MMU.
 */
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr_val;

	/* PTCR only encodes table sizes up to 2^36 bytes */
	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");

	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Start out with an empty partition table (no valid entries) */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Program the partition table control register with the physical
	 * base and the size field (log2(size) - 12, i.e. in 4K units).
	 */
	ptcr_val = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr_val);
	powernv_set_nmmu_ptcr(ptcr_val);
}
/* * theory of operation: * * c_can core saves a received CAN message into the first free message * object it finds free (starting with the lowest). Bits NEWDAT and * INTPND are set for this message object indicating that a new message * has arrived. To work-around this issue, we keep two groups of message * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. * * We clear the newdat bit right away. * * This can result in packet reordering when the readout is slow. */ static int c_can_do_rx_poll(struct net_device *dev, int quota) { struct c_can_priv *priv = netdev_priv(dev); u32 pkts = 0, pend = 0, toread, n; /* * It is faster to read only one 16bit register. This is only possible * for a maximum number of 16 objects. */ BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16, "Implementation does not support more message objects than 16"); while (quota > 0) { if (!pend) { pend = c_can_get_pending(priv); if (!pend) break; /* * If the pending field has a gap, handle the * bits above the gap first. */ toread = c_can_adjust_pending(pend); } else { toread = pend; } /* Remove the bits from pend */ pend &= ~toread; /* Read the objects */ n = c_can_read_objects(dev, priv, toread, quota); pkts += n; quota -= n; } if (pkts) can_led_event(dev, CAN_LED_EVENT_RX); return pkts; }
/* * Callback for completed capture URB. */ static void audio_in_callback(struct urb *urb) { int i, index, length = 0, shutdown = 0; unsigned long flags; struct snd_line6_pcm *line6pcm = (struct snd_line6_pcm *)urb->context; line6pcm->in.last_frame = urb->start_frame; /* find index of URB */ for (index = 0; index < line6pcm->line6->iso_buffers; ++index) if (urb == line6pcm->in.urbs[index]) break; spin_lock_irqsave(&line6pcm->in.lock, flags); for (i = 0; i < LINE6_ISO_PACKETS; ++i) { char *fbuf; int fsize; struct usb_iso_packet_descriptor *fin = &urb->iso_frame_desc[i]; if (fin->status == -EXDEV) { shutdown = 1; break; } fbuf = urb->transfer_buffer + fin->offset; fsize = fin->actual_length; if (fsize > line6pcm->max_packet_size_in) { dev_err(line6pcm->line6->ifcdev, "driver and/or device bug: packet too large (%d > %d)\n", fsize, line6pcm->max_packet_size_in); } length += fsize; BUILD_BUG_ON_MSG(LINE6_ISO_PACKETS != 1, "The following code assumes LINE6_ISO_PACKETS == 1"); /* TODO: * Also, if iso_buffers != 2, the prev frame is almost random at * playback side. * This needs to be redesigned. It should be "stable", but we may * experience sync problems on such high-speed configs. */ line6pcm->prev_fbuf = fbuf; line6pcm->prev_fsize = fsize / (line6pcm->properties->bytes_per_channel * line6pcm->properties->capture_hw.channels_max); if (!test_bit(LINE6_STREAM_IMPULSE, &line6pcm->in.running) && test_bit(LINE6_STREAM_PCM, &line6pcm->in.running) && fsize > 0) line6_capture_copy(line6pcm, fbuf, fsize); } clear_bit(index, &line6pcm->in.active_urbs); if (test_and_clear_bit(index, &line6pcm->in.unlink_urbs)) shutdown = 1; if (!shutdown) { submit_audio_in_urb(line6pcm); if (!test_bit(LINE6_STREAM_IMPULSE, &line6pcm->in.running) && test_bit(LINE6_STREAM_PCM, &line6pcm->in.running)) line6_capture_check_period(line6pcm, length); } spin_unlock_irqrestore(&line6pcm->in.lock, flags); }