static struct slab *get_slab(struct slab_cache *cache) { struct slab *slab; if (rb_empty(&cache->partial) && rb_empty(&cache->empty)) { slab = new_slab(cache); slab_insert(&cache->partial, slab); return slab; } if (rb_empty(&cache->partial)) return slab_entry(rb_root(&cache->empty)); return slab_entry(rb_root(&cache->partial)); }
/*
 * rb_erase - ring-buffer erase
 *
 * Rewind the buffer back one byte, i.e. un-put the most recently
 * stored byte.  Used together with the echo pointer: if the erased
 * byte had already been echoed, the echo pointer is rewound as well.
 *
 * returns: 1 - echo pointer was moved
 *          0 - echo pointer was not moved
 */
uint8_t rb_erase(struct ring_buffer * const rb)
{
	uint8_t * put;

	/* nothing stored: nothing to erase */
	if (rb_empty(rb))
		return 0;

	/* space will be available for put */
	clr_cantput(rb);

	/* update pointer */
	put = rb->put;
	/* NOTE(review): rb_dec_ptr is presumably a macro that steps `put`
	 * back one slot with wrap-around (a plain function could not modify
	 * the local) — confirm against its definition. */
	rb_dec_ptr(rb, put);
	rb->put = put;

	/* if can't echo on entry we backed over an echoed byte */
	if (rb_cantecho(rb)) {
		rb->echo = put;
		/* put has met get again: mark the buffer as drained */
		if (put == rb->get)
			set_cantget(rb);
		return 1;
	}

	/* put has caught up with echo: no more un-echoed data */
	if (put == rb->echo)
		set_cantecho(rb);

	return 0;
}
/*
 * rb_remove - dequeue the oldest byte from the ring buffer.
 *
 * Advances the read index with wrap-around at MAXSIZE and shrinks
 * the stored count.  Returns 0 when the buffer is empty (note: this
 * is indistinguishable from a stored 0 byte).
 */
uchar8 rb_remove(RingBuffer * f)
{
	if (rb_empty(f))
		return 0;

	uchar8 value = f->buffer[f->rd_idx];

	f->rd_idx = (f->rd_idx + 1) % MAXSIZE;
	f->size = f->size - 1;

	return value;
}
/*
 * recv_sbd_ipc_frames - drain every non-empty SBD RX ring buffer.
 * @mld: the memory link device owning the SBD link.
 * @mst: snapshot argument kept for signature compatibility; not used here.
 *
 * PS (packet-service) channels are handed to the network RX path,
 * everything else goes through the generic IPC RX path.
 */
static void recv_sbd_ipc_frames(struct mem_link_device *mld,
				struct mem_snapshot *mst)
{
	struct sbd_link_device *sl = &mld->sbd_link_dev;
	int ch;

	for (ch = 0; ch < sl->num_channels; ch++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, ch, RX);

		/* nothing pending on this channel */
		if (unlikely(rb_empty(rb)))
			continue;

		if (!sipc_ps_ch(rb->ch)) {
			rx_ipc_frames_from_rb(rb);
			continue;
		}

		rx_net_frames_from_rb(rb);
	}
}
/**
@brief		receive all @b IPC message frames in all RXQs

For each SBD RX ring buffer:\n
  1) Skips the ring buffer if it is empty.\n
  2) For PS (network) channels: with CONFIG_LINK_DEVICE_NAPI, defers
     reception to the channel's NAPI context; otherwise drains the
     ring buffer directly via rx_net_frames_from_rb().\n
  3) For all other channels, receives the IPC frames directly.\n

@param mld	the pointer to a mem_link_device instance
*/
void recv_sbd_ipc_frames(struct mem_link_device *mld)
{
	struct sbd_link_device *sl = &mld->sbd_link_dev;
	int i;

	for (i = 0; i < sl->num_channels; i++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, RX);

		/* nothing pending on this channel */
		if (unlikely(rb_empty(rb)))
			continue;

		if (likely(sipc_ps_ch(rb->ch))) {
#ifdef CONFIG_LINK_DEVICE_NAPI
			//mld->link_dev.disable_irq(&mld->link_dev);

			/* hand PS RX to the NAPI softirq if not already scheduled */
			if (napi_schedule_prep(&rb->iod->napi))
				__napi_schedule(&rb->iod->napi);
#else
			rx_net_frames_from_rb(rb, 0);
#endif
		} else {
			rx_ipc_frames_from_rb(rb);
		}
	}
}
/*
 * rb_read - read *size bytes from the ring buffer into @memory.
 * @rb:     the ring buffer to read from.
 * @size:   in: number of bytes requested; set to 0 on failure.
 * @memory: destination buffer (must hold at least *size bytes).
 *
 * Handles the wrap-around case by copying in two chunks.
 * On failure (*size is zeroed) returns 0.
 *
 * NOTE(review): the success return value is inconsistent — the
 * contiguous path returns 0 while the wrapped paths return the byte
 * count.  Callers may depend on this, so it is left unchanged here.
 */
uint32_t rb_read(ring_t* rb, uint32_t *size, uint8_t* memory)
{
	/* BUG FIX: size/memory were dereferenced before any validation;
	 * without a valid size pointer we can't even report failure. */
	if (size == NULL || memory == NULL)
		return 0;

	if (rb == NULL || rb->buffer_start == NULL)
		goto failed_end;

	if (1 == rb_empty(rb))
		goto failed_end;

	rb->read_calls++; /* debug/statistics counter */

	uint32_t available_space = rb_free_space(rb);

	if ((rb->tail + *size) <= rb->head) {
		/* contiguous data ahead of tail: single copy */
		memcpy(memory, rb->tail, *size);
		rb->tail = rb->tail + *size;
		if (rb->tail == rb->buffer_end) {
			rb->tail = rb->buffer_start;
			rb->round_complete = 0;
		}
		return 0;
	} else if (available_space < rb->size) {
		int32_t remaining_space = rb->buffer_end - rb->tail;

		if (remaining_space >= *size) {
			/* shouldn't happen: this case is already covered by
			 * the contiguous branch above */
			memcpy(memory, rb->tail, *size);
			rb->tail += *size;
			if (rb->tail == rb->buffer_end) {
				rb->tail = rb->buffer_start;
				rb->round_complete = 0;
			}
			return *size;
		} else {
			/* wrapped read: copy the end of the buffer ... */
			memcpy(memory, rb->tail, remaining_space);
			rb->tail += remaining_space;

			if (rb->tail == rb->buffer_end)
				rb->tail = rb->buffer_start;
			else if (rb->tail > rb->buffer_end)
				LOG_PRINT("rb_read:Ups, this is bad!\n");

			/* ... then the remainder from the start */
			int32_t missing_size = *size - remaining_space;
			uint8_t* temp = memory + remaining_space;

			memcpy(temp, rb->tail, missing_size);
			rb->tail += missing_size;
			rb->round_complete = 0;

			/* BUG FIX: was `return size;` — returned the POINTER
			 * truncated to uint32_t instead of the byte count. */
			return *size;
		}
	} else {
		LOG_PRINT("rb_read:Nothing to Read from the buffer!\n");
	}

failed_end:
	*size = 0;
	return 0;
}
/*
 * sbd_tx_timer_func - hrtimer callback that flushes queued TX frames
 * into every SBD TX ring buffer.
 *
 * Always returns HRTIMER_NORESTART; if any work remains (RB busy/full,
 * link not up yet, or skbs still queued) the timer is re-armed manually
 * with TX_PERIOD_MS before returning.  An interrupt is sent to the CP
 * (MASK_SEND_DATA) when at least one frame was written.
 */
static enum hrtimer_restart sbd_tx_timer_func(struct hrtimer *timer)
{
	struct mem_link_device *mld;
	struct link_device *ld;
	struct modem_ctl *mc;
	struct sbd_link_device *sl;
	int i;
	bool need_schedule;
	u16 mask;
	unsigned long flags = 0;

	mld = container_of(timer, struct mem_link_device, sbd_tx_timer);
	ld = &mld->link_dev;
	mc = ld->mc;
	sl = &mld->sbd_link_dev;

	need_schedule = false;
	mask = 0;

	/* IPC not active: give up without re-arming the timer */
	spin_lock_irqsave(&mc->lock, flags);
	if (unlikely(!ipc_active(mld))) {
		spin_unlock_irqrestore(&mc->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&mc->lock, flags);

	/* optional link-state hook: link not ready yet, retry later */
	if (mld->link_active) {
		if (!mld->link_active(mld)) {
			need_schedule = true;
			goto exit;
		}
	}

	for (i = 0; i < sl->num_channels; i++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, TX);
		int ret;

		ret = tx_frames_to_rb(rb);

		if (unlikely(ret < 0)) {
			if (ret == -EBUSY || ret == -ENOSPC) {
				/* RB busy/full: retry on the next tick, but
				 * still signal CP to drain what's there */
				need_schedule = true;
				mask = MASK_SEND_DATA;
				continue;
			} else {
				/* unrecoverable RB state: trigger crash handling */
				modemctl_notify_event(MDM_CRASH_INVALID_RB);
				need_schedule = false;
				goto exit;
			}
		}

		/* at least one frame written: CP must be interrupted */
		if (ret > 0)
			mask = MASK_SEND_DATA;

		/* frames still queued for this channel: re-arm */
		if (!skb_queue_empty(&rb->skb_q))
			need_schedule = true;
	}

	if (!need_schedule) {
		/* second pass: any RB still holding unsent data forces a re-arm */
		for (i = 0; i < sl->num_channels; i++) {
			struct sbd_ring_buffer *rb;

			rb = sbd_id2rb(sl, i, TX);
			if (!rb_empty(rb)) {
				need_schedule = true;
				break;
			}
		}
	}

	if (mask) {
		/* re-check IPC state under the lock before raising the IRQ */
		spin_lock_irqsave(&mc->lock, flags);
		if (unlikely(!ipc_active(mld))) {
			spin_unlock_irqrestore(&mc->lock, flags);
			need_schedule = false;
			goto exit;
		}
		send_ipc_irq(mld, mask2int(mask));
		spin_unlock_irqrestore(&mc->lock, flags);
	}

exit:
	if (need_schedule) {
		ktime_t ktime = ktime_set(0, ms2ns(TX_PERIOD_MS));
		hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
	}

	return HRTIMER_NORESTART;
}