/*
 * Demarshal the fixed part of a GIOP 1.2 Reply header: request_id,
 * reply_status, then the service context list.  Leaves buf->cur aligned
 * to the 8-octet boundary required before the 1.2 message body.
 * Returns TRUE on error (truncated buffer), FALSE on success.
 */
static gboolean
giop_recv_buffer_demarshal_reply_1_2 (GIOPRecvBuffer *buf)
{
	gboolean swap = giop_msg_conversion_needed (buf);
	guint32  request_id;
	guint32  reply_status;

	buf->cur = ALIGN_ADDRESS (buf->cur, 4);
	if ((buf->cur + 8) > buf->end)
		return TRUE;

	request_id = *(guint32 *) buf->cur;
	buf->cur += 4;
	reply_status = *(guint32 *) buf->cur;
	buf->cur += 4;

	if (swap) {
		request_id   = GUINT32_SWAP_LE_BE (request_id);
		reply_status = GUINT32_SWAP_LE_BE (reply_status);
	}

	buf->msg.u.reply_1_2.request_id   = request_id;
	buf->msg.u.reply_1_2.reply_status = reply_status;

	buf->msg.u.reply_1_2.service_context._buffer = NULL;
	if (giop_IOP_ServiceContextList_demarshal (buf, &buf->msg.u.reply_1_2.service_context))
		return TRUE;

	/* GIOP 1.2 bodies start on an 8-octet boundary */
	buf->cur = ALIGN_ADDRESS (buf->cur, 8);

	return FALSE;
}
/*
 * ORBit-generated POA skeleton for the Freeamp::PlayNumber operation.
 * Demarshals the single 'number' string argument straight out of the
 * receive buffer (no copy), invokes the implementation callback, then
 * marshals and sends the (void) reply or a system exception.
 */
void
_ORBIT_skel_Freeamp_PlayNumber(POA_Freeamp * _ORBIT_servant,
			       GIOPRecvBuffer * _ORBIT_recv_buffer,
			       CORBA_Environment * ev,
			       void (*_impl_PlayNumber) (PortableServer_Servant _servant,
							 const CORBA_char * number,
							 CORBA_Environment * ev))
{
   CORBA_char *number;

   {				/* demarshalling */
      guchar *_ORBIT_curptr;
      /* _ORBIT_tmpvar_2 is only ever used inside sizeof(), so it is never
       * evaluated; the generator emits it for the element type only. */
      register CORBA_unsigned_long _ORBIT_tmpvar_2;
      /* string length (including the NUL), as read from the wire */
      CORBA_unsigned_long _ORBIT_tmpvar_3;

      _ORBIT_curptr = GIOP_RECV_BUFFER(_ORBIT_recv_buffer)->cur;
      if (giop_msg_conversion_needed(GIOP_MESSAGE_BUFFER(_ORBIT_recv_buffer))) {
	 /* byte order differs from host: swap the length word */
	 _ORBIT_curptr = ALIGN_ADDRESS(_ORBIT_curptr, 4);
	 (*((guint32 *) & (_ORBIT_tmpvar_3))) =
	    GUINT32_SWAP_LE_BE(*((guint32 *) _ORBIT_curptr));
	 _ORBIT_curptr += 4;
	 /* 'number' aliases the receive buffer; no allocation */
	 number = (void *) _ORBIT_curptr;
	 _ORBIT_curptr += sizeof(number[_ORBIT_tmpvar_2]) * _ORBIT_tmpvar_3;
      } else {
	 _ORBIT_curptr = ALIGN_ADDRESS(_ORBIT_curptr, 4);
	 _ORBIT_tmpvar_3 = *((CORBA_unsigned_long *) _ORBIT_curptr);
	 _ORBIT_curptr += 4;
	 number = (void *) _ORBIT_curptr;
	 _ORBIT_curptr += sizeof(number[_ORBIT_tmpvar_2]) * _ORBIT_tmpvar_3;
      }
      /* NOTE(review): no check that length or string fit inside the
       * receive buffer — generated code trusts the transport framing;
       * verify upstream validation. */
   }

   _impl_PlayNumber(_ORBIT_servant, number, ev);

   {				/* marshalling */
      register GIOPSendBuffer *_ORBIT_send_buffer;

      _ORBIT_send_buffer =
	 giop_send_reply_buffer_use(GIOP_MESSAGE_BUFFER(_ORBIT_recv_buffer)->
				    connection, NULL,
				    _ORBIT_recv_buffer->message.u.request.
				    request_id, ev->_major);
      if (_ORBIT_send_buffer) {
	 if (ev->_major == CORBA_NO_EXCEPTION) {
	    /* void operation: nothing to marshal on success */
	 } else
	    ORBit_send_system_exception(_ORBIT_send_buffer, ev);
	 giop_send_buffer_write(_ORBIT_send_buffer);
	 giop_send_buffer_unuse(_ORBIT_send_buffer);
      }
   }
}
/*
 * Consume a CDR encapsulation (4-byte length prefix + payload) from @buf
 * and return a fresh recv buffer wrapping just that payload.
 * Returns NULL if the prefix or payload would run past the end of @buf
 * (including pointer wrap-around for a huge length).
 */
GIOPRecvBuffer *
giop_recv_buffer_use_encaps_buf (GIOPRecvBuffer *buf)
{
	CORBA_unsigned_long size;
	guchar *data;

	buf->cur = ALIGN_ADDRESS (buf->cur, 4);

	if (buf->cur + 4 > buf->end)
		return NULL;

	size = *(CORBA_unsigned_long *) buf->cur;
	if (giop_msg_conversion_needed (buf))
		size = GUINT32_SWAP_LE_BE (size);
	buf->cur += 4;

	/* second comparison catches pointer overflow */
	if (buf->cur + size > buf->end || buf->cur + size < buf->cur)
		return NULL;

	data = buf->cur;
	buf->cur += size;

	return giop_recv_buffer_use_encaps (data, size);
}
/*
 * Demarshal a GIOP 1.1 LocateRequest: request_id followed by the object
 * key sequence.  The key buffer aliases the receive buffer (no copy,
 * _release = CORBA_FALSE).  Returns TRUE on truncation, FALSE on success.
 */
static gboolean
giop_recv_buffer_demarshal_locate_request_1_1 (GIOPRecvBuffer *buf)
{
	gboolean swap = giop_msg_conversion_needed (buf);
	guint32  id;
	guint32  key_len;

	buf->cur = ALIGN_ADDRESS (buf->cur, 4);
	if (buf->cur + 8 > buf->end)
		return TRUE;

	id = *(guint32 *) buf->cur;
	buf->cur += 4;
	key_len = *(guint32 *) buf->cur;
	buf->cur += 4;

	if (swap) {
		id      = GUINT32_SWAP_LE_BE (id);
		key_len = GUINT32_SWAP_LE_BE (key_len);
	}

	buf->msg.u.locate_request_1_1.request_id = id;
	buf->msg.u.locate_request_1_1.object_key._length = key_len;

	/* reject truncated or pointer-wrapping key lengths */
	if (buf->cur + key_len > buf->end || buf->cur + key_len < buf->cur)
		return TRUE;

	buf->msg.u.locate_request_1_1.object_key._buffer  = buf->cur;
	buf->msg.u.locate_request_1_1.object_key._release = CORBA_FALSE;
	buf->cur += key_len;

	return FALSE;
}
/* Hand-rolled demarshal for the GIOP_TargetAddress union — unions do not
 * fit the generic demarshal machinery.  Only GIOP_KeyAddr is supported;
 * the other discriminants are reported and treated as errors.
 * Returns TRUE on error, FALSE on success. */
static gboolean
giop_GIOP_TargetAddress_demarshal (GIOPRecvBuffer *buf, GIOP_TargetAddress *value)
{
	gboolean swap = giop_msg_conversion_needed (buf);

	/* union discriminator: unsigned short */
	buf->cur = ALIGN_ADDRESS (buf->cur, 2);
	if (buf->cur + 2 > buf->end)
		return TRUE;

	value->_d = swap ? GUINT16_SWAP_LE_BE (*(guint16 *) buf->cur)
			 : *(guint16 *) buf->cur;
	buf->cur += 2;

	switch (value->_d) {
	case GIOP_KeyAddr: {
		guint32 key_len;

		buf->cur = ALIGN_ADDRESS (buf->cur, 4);
		if (buf->cur + 4 > buf->end)
			return TRUE;

		/* key aliases the recv buffer; never freed by the sequence */
		value->_u.object_key._release = CORBA_FALSE;

		key_len = *(guint32 *) buf->cur;
		if (swap)
			key_len = GUINT32_SWAP_LE_BE (key_len);
		value->_u.object_key._length = key_len;
		buf->cur += 4;

		/* second comparison catches pointer wrap on huge lengths */
		if (buf->cur + key_len > buf->end || buf->cur + key_len < buf->cur)
			return TRUE;

		value->_u.object_key._buffer = buf->cur;
		buf->cur += key_len;
		break;
	}
	case GIOP_ProfileAddr:
		g_warning ("XXX FIXME GIOP_ProfileAddr not handled");
		return TRUE;
	case GIOP_ReferenceAddr:
		g_warning ("XXX FIXME GIOP_ReferenceAddr not handled");
		return TRUE;
	}

	return FALSE;
}
/*
 * Demarshal the header of a GIOP 1.2 Request into buf->msg.u.request_1_2:
 * request_id, response_flags, target address, operation name and service
 * context list.  Operation name and key alias the receive buffer.
 * Returns TRUE on error (truncated/malformed message), FALSE on success.
 */
static gboolean
giop_recv_buffer_demarshal_request_1_2(GIOPRecvBuffer *buf)
{
	gboolean do_bswap = giop_msg_conversion_needed(buf);
	CORBA_unsigned_long oplen;

	buf->cur = ALIGN_ADDRESS(buf->cur, 4);
	/* need request_id (4) plus flags + 3 reserved octets (4) */
	if((buf->cur + 8) > buf->end)
		return TRUE;

	if(do_bswap)
		buf->msg.u.request_1_2.request_id = GUINT32_SWAP_LE_BE(*((guint32 *)buf->cur));
	else
		buf->msg.u.request_1_2.request_id = *((guint32 *) buf->cur);
	buf->cur += 4;

	/* single octet of flags; skip the 3 reserved octets with it */
	buf->msg.u.request_1_2.response_flags = *buf->cur;
	buf->cur += 4;

	if(giop_GIOP_TargetAddress_demarshal(buf, &buf->msg.u.request_1_2.target))
		return TRUE;

	buf->cur = ALIGN_ADDRESS(buf->cur, 4);
	if((buf->cur + 4) > buf->end)
		return TRUE;

	if(do_bswap)
		oplen = GUINT32_SWAP_LE_BE(*((guint32 *)buf->cur));
	else
		oplen = *((guint32 *)buf->cur);
	buf->cur += 4;

	/* second comparison catches pointer wrap for huge oplen */
	if((buf->cur + oplen) > buf->end
	   || (buf->cur + oplen) < buf->cur)
		return TRUE;

	/* operation name points into the receive buffer — not copied */
	buf->msg.u.request_1_2.operation = (CORBA_char *) buf->cur;
	buf->cur += oplen;

	buf->msg.u.request_1_2.service_context._buffer = NULL;
	if(giop_IOP_ServiceContextList_demarshal(buf, &buf->msg.u.request_1_2.service_context))
		return TRUE;

	/* GIOP 1.2 message bodies start on an 8-octet boundary */
	buf->cur = ALIGN_ADDRESS(buf->cur, 8);

	return FALSE;
}
/*
 * Read @bsize bytes from the codec's buffer at the (aligned) read cursor
 * into @dest, byte-swapping when the data endianness differs from the
 * host's, then advance the cursor.  Always returns CORBA_TRUE.
 */
static CORBA_boolean
CDR_buffer_getn(CDR_Codec *codec, void *dest, int bsize)
{
	/* align the read offset to the natural boundary of the type */
	codec->rptr = (unsigned long)ALIGN_ADDRESS(codec->rptr, bsize);
	if(codec->host_endian==codec->data_endian)
		memcpy(dest, codec->buffer + codec->rptr, bsize);
	else
		rtps_byteswap(dest, codec->buffer + codec->rptr, bsize);
	codec->rptr += bsize;
	/* NOTE(review): unlike CDR_buffer_putn there is no bounds check
	 * here, so a short buffer is over-read — verify that all callers
	 * validate message length before decoding. */
	return CORBA_TRUE;
}
/*
 * Demarshal a CancelRequest message: a single request_id.
 * Returns TRUE if the buffer is too short, FALSE on success.
 */
static gboolean
giop_recv_buffer_demarshal_cancel (GIOPRecvBuffer *buf)
{
	guint32 id;

	buf->cur = ALIGN_ADDRESS (buf->cur, 4);
	if (buf->cur + 4 > buf->end)
		return TRUE;

	id = *(guint32 *) buf->cur;
	if (giop_msg_conversion_needed (buf))
		id = GUINT32_SWAP_LE_BE (id);

	buf->msg.u.cancel_request.request_id = id;
	buf->cur += 4;

	return FALSE;
}
/*
 * Demarshal a GIOP 1.2 LocateRequest: request_id followed by the
 * TargetAddress union.  Returns TRUE on error, FALSE on success.
 */
static gboolean
giop_recv_buffer_demarshal_locate_request_1_2 (GIOPRecvBuffer *buf)
{
	guint32 id;

	buf->cur = ALIGN_ADDRESS (buf->cur, 4);
	if (buf->cur + 4 > buf->end)
		return TRUE;

	id = *(guint32 *) buf->cur;
	if (giop_msg_conversion_needed (buf))
		id = GUINT32_SWAP_LE_BE (id);

	buf->msg.u.locate_request_1_2.request_id = id;
	buf->cur += 4;

	/* target address carries its own bounds checking */
	return giop_GIOP_TargetAddress_demarshal (buf, &buf->msg.u.locate_request_1_2.target);
}
/*
 * Append @bsize bytes of @datum to the codec's buffer at the next
 * bsize-aligned write offset, zero-filling any alignment padding and
 * byte-swapping when data endianness differs from the host's.
 * Returns CORBA_FALSE if the write would exceed wptr_max.
 */
static CORBA_boolean
CDR_buffer_putn(CDR_Codec *codec, void *datum, int bsize)
{
	unsigned long aligned;
	unsigned long pos;

	aligned = (unsigned long)ALIGN_ADDRESS(codec->wptr, bsize);

	if (aligned + bsize > codec->wptr_max)
		return CORBA_FALSE;

	/* zero out the gap introduced by alignment */
	for (pos = codec->wptr; pos < aligned; pos++)
		codec->buffer[pos] = '\0';
	codec->wptr = aligned;

	if (codec->host_endian == codec->data_endian)
		memcpy(codec->buffer + codec->wptr, datum, bsize);
	else
		rtps_byteswap(codec->buffer + codec->wptr, datum, bsize);
	codec->wptr += bsize;

	return CORBA_TRUE;
}
/*
 * Demarshal a GIOP 1.1 LocateReply header: request_id and locate_status.
 * Returns TRUE if fewer than 8 bytes remain, FALSE on success.
 */
static gboolean
giop_recv_buffer_demarshal_locate_reply_1_1 (GIOPRecvBuffer *buf)
{
	guint32 id;
	guint32 status;

	buf->cur = ALIGN_ADDRESS (buf->cur, 4);
	if (buf->cur + 8 > buf->end)
		return TRUE;

	id = *(guint32 *) buf->cur;
	buf->cur += 4;
	status = *(guint32 *) buf->cur;
	buf->cur += 4;

	if (giop_msg_conversion_needed (buf)) {
		id     = GUINT32_SWAP_LE_BE (id);
		status = GUINT32_SWAP_LE_BE (status);
	}

	buf->msg.u.locate_reply_1_1.request_id    = id;
	buf->msg.u.locate_reply_1_1.locate_status = status;

	return FALSE;
}
/*
 * Demarshal a CORBA::Context (a counted sequence of key/value strings)
 * from @buf into @initme, chaining it to @parent.  Key and value pointers
 * alias the receive buffer — they are not copied.
 * Returns FALSE on success, TRUE on error (mappings are freed on error).
 */
gboolean
ORBit_Context_demarshal (CORBA_Context parent, CORBA_Context initme,
			 GIOPRecvBuffer *buf)
{
	CORBA_unsigned_long nstrings, keylen, vallen, i;
	char *key, *value;

	initme->parent.refs = ORBIT_REFCOUNT_STATIC;
	initme->parent_ctx = parent;
	initme->mappings = NULL;

	buf->cur = ALIGN_ADDRESS (buf->cur, 4);

	if ((buf->cur + 4) > buf->end)
		goto errout;

	nstrings = *(CORBA_unsigned_long *) buf->cur;
	if (giop_msg_conversion_needed (buf))
		nstrings = GUINT32_SWAP_LE_BE (nstrings);
	buf->cur += 4;

	/* Sanity bound: each pair needs at least two 4-byte length words.
	 * Use division instead of 'cur + nstrings * 8 > end' — the old
	 * multiply could overflow 32 bits (and wrap the pointer sum) for a
	 * hostile nstrings, defeating the bounds check entirely. */
	if (nstrings > (CORBA_unsigned_long) (buf->end - buf->cur) / 8)
		goto errout;

	if (nstrings)
		initme->mappings = g_hash_table_new (g_str_hash, g_str_equal);
	else
		goto errout;	/* an empty context is treated as an error */

	for (i = 0; i < nstrings; ) {
		/* key length + key bytes (aliased, not copied) */
		buf->cur = ALIGN_ADDRESS (buf->cur, 4);
		if ((buf->cur + 4) > buf->end)
			goto errout;
		keylen = *(CORBA_unsigned_long *) buf->cur;
		if (giop_msg_conversion_needed (buf))
			keylen = GUINT32_SWAP_LE_BE (keylen);
		buf->cur += 4;
		if ((buf->cur + keylen) > buf->end
		    || (buf->cur + keylen) < buf->cur)
			goto errout;
		key = buf->cur;
		buf->cur += keylen;

		i++;
		if (i >= nstrings)
			break;	/* odd trailing key with no value */

		/* value length + value bytes */
		buf->cur = ALIGN_ADDRESS (buf->cur, 4);
		if ((buf->cur + 4) > buf->end)
			goto errout;
		vallen = *(CORBA_unsigned_long *) buf->cur;
		if (giop_msg_conversion_needed (buf))
			vallen = GUINT32_SWAP_LE_BE (vallen);
		buf->cur += 4;
		if ((buf->cur + vallen) > buf->end
		    || (buf->cur + vallen) < buf->cur)
			goto errout;
		value = buf->cur;
		buf->cur += vallen;

		i++;
		g_hash_table_insert (initme->mappings, key, value);
	}

	return FALSE;

 errout:
	if (initme->mappings)
		g_hash_table_destroy (initme->mappings);
	return TRUE;
}
/*\ acquire exclusive LOCK to MEMORY area <pstart,pend> owned by process "proc"
 *  . only one area can be locked at a time by the calling process
 *  . must unlock it with armci_unlockmem
\*/
void armci_lockmem(void *start, void *end, int proc)
{
     register void* pstart, *pend;
     register int slot, avail=0;
     int turn=0, conflict=0;
     memlock_t *memlock_table;
#if defined(CLUSTER) && !defined(SGIALTIX)
     /* map target process onto one of the per-node native locks */
     int lock = (proc-armci_clus_info[armci_clus_id(proc)].master)%NUM_LOCKS;
#else
     int lock = 0;
#endif

#ifdef CORRECT_PTR
     if(! *armci_use_memlock_table){
       /* if offset invalid, use dumb locking scheme ignoring addresses */
       armci_lockmem_(start, end, proc);
       return;
     }

# ifndef SGIALTIX
     /* when processes are attached to a shmem region at different addresses,
      * addresses written to memlock table must be adjusted to the node master */
     if(armci_mem_offset){
       start = armci_mem_offset + (char*)start;
       end = armci_mem_offset + (char*)end;
     }
# endif
#endif
     if(DEBUG_){
       printf("%d: calling armci_lockmem for %d range %p -%p\n",
              armci_me, proc, start,end);
       fflush(stdout);
     }
     memlock_table = (memlock_t*)memlock_table_array[proc];

#ifdef ALIGN_ADDRESS
     /* align address range on cache line boundary to avoid false sharing */
     pstart = ALIGN_ADDRESS(start);
     pend = CALGN -1 + ALIGN_ADDRESS(end);
#else
     pstart=start;
     pend =end;
#endif

#ifdef CRAY_SHMEM
     { /* adjust according the remote process raw address */
        long bytes = (long) ((char*)pend-(char*)pstart);
        extern void* armci_shmalloc_remote_addr(void *ptr, int proc);
        pstart = armci_shmalloc_remote_addr(pstart, proc);
        pend = (char*)pstart + bytes;
     }
#endif

     /* Spin until a free slot exists and no stored range overlaps ours.
      * NOTE(review): 'table' and 'locked_slot' appear to be file-scope
      * state shared with armci_unlockmem — confirm in the enclosing file. */
     while(1){
        NATIVE_LOCK(lock,proc);

        /* fetch the owner's memlock table into the local copy */
        armci_get(memlock_table, table, sizeof(table), proc);
        /* armci_copy(memlock_table, table, sizeof(table));*/

        /* inspect the table */
        conflict = 0;
        avail =-1;
        for(slot = 0; slot < MAX_SLOTS; slot ++){

            /* nonzero starting address means the slot is occupied */
            if(table[slot].start == NULL){

                /* remember a free slot to store address range */
                avail = slot;

            }else{
                /* check for conflict: overlap between stored and current range */
                if( (pstart >= table[slot].start && pstart <= table[slot].end) ||
                    (pend >= table[slot].start && pend <= table[slot].end) ){
                    conflict = 1;
                    break;
                }
                /* printf("%d: locking %ld-%ld (%d) conflict\n", armci_me, */
            }
        }

        if(avail != -1 && !conflict) break;

        /* busy or conflicting: release the native lock and back off */
        NATIVE_UNLOCK(lock,proc);
        armci_waitsome( ++turn );
     }

     /* we got the memory lock: enter address into the table */
     table[avail].start = pstart;
     table[avail].end = pend;
     armci_put(table+avail,memlock_table+avail,sizeof(memlock_t),proc);
     FENCE_NODE(proc);

     NATIVE_UNLOCK(lock,proc);
     locked_slot = avail;
}
/*\ acquire exclusive LOCK to MEMORY area <pstart,pend> owned by process "proc"
 *  . only one area can be locked at a time by the calling process
 *  . must unlock it with armci_unlockmem
\*/
void armci_lockmem(void *start, void *end, int proc)
{
#ifdef ARMCIX
    /* ARMCIX provides its own implementation; delegate entirely */
    ARMCIX_Lockmem (start, end, proc);
#else
    register void* pstart, *pend;
    register int slot, avail=0;
    int turn=0, conflict=0;
    memlock_t *memlock_table;
#if defined(CLUSTER) && !defined(SGIALTIX)
    /* map target process onto one of the per-node native locks */
    int lock = (proc-armci_clus_info[armci_clus_id(proc)].master)%NUM_LOCKS;
#else
    int lock = 0;
#endif

#ifdef CORRECT_PTR
    if(! *armci_use_memlock_table)
    {
        /* if offset invalid, use dumb locking scheme ignoring addresses */
        armci_lockmem_(start, end, proc);
        return;
    }

# ifndef SGIALTIX
    /* when processes are attached to a shmem region at different addresses,
     * addresses written to memlock table must be adjusted to the node master */
    if(armci_mem_offset)
    {
        start = armci_mem_offset + (char*)start;
        end = armci_mem_offset + (char*)end;
    }
# endif
#endif
    if(DEBUG_)
    {
        printf("%d: calling armci_lockmem for %d range %p -%p\n",
               armci_me, proc, start,end);
        fflush(stdout);
    }
    memlock_table = (memlock_t*)memlock_table_array[proc];

#ifdef ALIGN_ADDRESS
    /* align address range on cache line boundary to avoid false sharing */
    pstart = ALIGN_ADDRESS(start);
    pend = CALGN -1 + ALIGN_ADDRESS(end);
#else
    pstart=start;
    pend =end;
#endif

#ifdef CRAY_SHMEM
    { /* adjust according the remote process raw address */
        long bytes = (long) ((char*)pend-(char*)pstart);
        extern void* armci_shmalloc_remote_addr(void *ptr, int proc);
        pstart = armci_shmalloc_remote_addr(pstart, proc);
        pend = (char*)pstart + bytes;
    }
#endif

#ifdef SGIALTIX
    if (proc == armci_me) {
        pstart = shmem_ptr(pstart,armci_me);
        pend = shmem_ptr(pend,armci_me);
    }
    /* In SGI Altix processes are attached to a shmem region at different
       addresses. Addresses written to memlock table must be adjusted to
       the node master */
    if(ARMCI_Uses_shm())
    {
        int i, seg_id=-1;
        size_t tile_size,offset;
        void *start_addr, *end_addr;

        /* find the shmem segment that contains our range */
        for(i=0; i<seg_count; i++)
        {
            tile_size = armci_memoffset_table[i].tile_size;
            start_addr = (void*) ((char*)armci_memoffset_table[i].seg_addr +
                                  proc*tile_size);
            end_addr = (void*) ((char*)start_addr +
                                armci_memoffset_table[i].seg_size);
            /* CHECK: because of too much "span" in armci_lockmem_patch in
             * strided.c, it is not possible to have condition as (commented):*/
            /*if(pstart>=start_addr && pend<=end_addr) {seg_id=i; break;}*/
            if(pstart >= start_addr && pstart <= end_addr)
            {
                seg_id=i;
                break;
            }
        }
        if(seg_id==-1)
            armci_die("armci_lockmem: Invalid segment", seg_id);

        offset = armci_memoffset_table[seg_id].mem_offset;
        pstart = ((char*)pstart + offset);
        pend = ((char*)pend + offset);
    }
#endif

    /* Spin until a free slot exists and no stored range overlaps ours.
     * NOTE(review): 'table' and 'locked_slot' appear to be file-scope
     * state shared with armci_unlockmem — confirm in the enclosing file. */
    while(1)
    {
        NATIVE_LOCK(lock,proc);

        /* fetch the owner's memlock table into the local copy */
        armci_get(memlock_table, table, sizeof(table), proc);
        /* armci_copy(memlock_table, table, sizeof(table));*/

        /* inspect the table */
        conflict = 0;
        avail =-1;
        for(slot = 0; slot < MAX_SLOTS; slot ++)
        {
            /* nonzero starting address means the slot is occupied */
            if(table[slot].start == NULL)
            {
                /* remember a free slot to store address range */
                avail = slot;
            }
            else
            {
                /*check for conflict: overlap between stored and current range*/
                if( (pstart >= table[slot].start && pstart <= table[slot].end) ||
                    (pend >= table[slot].start && pend <= table[slot].end) )
                {
                    conflict = 1;
                    break;
                }
                /* printf("%d: locking %ld-%ld (%d) conflict\n", armci_me, */
            }
        }

        if(avail != -1 && !conflict)
            break;

        /* busy or conflicting: release the native lock and back off */
        NATIVE_UNLOCK(lock,proc);
        armci_waitsome( ++turn );
    }

    /* we got the memory lock: enter address into the table */
    table[avail].start = pstart;
    table[avail].end = pend;
    armci_put(table+avail,memlock_table+avail,sizeof(memlock_t),proc);
    FENCE_NODE(proc);

    NATIVE_UNLOCK(lock,proc);
    locked_slot = avail;

#endif /* ! ARMCIX */
}
static int init_tdm(struct tdm_priv *priv) { u8 *buf; int i; int buf_size; dma_addr_t physaddr = 0; int ret = 0; struct tdm_adapter *adap; if (!priv) { pr_err("%s: Invalid handle\n", __func__); return -EINVAL; } adap = priv->adap; /* Allocate memory for Rx/Tx buffer according to active time slots BufferSize = NUM_OF_TDM_BUF * NUM_SAMPLES_PER_FRAME * slot_width * num_ch */ /*Allocating Rx Buffer*/ buf_size = TDM_BUF_SIZE(adap->adapt_cfg.num_ch, adap->adapt_cfg.slot_width, adap->adapt_cfg.num_frames); buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_ip; } priv->dma_input_paddr = physaddr; priv->dma_input_vaddr = buf; priv->tdm_input_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES); /*Allocating Tx Buffer*/ buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_op; } priv->dma_output_paddr = physaddr; priv->dma_output_vaddr = buf; priv->tdm_output_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES); /* allocate memory for TCD buffer discriptors */ buf = dma_alloc_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_rx; } memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE); priv->dma_rx_tcd_paddr = physaddr; priv->dma_rx_tcd_vaddr = buf; for (i = 0; i < NUM_OF_TDM_BUF; i++) { priv->dma_rx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES); buf += TCD_BUFFER_SIZE; } buf = dma_alloc_coherent(priv->device, 3 * TCD_BUFFER_SIZE, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_tx; } memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE); priv->dma_tx_tcd_paddr = physaddr; priv->dma_tx_tcd_vaddr = buf; for (i = 0; i < NUM_OF_TDM_BUF; i++) { priv->dma_tx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES); buf += TCD_BUFFER_SIZE; } priv->phase_rx = 0; priv->phase_tx = 0; return 0; err_alloc_tx: dma_free_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE, priv->dma_rx_tcd_vaddr, 
priv->dma_rx_tcd_paddr); err_alloc_rx: dma_free_coherent(priv->device, buf_size, priv->dma_output_vaddr, priv->dma_output_paddr); err_alloc_op: dma_free_coherent(priv->device, buf_size, priv->dma_input_vaddr, priv->dma_input_paddr); err_alloc_ip: return ret; }
{ return &ev->_any; } void ORBit_handle_exception(GIOPRecvBuffer *rb, CORBA_Environment *ev, const ORBit_exception_demarshal_info *ex_info, CORBA_ORB orb) { CORBA_SystemException *new; CORBA_unsigned_long len, completion_status, reply_status; CORBA_char *my_repoid; CORBA_exception_free(ev); rb->cur = ALIGN_ADDRESS(rb->cur, sizeof(len)); if((rb->cur + 4) > rb->end) goto errout; len = *(CORBA_unsigned_long *)rb->cur; rb->cur += 4; if(giop_msg_conversion_needed(rb)) len = GUINT32_SWAP_LE_BE(len); if(len) { my_repoid = rb->cur; rb->cur += len; } else my_repoid = NULL;
/**
 * giop_recv_buffer_handle_fragmented:
 * @buf: pointer to recv buffer pointer
 * @cnx: current connection.
 *
 * This will append @buf to the right list of buffers
 * on the connection, forming a complete message, and
 * re-write *@buf to the first buffer in the chain.
 *
 * Return value: TRUE on error else FALSE
 **/
static gboolean
giop_recv_buffer_handle_fragmented (GIOPRecvBuffer **ret_buf,
				    GIOPConnection  *cnx)
{
	GList *list;
	gboolean giop_1_1;
	gboolean error = FALSE;
	CORBA_long message_id;
	GIOPRecvBuffer *buf = *ret_buf;

	/* GIOP 1.1 fragments carry no message id of their own */
	giop_1_1 = (buf->giop_version == GIOP_1_1);

	switch (buf->msg.header.message_type) {
	case GIOP_REPLY:
	case GIOP_LOCATEREPLY:
	case GIOP_REQUEST:
	case GIOP_LOCATEREQUEST:
		/* first fragment of the message: id is in the header */
		message_id = giop_recv_buffer_get_request_id (buf);
		break;
	case GIOP_FRAGMENT:
		if (!giop_1_1) {
			/* GIOP >= 1.2 fragment: id prefixes the payload */
			buf->cur = ALIGN_ADDRESS (buf->cur, 4);
			if ((buf->cur + 4) > buf->end) {
				dprintf (ERRORS, "incoming bogus fragment length");
				return TRUE;
			}
			if (giop_msg_conversion_needed (buf))
				message_id = GUINT32_SWAP_LE_BE (*((guint32 *)buf->cur));
			else
				message_id = *(guint32 *) buf->cur;
			buf->cur += 4;
		} else
			message_id = 0;
		break;
	default:
		dprintf (ERRORS, "Bogus fragment packet type %d",
			 buf->msg.header.message_type);
		return TRUE;
	}

	if (!(list = giop_connection_get_frag (cnx, message_id, giop_1_1))) {
		/* no chain yet: this must be a head with more to follow */
		if (!MORE_FRAGMENTS_FOLLOW (buf))
			return TRUE;
		giop_connection_add_frag (cnx, buf);
	} else {
		GIOPRecvBuffer *head = list->data;

		/* caller continues with the head of the chain */
		*ret_buf = head;
		g_assert (head->msg.header.message_type != GIOP_FRAGMENT);

		/* track total length on head node */
		/* (end - cur) to account for fragment (msg id) header */
		head->msg.header.message_size += (buf->end - buf->cur);
		list = g_list_append (list, buf);

		/* cap unauthenticated peers to the initial size limit */
		if (!cnx->parent.is_auth &&
		    buf->msg.header.message_size > giop_initial_msg_size_limit) {
			dprintf (ERRORS, "Message exceeded initial size limit\n");
			error = TRUE;
			giop_connection_remove_frag (cnx, list);
		}

		if (!MORE_FRAGMENTS_FOLLOW (buf)) {
			g_assert (buf->msg.header.message_type == GIOP_FRAGMENT);

			/* concat all fragments - re-write & continue */
			error = concat_frags (list);

			giop_connection_remove_frag (cnx, list);
		}
	}

	return error;
}
static int init_tdm(struct tdm_priv *priv) { u8 *buf; int i; int buf_size; dma_addr_t physaddr = 0; int ret = 0; /* Allocate memory for Rx/Tx buffer according to active time slots BufferSize = NUM_OF_TDM_BUF*NUM_OF_FRAMES*Active_CH */ buf_size = TDM_BUF_SIZE(priv->cfg.num_ch, priv->cfg.ch_width, priv->cfg.num_frames); buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_ip; } priv->dma_input_paddr = physaddr; priv->dma_input_vaddr = buf; priv->tdm_input_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES); buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_op; } priv->dma_output_paddr = physaddr; priv->dma_output_vaddr = buf; priv->tdm_output_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES); /* allocate memory for TCD buffer discriptors */ buf = dma_alloc_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_rx; } memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE); priv->dma_rx_tcd_paddr = physaddr; priv->dma_rx_tcd_vaddr = buf; for (i = 0; i < NUM_OF_TDM_BUF; i++) { priv->dma_rx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES); buf += TCD_BUFFER_SIZE; } buf = dma_alloc_coherent(priv->device, 3 * TCD_BUFFER_SIZE, &physaddr, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_alloc_tx; } memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE); priv->dma_tx_tcd_paddr = physaddr; priv->dma_tx_tcd_vaddr = buf; for (i = 0; i < NUM_OF_TDM_BUF; i++) { priv->dma_tx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES); buf += TCD_BUFFER_SIZE; } priv->phase_rx = 0; priv->phase_tx = 0; return 0; err_alloc_tx: dma_free_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE, priv->dma_rx_tcd_vaddr, priv->dma_rx_tcd_paddr); err_alloc_rx: dma_free_coherent(priv->device, buf_size, priv->dma_output_vaddr, priv->dma_output_paddr); err_alloc_op: dma_free_coherent(priv->device, buf_size, priv->dma_input_vaddr, 
priv->dma_input_paddr); err_alloc_ip: return ret; }
/*
 * Demarshal the header of a GIOP 1.1 Request into buf->msg.u.request_1_1:
 * service context list, request_id, response_expected, object key,
 * operation name, and requesting principal.  All sequences/strings alias
 * the receive buffer (not copied, _release = CORBA_FALSE).
 * Returns TRUE on error (truncated/malformed message), FALSE on success.
 */
static gboolean
giop_recv_buffer_demarshal_request_1_1(GIOPRecvBuffer *buf)
{
	gboolean do_bswap = giop_msg_conversion_needed(buf);
	CORBA_unsigned_long oplen;

	/* in GIOP 1.1 the service context list precedes the header fields */
	buf->msg.u.request_1_1.service_context._buffer = NULL;
	if(giop_IOP_ServiceContextList_demarshal(buf, &buf->msg.u.request_1_1.service_context))
		return TRUE;

	buf->cur = ALIGN_ADDRESS(buf->cur, 4);
	/* request_id (4) + response_expected/padding (4) + key length (4) */
	if((buf->cur + 12) > buf->end)
		return TRUE;

	if(do_bswap)
		buf->msg.u.request_1_1.request_id = GUINT32_SWAP_LE_BE(*((guint32 *)buf->cur));
	else
		buf->msg.u.request_1_1.request_id = *((guint32 *)buf->cur);
	buf->cur += 4;

	/* single boolean octet; skip its 3 padding octets with it */
	buf->msg.u.request_1_1.response_expected = *buf->cur;
	buf->cur += 4;

	/* object key: length then raw bytes aliasing the receive buffer */
	if(do_bswap)
		buf->msg.u.request_1_1.object_key._length = GUINT32_SWAP_LE_BE(*((guint32 *)buf->cur));
	else
		buf->msg.u.request_1_1.object_key._length = *((guint32 *)buf->cur);
	buf->cur += 4;

	/* second comparison catches pointer wrap for a huge length */
	if((buf->cur + buf->msg.u.request_1_1.object_key._length) > buf->end ||
	   (buf->cur + buf->msg.u.request_1_1.object_key._length) < buf->cur)
		return TRUE;

	buf->msg.u.request_1_1.object_key._buffer = buf->cur;
	buf->msg.u.request_1_1.object_key._release = CORBA_FALSE;
	buf->cur += buf->msg.u.request_1_1.object_key._length;

	/* operation name: length-prefixed, aliases the receive buffer */
	buf->cur = ALIGN_ADDRESS(buf->cur, 4);
	if((buf->cur + 4) > buf->end)
		return TRUE;
	if(do_bswap)
		oplen = GUINT32_SWAP_LE_BE(*((guint32 *)buf->cur));
	else
		oplen = *((guint32 *)buf->cur);
	buf->cur += 4;

	if((buf->cur + oplen) > buf->end ||
	   (buf->cur + oplen) < buf->cur)
		return TRUE;

	buf->msg.u.request_1_1.operation = (CORBA_char *) buf->cur;
	buf->cur += oplen;

	/* requesting principal: length-prefixed octet sequence */
	buf->cur = ALIGN_ADDRESS(buf->cur, 4);
	if((buf->cur + 4) > buf->end)
		return TRUE;
	if(do_bswap)
		buf->msg.u.request_1_1.requesting_principal._length = GUINT32_SWAP_LE_BE(*((guint32 *)buf->cur));
	else
		buf->msg.u.request_1_1.requesting_principal._length = *((guint32 *)buf->cur);
	buf->cur += 4;

	if((buf->cur + buf->msg.u.request_1_1.requesting_principal._length) > buf->end ||
	   (buf->cur + buf->msg.u.request_1_1.requesting_principal._length) < buf->cur)
		return TRUE;

	buf->msg.u.request_1_1.requesting_principal._buffer = buf->cur;
	buf->msg.u.request_1_1.requesting_principal._release = CORBA_FALSE;
	buf->cur += buf->msg.u.request_1_1.requesting_principal._length;

	return FALSE;
}