//! Get size of exported data. uint grit_xp_size(GritRec *gr) { uint size= 0; uint extra=0; if(gr->bRiff) { size= 8+8+8+sizeof(GrfHeader); // RIFF + GRF + HDR overhead. extra= 8; } if(gr->gfxProcMode == GRIT_EXPORT) size += ALIGN4(rec_size(&gr->_gfxRec)) + extra; if(gr->mapProcMode == GRIT_EXPORT) size += ALIGN4(rec_size(&gr->_mapRec)) + extra; if(gr->mapProcMode == GRIT_EXPORT && gr->isMetaTiled()) size += ALIGN4(rec_size(&gr->_metaRec)) + extra; if(gr->palProcMode == GRIT_EXPORT) size += ALIGN4(rec_size(&gr->_palRec)) + extra; return size; }
void E32ImageFile::Adjust(TInt aSize, TBool aAllowShrink)
//
// Adjust the size of allocated data and fix the member data.
// New storage is zero-filled; iHdr/iOrigHdr are re-pointed at the
// (possibly moved) buffer.
//
	{
	TInt asize = ALIGN4(aSize);
	if (asize == iSize)
		return;
	if (iSize == 0)
		{
		// Fix: check malloc before memset (old code zeroed a NULL
		// pointer on allocation failure).
		iData = (char*)malloc(asize);
		if (iData)
			{
			iSize = asize;
			memset(iData, 0, iSize);
			}
		}
	else if (aAllowShrink || asize > iSize)
		{
		TInt oldsize = iSize;
		// Fix: keep the original buffer if realloc fails instead of
		// overwriting iData with NULL (which leaked the old block).
		char *newData = (char*)realloc(iData, asize);
		if (newData)
			{
			iData = newData;
			iSize = asize;
			if (iSize > oldsize)
				memset(iData + oldsize, 0, iSize - oldsize);
			}
		else
			{
			free(iData);
			iData = NULL;
			}
		}
	if (!iData)
		iSize = 0;
	// Keep the header aliases pointing into the current buffer.
	if (iHdr && iHdr == iOrigHdr)
		iHdr = (E32ImageHeaderV*)iData;
	iOrigHdr = (E32ImageHeader*)iData;
	}
/*
 * Decompress an LZW stream of 'in_len' bytes from 'f' into 'buf'
 * (expected to hold 'orig_len' bytes).  The file position is advanced
 * past the consumed input (4-byte aligned when NOMARCH_QUIRK_ALIGN4 is
 * set in 'q').  Returns 'buf' on success, NULL on failure.
 */
unsigned char *read_lzw_dynamic(FILE *f, uint8 *buf, int max_bits, int use_rle,
                                unsigned long in_len, unsigned long orig_len,
                                int q)
{
    uint8 *buf2, *b;
    int pos;
    int size;
    struct local_data *data;

    if ((data = malloc(sizeof(struct local_data))) == NULL) {
        return NULL;
    }
    if ((buf2 = malloc(in_len)) == NULL) {
        //perror("read_lzw_dynamic");
        free(data);             /* fix: 'data' leaked on this path */
        return NULL;
    }

    pos = ftell(f);
    fread(buf2, 1, in_len, f);
    b = _convert_lzw_dynamic(buf2, max_bits, use_rle, in_len, orig_len, q,
                             data);
    /* fix: guard against a failed decode before copying from it */
    if (b == NULL) {
        free(buf2);
        free(data);
        return NULL;
    }
    memcpy(buf, b, orig_len);

    /* reposition the stream just past the bytes actually consumed */
    size = q & NOMARCH_QUIRK_ALIGN4 ? ALIGN4(data->nomarch_input_size)
                                    : data->nomarch_input_size;
    fseek(f, pos + size, SEEK_SET);

    free(b);
    free(buf2);
    free(data);
    return buf;
}
/**
*******************************************************************************
*
* @brief
*  Used to get MV bank size for a given number of luma samples
*
* @par Description:
*  For given number of luma samples one MV bank size is computed.
*  Each MV bank includes pu_map and pu_t for all the min PUs(4x4) in a
*  picture, plus per-CTB index and slice-id tables.
*
* @param[in] num_luma_samples
*  Max number of luma pixels in the frame
*
* @returns  Total MV Bank size
*
* @remarks
*
*******************************************************************************
*/
WORD32 ihevcd_get_pic_mv_bank_size(WORD32 num_luma_samples)
{
    WORD32 num_pu;
    WORD32 num_ctb;
    WORD32 total;

    num_pu = num_luma_samples / (MIN_PU_SIZE * MIN_PU_SIZE);
    num_ctb = num_luma_samples / (MIN_CTB_SIZE * MIN_CTB_SIZE);

    /* pu_t start index for each CTB; one extra entry so the PU count of
     * the last CTB can be computed by differencing */
    total = (num_ctb + 1) * sizeof(WORD32);

    /* pu_map: one byte per minimum (4x4) PU */
    total += num_pu;

    /* pu_t storage for every PU */
    total += num_pu * sizeof(pu_t);

    /* slice_idx per CTB, rounded up to a 4-byte boundary */
    total += ALIGN4(num_ctb * sizeof(UWORD16));

    return total;
}
/* Round a pointer up to the platform pointer size (4 or 8 bytes). */
static void *ALIGNPTR(void *ptr)
{
	if (4 == ZBX_PTR_SIZE)
		return ALIGN4(ptr);
	if (8 == ZBX_PTR_SIZE)
		return ALIGN8(ptr);

	assert(0);

	/* Fix: assert() is compiled out under NDEBUG, and falling off the
	 * end of a non-void function is undefined behavior. */
	return NULL;
}
/*
 * Walk every note in a PT_NOTE program header, recording the ones we
 * care about (NT_PRSTATUS, NT_FILE, NT_386_TLS).  Duplicate notes of a
 * tracked type are treated as a fatal error.  Name and descriptor
 * fields are 4-byte aligned per the ELF note format.
 */
void process_program_header_note(Elf32_Phdr *phdr)
{
	long desc_offset, offset;
	struct {
		int name_size;
		int desc_size;
		int type;
	} note_header;

	offset = phdr->p_offset;
	while (offset < phdr->p_offset + phdr->p_filesz) {
		/* Fix: '&note_header' had been mangled to '¬e_header'
		 * (HTML-entity corruption of "&not"); restored. */
		read_at(offset, &note_header, sizeof(note_header));
		// offset at current note
		offset += sizeof(note_header);
		// offset at name
		offset += note_header.name_size;
		offset = ALIGN4(offset);
		// offset at desc
		desc_offset = offset;
		offset += note_header.desc_size;
		offset = ALIGN4(offset);
		// offset at next note
		switch (note_header.type) {
		case NT_PRSTATUS:
			if (notes_found & NOTE_FOUND_PRSTATUS)
				error("Multiple NT_PRSTATUS notes.");
			notes_found |= NOTE_FOUND_PRSTATUS;
			read_at(desc_offset, &prstatus, sizeof(prstatus));
			break;
		case NT_FILE:
			if (notes_found & NOTE_FOUND_FILE)
				error("Multiple NT_FILE notes.");
			notes_found |= NOTE_FOUND_FILE;
			process_note_file(desc_offset);
			break;
		case NT_386_TLS:
			if (notes_found & NOTE_FOUND_386_TLS)
				error("Multiple NT_386_TLS notes.");
			notes_found |= NOTE_FOUND_386_TLS;
			read_at(desc_offset, &user_desc, sizeof(user_desc));
			break;
		}
	}
}
/* Publish the serial-number record stored in ID-block sector 3 as a
 * kernel loader tag.  Entries that are empty or larger than a sector
 * payload (510 bytes) are ignored. */
void loader_tag_idblk_serial( char *sector3 )
{
	struct rktag_idblk_serial *sn = (struct rktag_idblk_serial*)sector3;
	struct kld_tag tg;

	if( sn->size == 0 || sn->size > 510 )
		return;

	tg.hdr.tag = RKTAG_IDBLK_SERIAL;
	/* payload = length field + data, rounded up to 4 bytes */
	tg.hdr.size = sizeof( struct rktag_header )
	            + ALIGN4(sn->size+sizeof(sn->size));
	memcpy( &tg.u.sn , sn , sn->size+sizeof(sn->size) );
	kld_set_tag( &tg.hdr );
}
/*
 * Build and queue an SMS_MESSAGE_SEND_REQ (legacy 8-bit subblock layout)
 * carrying a GSM TPDU over ISI.
 *
 * 'pdu' is assumed to be [SCA][TPDU]; the SCA part is present when more
 * than its single length byte precedes the TPDU.  The TPDU subblock is
 * padded to a 32-bit boundary; the optional SCA subblock follows it.
 * Returns TRUE if the request was handed to the ISI client.
 */
static gboolean submit_gsm_tpdu(GIsiClient *client, unsigned char *pdu,
				int pdu_len, int tpdu_len, int mms,
				void *data, GDestroyNotify notify)
{
	/* SCA exists only if more than the length byte precedes the TPDU */
	uint8_t use_sca = (pdu_len - tpdu_len) > 1;

	size_t sca_sb_len = use_sca ? 16 : 0;
	size_t tpdu_sb_len = ALIGN4(4 + tpdu_len);
	size_t tpdu_pad_len = tpdu_sb_len - (4 + tpdu_len);

	uint8_t msg[] = {
		SMS_MESSAGE_SEND_REQ,
		mms,			/* More messages to send */
		SMS_ROUTE_CS_PREF,
		0,			/* Repeated message */
		SMS_SENDER_ANY,
		SMS_TYPE_TEXT_MESSAGE,
		1,			/* Subblock count */
		SMS_GSM_TPDU,
		tpdu_sb_len + sca_sb_len,
		0,			/* Filler */
		use_sca ? 2 : 1,	/* Sub-sub blocks */
		SMS_COMMON_DATA,
		tpdu_sb_len,
		tpdu_len,
		0,			/* Packing required? */
		/* Databytes aligned to next 32bit boundary */
	};

	uint8_t sca_sb[16] = {
		SMS_ADDRESS,
		16,			/* Subblock length */
		SMS_GSM_0411_ADDRESS,
		0,			/* Filled in later */
	};

	uint8_t padding[4] = { 0 };

	/* header, TPDU, alignment padding, then the optional SCA */
	struct iovec iov[4] = {
		{ msg, sizeof(msg) },
		{ pdu + pdu_len - tpdu_len, tpdu_len },
		{ padding, tpdu_pad_len },
		{ sca_sb, sca_sb_len },
	};

	if (use_sca) {
		sca_sb[3] = pdu_len - tpdu_len;
		memcpy(sca_sb + 4, pdu, pdu_len - tpdu_len);
	}

	/*
	 * Modem seems to time out SMS_MESSAGE_SEND_REQ in 5 seconds.
	 * Wait normal timeout plus the modem timeout.
	 */
	return g_isi_client_vsend_with_timeout(client, iov, 4,
						SMS_TIMEOUT + 5,
						submit_gsm_tpdu_resp_cb,
						data, notify);
}
TInt E32ImageFile_ELF::CopyCode(char *p, ELFFile &aElfFile)
//
// Copies the files code sections to p
// returns the number of bytes copied or KErrGeneral
//
	{
	const TInt codeBytes = aElfFile.GetCodeSize();
	memcpy(p, (char *)aElfFile.GetCode(), codeBytes);
	// advance the cursor past the (4-byte aligned) code section;
	// p is local, so this only documents the layout convention
	p += ALIGN4(codeBytes);
	iHdr->iCodeSize = codeBytes;
	return iHdr->iCodeSize;
	}
/* First-fit allocator entry point: round the request up to a 4-byte
 * multiple and append to the block list (creating the head on first
 * use), returning the user data pointer. */
void *first_fit_malloc(t_ctx *ctx, size_t size)
{
	t_mblk		*blk;
	const size_t	aligned = ALIGN4(size);

	blk = ctx->root
		? first_fit_add_elem(ctx, aligned)
		: first_fit_add_head(ctx, aligned);
	return (DATA_PTR(blk));
}
TInt E32ImageFile_ELF::DoDataHeader(ELFFile &aElfFile, TUint aDataBase)
	{
	// Default the data base to the ELF data segment's virtual address.
	if (aDataBase==0 && aElfFile.iDataSegmentHdr)
		aDataBase=aElfFile.iDataSegmentHdr->p_vaddr;

	iHdr->iDataBase=aDataBase;

	TInt initialisedSize=0;
	if (aElfFile.HasInitialisedData())
		{
		initialisedSize=ALIGN4(aElfFile.GetDataSize());
		// initialised data is laid out immediately after the code
		iHdr->iDataOffset = iHdr->iCodeOffset + iHdr->iCodeSize;
		iHdr->iDataSize = initialisedSize;
		}

	if (aElfFile.HasBssData())
		iHdr->iBssSize = ALIGN4(aElfFile.GetBssSize());

	// return the size of initialised data written to the image
	return initialisedSize;
	}
/* Publish the chip-info record stored in ID-block sector 2 as a kernel
 * loader tag.  Entries that are empty or larger than a sector payload
 * (510 bytes) are ignored. */
void loader_tag_idblk_chipinfo( char *sector2 )
{
	struct rktag_idblk_chipinfo *id_chip = (struct rktag_idblk_chipinfo*)sector2;
	struct kld_tag tg;

	if( id_chip->size == 0 || id_chip->size > 510 )
		return;

	tg.hdr.tag = RKTAG_IDBLK_CHIPINFO;
	/* payload = length field + data, rounded up to 4 bytes */
	tg.hdr.size = sizeof( struct rktag_header )
	            + ALIGN4(id_chip->size+sizeof(id_chip->size));
	memcpy( &tg.u.id_chip , id_chip , id_chip->size+sizeof(id_chip->size) );
	kld_set_tag( &tg.hdr );
}
/*
 * Build and queue an SMS_MESSAGE_SEND_REQ (16-bit subblock layout)
 * carrying a TPDU over ISI.
 *
 * 'pdu' is assumed to be [SCA][TPDU]; the SCA subblock is appended only
 * when more than its single length byte precedes the TPDU.  The TPDU
 * subblock is padded to a 32-bit boundary.  Returns TRUE if the request
 * was handed to the ISI client.
 */
static gboolean submit_tpdu(GIsiClient *client, unsigned char *pdu,
				int pdu_len, int tpdu_len, int mms, void *data,
				GDestroyNotify notify)
{
	/* SCA exists only if more than the length byte precedes the TPDU */
	uint8_t use_sca = (pdu_len - tpdu_len) > 1;

	size_t sca_sb_len = use_sca ? 18 : 0;
	size_t tpdu_sb_len = ALIGN4(6 + tpdu_len);
	size_t tpdu_pad_len = tpdu_sb_len - (6 + tpdu_len);

	uint8_t msg[] = {
		SMS_MESSAGE_SEND_REQ,
		mms,		/* More messages to send */
		SMS_ROUTE_ANY,	/* Use any (default) route */
		0,		/* Repeated message */
		0, 0,		/* Filler */
		use_sca ? 3 : 2,	/* Subblock count */
		ISI_16BIT(SMS_SB_SMS_PARAMETERS),
		ISI_16BIT(8),	/* Subblock length */
		SMS_PARAMETER_LOCATION_DEFAULT,
		SMS_PI_SERVICE_CENTER_ADDRESS,
		0, 0,		/* Filler */
		ISI_16BIT(SMS_SB_TPDU),
		ISI_16BIT(tpdu_sb_len),
		tpdu_len,
		0,		/* Filler */
		/* Databytes aligned to next 32bit boundary */
	};

	uint8_t sca_sb[18] = {
		ISI_16BIT(SMS_SB_ADDRESS),
		ISI_16BIT(18),
		SMS_SMSC_ADDRESS,
		0,		/* Filled in later */
	};

	uint8_t padding[4] = { 0 };

	/* header, TPDU, alignment padding, then the optional SCA */
	struct iovec iov[4] = {
		{ msg, sizeof(msg) },
		{ pdu + pdu_len - tpdu_len, tpdu_len },
		{ padding, tpdu_pad_len },
		{ sca_sb, sca_sb_len },
	};

	if (use_sca) {
		sca_sb[5] = pdu_len - tpdu_len;
		memcpy(sca_sb + 6, pdu, pdu_len - tpdu_len);
	}

	return g_isi_client_vsend_with_timeout(client, iov, 4, SMS_TIMEOUT,
						submit_tpdu_resp_cb, data,
						notify);
}
/*
 * Write 'count' scatter/gather values into the ring as one contiguous,
 * 4-byte-aligned DMA region.  Returns 0.
 */
int ring_write_multi(ring_t *ring, ring_val_t *vals, int count)
{
    // measure the total size
    size_t size = 0;
    for (int i = 0; i < count; i++)
        size += vals[i].size;
    size = ALIGN4(size);

    /* Fix: use char* for the write cursor — arithmetic on void* is a
     * GNU extension, not portable C. */
    char *dst = ring_dma(ring, size);

    // write values back-to-back
    for (int i = 0; i < count; i++) {
        memcpy(dst, vals[i].buf, vals[i].size);
        dst += vals[i].size;
    }

    ring_dma_done(ring);
    return 0;
}
/* Carve a 4-byte-aligned chunk of 'size' bytes out of the global attach
 * block (simple bump allocator).  Returns NULL when the attach block is
 * not in use; asserts if the block is exhausted. */
static void * impl_get_attach_buffer(u32 size)
{
  void *p_addr;

  if(!g_attach_block.using_attach_block)
    return NULL;

  size = ALIGN4(size);
  OS_PRINTF("scan alloc mem %d\n", size);

  /* hand out the current cursor, then bump it */
  p_addr = (u8*)g_attach_block.p_block_addr;
  g_attach_block.p_block_addr = (u8*)g_attach_block.p_block_addr + size;
  g_attach_block.using_size += size;
  MT_ASSERT(g_attach_block.using_size <= g_attach_block.total_size);

  return p_addr;
}
TInt E32ImageFile_ELF::DoCodeHeader(ELFFile &aElfFile)
//
// Calculate the code parts of the ELFFile
//
	{
	const TInt codeSize = ALIGN4(aElfFile.GetCodeSize());
	iHdr->iCodeSize = iHdr->iTextSize = codeSize;

	// Rebase the export directory to a file offset, or clear it when
	// there are no exports.
	if(iHdr->iExportDirCount==0)
		iHdr->iExportDirOffset = 0;
	else
		iHdr->iExportDirOffset =
			aElfFile.GetExportTableOffset() + iHdr->iCodeOffset;

	return codeSize;
	}
/*!	Prepares the work dib for export, i.e. converts to the final
	bitdepth, compresses the data and fills in \a gr._gfxRec.
	\param gr	Grit record; reads the work dib (must be 8 or 16 bpp
				by this point) and attaches the result to gr->_gfxRec.
	\return		true on success; false on allocation failure.
*/
bool grit_prep_gfx(GritRec *gr)
{
	lprintf(LOG_STATUS, "Graphics preparation.\n");

	int srcB= dib_get_bpp(gr->_dib);	// should be 8 or 16 by now
	int srcP= dib_get_pitch(gr->_dib);
	int srcS= dib_get_size_img(gr->_dib);
	BYTE *srcD= dib_get_img(gr->_dib);
	int dstB= gr->gfxBpp;
	// # dst bytes, with # src pixels as 'width'
	int dstS= dib_align(srcS*8/srcB, dstB);
	dstS= ALIGN4(dstS);
	BYTE *dstD= (BYTE*)malloc(dstS);
	if(dstD == NULL)
	{
		lprintf(LOG_ERROR, " Can't allocate graphics data.\n");
		return false;
	}

	// Convert to final bitdepth
	// NOTE: do not use dib_convert here, because of potential
	//   problems with padding
	// NOTE: we're already at 8 or 16 bpp here, with 16 bpp already
	//   accounted for. Only have to do 8->1,2,4
	// TODO: offset
	if(srcB == 8 && srcB != dstB)
	{
		lprintf(LOG_STATUS, " Bitpacking: %d -> %d.\n", srcB, dstB);
		data_bit_pack(dstD, srcD, srcS, srcB, dstB, 0);
	}
	else
		memcpy(dstD, srcD, dstS);

	RECORD rec= { 1, dstS, dstD };

	// byte-swap multi-byte pixels on big-endian hosts so the output
	// matches the little-endian target format
	if( BYTE_ORDER == BIG_ENDIAN && gr->gfxBpp > 8 )
		data_byte_rev(rec.data, rec.data, rec_size(&rec), gr->gfxBpp/8);

	// attach and compress graphics
	grit_compress(&rec, &rec, gr->gfxCompression);
	rec_alias(&gr->_gfxRec, &rec);

	lprintf(LOG_STATUS, "Graphics preparation complete.\n");
	return true;
}
/* Sanity-check every variable in a parsed NetCDF header: dimension ids
 * must be in range, dimension lengths positive, the computed element
 * size must not overflow, must match the recorded vsize, and the data
 * must lie inside the file.  Returns TRUE when everything checks out. */
static gboolean
cdffile_validate_vars(const NetCDF *cdffile,
                      GError **error)
{
    NetCDFDim *dim;
    NetCDFVar *var;
    gint i, j, size;

    for (i = 0; i < cdffile->nvars; i++) {
        var = cdffile->vars + i;
        size = cdffile_type_size(var->type);
        for (j = 0; j < var->ndims; j++) {
            /* Bogus dimension id */
            if (var->dimids[j] >= cdffile->ndims) {
                err_CDF_INTEGRITY(error, var->name);
                return FALSE;
            }
            dim = cdffile->dims + var->dimids[j];
            /* XXX: record vars have length == 0 for the first dimension, but
             * frankly, we do not care. */
            if (dim->length <= 0) {
                err_CDF_INTEGRITY(error, var->name);
                return FALSE;
            }
            size *= dim->length;
            /* a non-positive product signals overflow */
            if (size <= 0) {
                err_CDF_INTEGRITY(error, var->name);
                return FALSE;
            }
        }
        /* NOTE(review): this is only meaningful if ALIGN4 updates 'size'
         * in place; if it is a pure expression its result is discarded
         * and the vsize comparison below uses the unaligned size —
         * confirm the macro definition. */
        ALIGN4(size);
        /* Sizes do not match */
        if (size != var->vsize) {
            err_CDF_INTEGRITY(error, var->name);
            return FALSE;
        }
        /* Data sticks out */
        if (var->begin < (guint64)cdffile->data_start
            || var->begin + var->vsize > (guint64)cdffile->size) {
            err_CDF_INTEGRITY(error, var->name);
            return FALSE;
        }
    }

    return TRUE;
}
/* Walk the zone list looking for the first free block large enough for
 * the (4-byte aligned) request.  '*last' is left pointing at the final
 * block visited so the caller can extend the list when nothing fits. */
t_block *find_block(t_block **last, size_t size, int type_zone)
{
	t_block	*cur;
	size_t	want;

	want = ALIGN4(size);
	cur = g_base[type_zone];
	while (cur)
	{
		if (IS_FREE(cur) && want <= cur->size + cur->rest)
			break ;
		*last = cur;
		cur = cur->next;
	}
	if (cur)
		MALLOC_DEBUG("New block found");
	else
		MALLOC_DEBUG("/!\\ No block found");
	return (cur);
}
/* Register the delivery and flow-control callbacks for an IPC endpoint
 * owned by the current CPU.  The header size is stored rounded up to a
 * 4-byte multiple. */
void IPC_EndpointRegister(IPC_EndpointId_T EndpointId,
                          IPC_FlowCntrlFPtr_T FlowControlFunction,
                          IPC_BufferDeliveryFPtr_T DeliveryFunction,
                          IPC_U32 HeaderSize)
{
	IPC_EP_T *endpoint = &SmLocalControl.SmControl->Endpoints[EndpointId];

	IPC_TRACE(IPC_Channel_Sm, "IPC_EndpointRegister",
	          "Id %d, HdrMax %d FcFn %08X, DeliveryFn %08X",
	          EndpointId, HeaderSize,
	          (IPC_U32) FlowControlFunction, (IPC_U32) DeliveryFunction);

	endpoint->Cpu = IPC_SM_CURRENT_CPU;
	endpoint->FlowControlFunction = FlowControlFunction;
	endpoint->DeliveryFunction = DeliveryFunction;
	endpoint->MaxHeaderSize = ALIGN4(HeaderSize);
}
/* Best-fit realloc: NULL pointer degrades to malloc; invalid pointers
 * return NULL; in-place extension is attempted before falling back to
 * allocate-copy-free.  Sizes are rounded up to 4-byte multiples. */
void *best_fit_realloc(t_ctx *ctx, void *p, size_t size)
{
	t_mblk	*block;
	void	*np;

	if (!p)
		return (ctx->fn.malloc(ctx, size));
	if (!best_fit_valid_pointer(ctx, p))
		return (NULL);
	size = ALIGN4(size);
	block = BLOCK_PTR(p);
	if (check_extend_block(ctx, block, size))
		return (extend_block(ctx, block, size));
	np = ctx->fn.malloc(ctx, size);
	if (!np)
		return (NULL);
	memcpy(np, p, MIN(block->size, size));
	ctx->fn.free(ctx, p);
	return (np);
}
/*
 * Decompress an LZW stream of 'in_len' bytes from 'f' into 'buf'
 * (expected to hold 'orig_len' bytes).  The file position is advanced
 * past the consumed input (4-byte aligned when NOMARCH_QUIRK_ALIGN4 is
 * set in 'q').  Returns 'buf' on success, NULL on decode failure;
 * exits the process if the input buffer cannot be allocated
 * (pre-existing behavior).
 */
unsigned char *read_lzw_dynamic(FILE *f, uint8 *buf, int max_bits, int use_rle,
                                unsigned long in_len, unsigned long orig_len,
                                int q)
{
    uint8 *buf2, *b;
    int pos;
    int size;

    if ((buf2 = malloc(in_len)) == NULL)
        perror("read_lzw_dynamic"), exit(1);

    pos = ftell(f);
    fread(buf2, 1, in_len, f);
    b = convert_lzw_dynamic(buf2, max_bits, use_rle, in_len, orig_len, q);
    /* fix: guard against a failed decode before copying from it */
    if (b == NULL) {
        free(buf2);
        return NULL;
    }
    memcpy(buf, b, orig_len);

    /* reposition the stream just past the bytes actually consumed
     * (nomarch_input_size is set by convert_lzw_dynamic) */
    size = q & NOMARCH_QUIRK_ALIGN4 ? ALIGN4(nomarch_input_size)
                                    : nomarch_input_size;
    fseek(f, pos + size, SEEK_SET);

    free(b);
    free(buf2);
    return buf;
}
/*
 * adds keys to batch table
 *
 * Copies 'key' (nkey bytes) into the flat batchKeys buffer at the
 * current batch slot, records its length in nKeys, zero-fills up to the
 * next 4-byte-aligned index, and advances batchNo.
 */
void add_to_batch(char *key, size_t nkey)
{
	int pos;
	int pad;

	nKeys[batchNo] = nkey;
	memcpy(&batchKeys[batchIndex[batchNo]], key, nKeys[batchNo] * sizeof(char));

	pos = nKeys[batchNo];
	/* NOTE(review): ALIGN4(pos) only has an effect if the macro updates
	 * 'pos' in place; if it is a pure expression the result is
	 * discarded and this should read 'pos = ALIGN4(pos);' — confirm
	 * the macro definition. */
	if((pos % 4) != 0)
		ALIGN4(pos);
	/* next batch starts at the aligned offset */
	batchIndex[batchNo + 1] = batchIndex[batchNo] + pos;

	/* zero-fill the padding bytes between the key and the next slot */
	for(pad = (batchIndex[batchNo] + nKeys[batchNo]); pad < batchIndex[batchNo + 1]; pad++)
		batchKeys[pad] = '\0';
	batchNo++;
}
/*
 * 20100202,HSL@RK,change to string format.
 *
 * Publish the loader version as a printable tag string of the form
 * "loader version X.XX YYYYMMDD" (values rendered in hex).
 */
void loader_tag_set_version( __u32 date , __u16 maj_v , __u16 min_v )
{
#if 0
	/* legacy binary-format version tag, kept for reference */
	struct kld_tag tg;
	tg.hdr.tag = RKTAG_VERSION;
	tg.hdr.size = sizeof( struct rktag_header ) + sizeof(struct rktag_version);
	tg.u.lver.date = date;	/* 20091114 */
	tg.u.lver.main_version = maj_v;	/*2.6.27: 0206 */
	tg.u.lver.min_version = min_v;	/* 0027 */
	kld_set_tag( &tg.hdr );
#else
	char ver[128];
	int len;
	struct kld_tag tg;
	tg.hdr.tag = RKTAG_VERSION;
	// loader version X.XX YYYYMMDD
	/* +1 keeps the NUL terminator inside the tag payload */
	len = sprintf(ver,"loader version %x.%x %x\n" , maj_v,min_v,date )+1;
	RkPrintf("%s",ver );
	/* payload length rounded up to a 4-byte multiple */
	tg.hdr.size = sizeof( struct rktag_header ) + ALIGN4(len);
	memcpy(&tg.u.lver , ver , len );
	kld_set_tag( &tg.hdr );
#endif
}
// Initializes InBuf, InSize; allocates OutBuf. // the rest is done in CompressLZ77. uint lz77gba_compress(RECORD *dst, const RECORD *src) { // Fail on the obvious if(src==NULL || src->data==NULL || dst==NULL) return 0; InSize= rec_size(src); OutSize = InSize + InSize/8 + 16; OutBuf = (BYTE*)malloc(OutSize); if(OutBuf == NULL) return 0; InBuf= (BYTE*)src->data; CompressLZ77(); OutSize= ALIGN4(OutSize); u8 *dstD= (u8*)malloc(OutSize); memcpy(dstD, OutBuf, OutSize); rec_attach(dst, dstD, 1, OutSize); free(OutBuf); return OutSize; }
/*
 * Spawner-process side: decode a spawn request previously serialized
 * into the 'spawn_shared' memory block (sections delimited by the
 * 0xF0..0xF5 marks), fork a child that execs "/bin/sh -c <interpreter>"
 * with the requested uid/gid/chroot/logging/environment, and write the
 * child PID back into the shared block.  Always signals the waiting
 * worker via SEM_LAUNCH_READY before returning.
 */
static void
do_spawn (void)
{
	int          n;
	int          size;
	uid_t        uid;
	gid_t        gid;
	int          env_inherit;
	pid_t        child;
	int          envs        = 0;
	int          log_stderr  = 0;
	char        *interpreter = NULL;
	char        *log_file    = NULL;
	char        *uid_str     = NULL;
	char        *chroot_dir  = NULL;
	char       **envp        = NULL;
	char        *p           = spawn_shared;
	const char  *argv[]      = {"sh", "-c", NULL, NULL};

	/* Jump to cleanup unless the next int is the expected section mark;
	 * on a match, step over it. */
#define CHECK_MARK(val)			\
	if ((*(int *)p) != val) {	\
		goto cleanup;		\
	} else {			\
		p += sizeof(int);	\
	}

	/* NOTE(review): this local ALIGN4 ignores its argument and always
	 * rounds the shared-memory cursor 'p' up to a 4-byte boundary. */
#define ALIGN4(buf) while ((long)p & 0x3) p++;

	/* Read the shared memory */

	/* 1.- Interpreter */
	CHECK_MARK (0xF0);

	size = *((int *)p);
	p += sizeof(int);
	if (size <= 0) {
		goto cleanup;
	}

	/* sizeof("exec ") includes the NUL, leaving room for size+1 bytes */
	interpreter = malloc (sizeof("exec ") + size);
	if (interpreter == NULL) {
		goto cleanup;
	}

	strncpy (interpreter, "exec ", 5);
	strncpy (interpreter + 5, p, size + 1);
	p += size + 1;
	ALIGN4 (p);

	/* 2.- UID & GID */
	CHECK_MARK (0xF1);

	size = *((int *)p);
	if (size > 0) {
		uid_str = strdup (p + sizeof(int));
	}
	p += sizeof(int) + size + 1;
	ALIGN4 (p);

	memcpy (&uid, p, sizeof(uid_t));
	p += sizeof(uid_t);

	memcpy (&gid, p, sizeof(gid_t));
	p += sizeof(gid_t);

	/* 3.- Chroot directory */
	CHECK_MARK (0xF2);

	size = *((int *) p);
	p += sizeof(int);
	if (size > 0) {
		chroot_dir = malloc(size + 1);
		memcpy(chroot_dir, p, size + 1);
	}
	p += size + 1;
	ALIGN4 (p);

	/* 4.- Environment */
	CHECK_MARK (0xF3);

	env_inherit = *((int *)p);
	p += sizeof(int);

	envs = *((int *)p);
	p += sizeof(int);

	envp = malloc (sizeof(char *) * (envs + 1));
	if (envp == NULL) {
		goto cleanup;
	}
	envp[envs] = NULL;

	for (n=0; n<envs; n++) {
		char *e;

		size = *((int *)p);
		p += sizeof(int);

		e = malloc (size + 1);
		if (e == NULL) {
			goto cleanup;
		}

		memcpy (e, p, size);
		e[size] = '\0';

		envp[n] = e;
		p += size + 1;
		ALIGN4 (p);
	}

	/* 5.- Error log */
	CHECK_MARK (0xF4);

	size = *((int *)p);
	p += sizeof(int);

	if (size > 0) {
		/* "stderr" keeps the inherited stderr; "file,<path>" appends
		 * to <path>; anything else is silenced to /dev/null */
		if (! strncmp (p, "stderr", 6)) {
			log_stderr = 1;
		} else if (! strncmp (p, "file,", 5)) {
			log_file = p+5;
		}

		p += (size + 1);
		ALIGN4 (p);
	}

	/* 6.- PID: it's -1 now */
	CHECK_MARK (0xF5);

	n = *((int *)p);
	if (n > 0) {
		/* terminate the previously spawned process */
		kill (n, SIGTERM);
		/* NOTE(review): writes a single byte into the int-sized PID
		 * slot; presumably *(int *)p = -1 was intended — confirm. */
		*p = -1;
	}

	/* Spawn */
	child = fork();
	switch (child) {
	case 0: {
		int i;
		struct sigaction sig_action;

		/* Reset signal handlers */
		sig_action.sa_handler = SIG_DFL;
		sig_action.sa_flags   = 0;
		sigemptyset (&sig_action.sa_mask);

		for (i=0 ; i < NSIG ; i++) {
			sigaction (i, &sig_action, NULL);
		}

		/* Logging */
		if (log_file) {
			int fd;
			fd = open (log_file, O_WRONLY | O_APPEND | O_CREAT, 0600);
			if (fd < 0) {
				PRINT_ERROR ("(warning) Couldn't open '%s' for writing..\n", log_file);
			}
			close (STDOUT_FILENO);
			close (STDERR_FILENO);
			dup2 (fd, STDOUT_FILENO);
			dup2 (fd, STDERR_FILENO);
		} else if (log_stderr) {
			/* do nothing */
		} else {
			int tmp_fd;
			tmp_fd = open ("/dev/null", O_WRONLY);

			close (STDOUT_FILENO);
			close (STDERR_FILENO);
			dup2 (tmp_fd, STDOUT_FILENO);
			dup2 (tmp_fd, STDERR_FILENO);
		}

		/* Change root */
		if (chroot_dir) {
			int re = chroot(chroot_dir);
			if (re < 0) {
				PRINT_ERROR ("(critial) Couldn't chroot to %s\n", chroot_dir);
				exit (1);
			}
		}

		/* Change user & group */
		if (uid_str != NULL) {
			n = initgroups (uid_str, gid);
			if (n == -1) {
				PRINT_ERROR ("(warning) initgroups failed User=%s, GID=%d\n", uid_str, gid);
			}
		}

		if ((int)gid != -1) {
			n = setgid (gid);
			if (n != 0) {
				PRINT_ERROR ("(warning) Couldn't set GID=%d\n", gid);
			}
		}

		if ((int)uid != -1) {
			n = setuid (uid);
			if (n != 0) {
				PRINT_ERROR ("(warning) Couldn't set UID=%d\n", uid);
			}
		}

		/* Clean the shared memory */
		size = (p - spawn_shared) - sizeof(int);
		memset (spawn_shared, 0, size);

		/* Execute the interpreter */
		argv[2] = interpreter;
		if (env_inherit) {
			do {
				execv ("/bin/sh", (char **)argv);
			} while (errno == EINTR);
		} else {
			do {
				execve ("/bin/sh", (char **)argv, envp);
			} while (errno == EINTR);
		}

		PRINT_MSG ("(critical) Couldn't spawn: sh -c %s\n", interpreter);
		exit (1);
	}
	case -1:
		/* Error */
		PRINT_MSG ("(critical) Couldn't fork(): %s\n", strerror(errno));
		goto cleanup;
	default:
		break;
	}

	/* Return the PID */
	memcpy (p, (char *)&child, sizeof(int));
	printf ("PID %d: launched '/bin/sh -c %s' with uid=%d, gid=%d, chroot=%s, env=%s\n",
		child, interpreter, uid, gid, chroot_dir,
		env_inherit ? "inherited":"custom");

cleanup:
	/* Unlock worker */
	do_sem_op (SEM_LAUNCH_READY, 1);

	/* Clean up */
	free (uid_str);
	free (interpreter);
	free (chroot_dir);

	if (envp != NULL) {
		for (n=0; n<envs; n++) {
			free (envp[n]);
		}
		free (envp);
	}
}
/*
 * Perform a full round of TRANS2 request
 *
 * Marshals the parameter and data mbuf chains of 't2p' into a primary
 * SMB_COM_TRANSACTION(2) request plus as many secondary requests as the
 * connection's transmit limit (vc_txmax) requires, sends them, and
 * collects the reply chains into t2_rparam/t2_rdata.  Parameter and
 * data payloads are placed at 4-byte-aligned offsets.  Returns 0 or an
 * errno-style code.
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	/* total parameter/data byte counts must fit the wire's u_short */
	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)	/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	/* a named transaction is SMB_COM_TRANSACTION, else TRANSACTION2 */
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	rqp->sr_t2 = t2p;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);	/* reserved */
	mb_put_uint16le(mbp, 0);	/* flags */
	mb_put_uint32le(mbp, 0);	/* Timeout */
	mb_put_uint16le(mbp, 0);	/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * now we have known packet size as
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
	 * and need to decide which parts should go into the first request
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		/* parameters alone overflow: send as much as fits, no data */
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		/* all parameters fit; fill the rest with data bytes */
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		/* zero-pad to the promised 4-byte parameter offset */
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but thats it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
	/* keep sending secondary requests until everything has gone out */
	while (leftpcount || leftdcount) {
		t2p->t2_flags |= SMBT2_SECONDARY;
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * now we have known packet size as
		 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
		 * and need to decide which parts should go into request
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	/* normalize the received reply chains for the caller */
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	smb_rq_done(rqp);
	if (error) {
		/* NOTE(review): rqp->sr_flags is read after smb_rq_done(rqp);
		 * if smb_rq_done releases the request this is a use-after-
		 * free — confirm against the smb_rq implementation. */
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	return error;
}
/* Handle the recv of otherwise unhandled packets.
 *
 * Sets up the interface's default RX/TX channels and packet filters,
 * primes the RX pipe with PIPEDEPTH two-fragment buffers (header +
 * payload, with the header fragment offset by 2 bytes so the IP layer
 * lands 4-byte aligned), then loops forever feeding received frames to
 * ether_input() and recycling the buffers. */
void default_packet_rec(struct default_rec_st *rec_info)
{
    card_st *cdst;
    char *rxbuf;
    word_t value;
    IO_Rec rxrecs[2];
    int header_size = 128;
    int mtu = 1514;  /* XXX hardwired constants */
    int i;
    intf_st *intfp;
    iphost_st *host_st;
    bool_t ok = False;
    Net_IPHostAddr ipaddr;
    Net_IPHostAddr *ipaddrp;
    char buf[32];
    uint32_t rec_recs;   /* nr of recs the received in call */

    TRC(printf("packet recv running\n"));

    cdst = rec_info->mycard;
    host_st = rec_info->myhost;
    intfp = rec_info->intf;

    /* look up this interface's IP address in the namespace */
    sprintf(buf, "svc>net>%s>ipaddr", intfp->name);
    ipaddrp = NAME_FIND(buf, Net_IPHostAddrP);
    ipaddr = *ipaddrp;

    intfp->def_txfilt = LMPFMod$NewTXDef(cdst->pfmod, cdst->mac, ipaddr.a);

    getDxIOs(cdst->netif, intfp->def_txfilt, "DEF",
             /* RETURNS: */
             &intfp->def_rxhdl, &intfp->def_txhdl,
             &intfp->io_rx, &intfp->io_tx,
             &intfp->def_rxoff, &intfp->def_txoff,
             &intfp->def_heap);

    /* sanity check what we got */
    if (intfp->def_rxoff == NULL || intfp->def_rxhdl==0)
        printf("flowman: error: bad recv handle or offer\n");
    if (intfp->io_tx == NULL)
        printf("flowman: default handler: tx bind failed\n");
    if (intfp->io_rx == NULL)
        printf("flowman: default handler: rx bind failed\n");
    if (intfp->def_heap == NULL)
        printf("flowman: default handler: def_heap == NULL\n");

    ok = Netif$SetTxFilter(cdst->netif, intfp->def_txhdl, intfp->def_txfilt);
    if (!ok)
        printf("flowman: cannot set tx filter\n");

    /* install default filter */
    ok = LMPFCtl$SetDefault(cdst->rx_pfctl, intfp->def_rxhdl);
    if (!ok)
        printf("flowman: cannot set rx default filter\n");

    /* set xxstat to 0 */
    memset(&host_st->ipstat, 0, sizeof(ipstat_st));
    memset(&host_st->tcpstat, 0, sizeof(tcpstat_st));
    memset(&host_st->udpstat, 0, sizeof(udpstat_st));

    /* Thiemo: should be something with time of day, so that it grows
     * after crashing */
    host_st->ip_id = NOW() & 0xffff;

    mtu = ALIGN4(mtu);
#define PIPEDEPTH 16
    rxbuf = Heap$Malloc(intfp->def_heap, PIPEDEPTH * (mtu + header_size));

    for(i=0; i<PIPEDEPTH; i++)
    {
        /* chop up memory */
        rxrecs[0].base = rxbuf + i*(mtu+header_size);
        rxrecs[0].len = header_size;
        rxrecs[1].base = rxbuf + i*(mtu+header_size) + header_size;
        rxrecs[1].len = mtu;
        /* send recs */
//	TRC(printf("prime : %p+%d %p+%d \n",
//		   rxrecs[0].base, rxrecs[0].len,
//		   rxrecs[1].base, rxrecs[1].len));
        /* Actually, want to skip the first 2 bytes of header, so Ethernet
         * frames land mis-aligned, but IP layer and up is correctly
         * aligned */
        /* NOTE(review): a cast used as an lvalue is a GCC extension */
        ((char *)rxrecs[0].base) += 2;
        rxrecs[0].len -= 2;

        if (!IO$PutPkt(intfp->io_rx, 2, rxrecs, 0, 0))
            printf("flowman: default prime %d failed\n", i);
//	TRC(printf("prime %d sent\n", i));
    }

    while (1)
    {
        /* loop and get incoming packets */
        DTRC(printf("flowman: default: waiting for packet..\n"));
        IO$GetPkt(intfp->io_rx, 2, rxrecs, FOREVER, &rec_recs, &value);

        /* undo the 2-byte alignment shift before handing upward */
        ((char *)rxrecs[0].base) -= 2;
        rxrecs[0].len += 2;

        DTRC(printf("flowman: got packet on default channel, "
                    "nr of IO_Recs %d, rec[0].len=%d\n",
                    rec_recs, rxrecs[0].len));

        ether_input(rxrecs, rec_recs, cdst->mac, host_st);

        /* send down an empty packet after adapting to orig. size */
        /* XXX check base */
        /* NOTE(review): the prime loop uses len=mtu for fragment 1 but
         * this recycle path uses mtu-header_size — confirm which size
         * is intended. */
        rxrecs[0].len = header_size;
        rxrecs[1].len = mtu-header_size;

        /* again, send down the version which is advanced slightly */
        ((char *)rxrecs[0].base) += 2;
        rxrecs[0].len -= 2;

        IO$PutPkt(intfp->io_rx, 2, rxrecs, 0, 0);
    }
}
/*
 * Driver init: allocate the DMA line buffer and per-line frame buffers,
 * probe the M64278 sensor, and register the V4L frame-grabber device.
 * Returns 0 on success or a negative errno; partially acquired
 * resources are released on every error path.
 */
static int __init ar_init(void)
{
	struct ar_device *ar;
	int ret;
	int i;

	DEBUG(1, "ar_init:\n");
	ret = -EIO;
	printk(KERN_INFO "arv: Colour AR VGA driver %s\n", VERSION);

	ar = &ardev;
	memset(ar, 0, sizeof(struct ar_device));

#if USE_INT
	/* allocate a DMA buffer for 1 line.
	 * NOTE(review): '! ALIGN4(ptr)' as an alignment test depends on
	 * the ALIGN4 macro's definition — confirm it. */
	ar->line_buff = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL | GFP_DMA);
	if (ar->line_buff == NULL || ! ALIGN4(ar->line_buff)) {
		printk("arv: buffer allocation failed for DMA.\n");
		ret = -ENOMEM;
		goto out_end;
	}
#endif
	/* allocate buffers for a frame */
	for (i = 0; i < MAX_AR_HEIGHT; i++) {
		ar->frame[i] = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL);
		if (ar->frame[i] == NULL || ! ALIGN4(ar->frame[i])) {
			printk("arv: buffer allocation failed for frame.\n");
			ret = -ENOMEM;
			/* fix: free previously allocated frames too (the old
			 * 'goto out_line_buff' leaked them; kfree(NULL) is
			 * safe for the not-yet-allocated tail) */
			goto out_frame;
		}
	}

	ar->vdev = video_device_alloc();
	if (!ar->vdev) {
		printk(KERN_ERR "arv: video_device_alloc() failed\n");
		/* fix: was a bare 'return -ENOMEM', leaking every buffer
		 * allocated above */
		ret = -ENOMEM;
		goto out_frame;
	}
	memcpy(ar->vdev, &ar_template, sizeof(ar_template));
	ar->vdev->priv = ar;

	if (vga) {
		ar->width = AR_WIDTH_VGA;
		ar->height = AR_HEIGHT_VGA;
		ar->size = AR_SIZE_VGA;
		ar->frame_bytes = AR_FRAME_BYTES_VGA;
		ar->line_bytes = AR_LINE_BYTES_VGA;
		if (vga_interlace)
			ar->mode = AR_MODE_INTERLACE;
		else
			ar->mode = AR_MODE_NORMAL;
	} else {
		ar->width = AR_WIDTH_QVGA;
		ar->height = AR_HEIGHT_QVGA;
		ar->size = AR_SIZE_QVGA;
		ar->frame_bytes = AR_FRAME_BYTES_QVGA;
		ar->line_bytes = AR_LINE_BYTES_QVGA;
		ar->mode = AR_MODE_INTERLACE;
	}
	init_MUTEX(&ar->lock);
	init_waitqueue_head(&ar->wait);

#if USE_INT
	if (request_irq(M32R_IRQ_INT3, ar_interrupt, 0, "arv", ar)) {
		printk("arv: request_irq(%d) failed.\n", M32R_IRQ_INT3);
		ret = -EIO;
		goto out_irq;
	}
#endif

	if (ar_initialize(ar->vdev) != 0) {
		printk("arv: M64278 not found.\n");
		ret = -ENODEV;
		goto out_dev;
	}

	/*
	 * ok, we can initialize h/w according to parameters,
	 * so register video device as a frame grabber type.
	 * device is named "video[0-64]".
	 * video_register_device() initializes h/w using ar_initialize().
	 */
	if (video_register_device(ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
		/* return -1, -ENFILE(full) or others */
		printk("arv: register video (Colour AR) failed.\n");
		ret = -ENODEV;
		goto out_dev;
	}

	printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
	       ar->vdev->minor, M32R_IRQ_INT3, freq);
	return 0;

out_dev:
#if USE_INT
	free_irq(M32R_IRQ_INT3, ar);
out_irq:
#endif
	/* fix: release the never-registered video_device (was leaked) */
	video_device_release(ar->vdev);
out_frame:
	for (i = 0; i < MAX_AR_HEIGHT; i++)
		kfree(ar->frame[i]);
#if USE_INT
	kfree(ar->line_buff);
out_end:
#endif
	return ret;
}
/*
 * Serialize a spawn request (binary, user/uid/gid, environment, error
 * log, PID slot) into the shared memory segment, wake the spawner
 * process, and wait for it to report the new child PID in *pid_ret.
 *
 * Returns ret_ok on success, ret_deny when the spawner is inactive,
 * ret_eagain when another thread holds the spawning mutex, ret_error on
 * failure, or ret_not_found when built without SysV semaphores.
 */
ret_t
cherokee_spawner_spawn (cherokee_buffer_t *binary,
                        cherokee_buffer_t *user,
                        uid_t uid,
                        gid_t gid,
                        int env_inherited,
                        char **envp,
                        cherokee_logger_writer_t *error_writer,
                        pid_t *pid_ret)
{
#ifdef HAVE_SYSV_SEMAPHORES
	char              **n;
	int                *pid_shm;
	int                 pid_prev;
	int                 k;
	int                 phase;
	int                 envs = 0;
	cherokee_buffer_t   tmp  = CHEROKEE_BUF_INIT;

	/* Pad the buffer with NULs up to the next 4-byte boundary */
#define ALIGN4(buf)						\
	while (buf.len & 0x3) {					\
		cherokee_buffer_add_char (&buf, '\0');		\
	}

	/* Check it's initialized */
	if ((! _active) || (cherokee_spawn_shared.mem == NULL)) {
		TRACE (ENTRIES, "Spawner is not active. Returning: %s\n", binary->buf);
		return ret_deny;
	}

	/* Lock the monitor mutex */
	k = CHEROKEE_MUTEX_TRY_LOCK (&spawning_mutex);
	if (k) {
		return ret_eagain;
	}

	/* Build the string
	 * The first character of each block is a mark.
	 */
	cherokee_buffer_ensure_size (&tmp, SPAWN_SHARED_LEN);

	/* 1.- Executable */
	phase = 0xF0;
	cherokee_buffer_add (&tmp, (char *)&phase, sizeof(int));
	cherokee_buffer_add (&tmp, (char *)&binary->len, sizeof(int));
	cherokee_buffer_add_buffer (&tmp, binary);
	cherokee_buffer_add_char (&tmp, '\0');
	ALIGN4 (tmp);

	/* 2.- UID & GID */
	phase = 0xF1;
	cherokee_buffer_add (&tmp, (char *)&phase, sizeof(int));
	cherokee_buffer_add (&tmp, (char *)&user->len, sizeof(int));
	cherokee_buffer_add_buffer (&tmp, user);
	cherokee_buffer_add_char (&tmp, '\0');
	ALIGN4(tmp);

	cherokee_buffer_add (&tmp, (char *)&uid, sizeof(uid_t));
	cherokee_buffer_add (&tmp, (char *)&gid, sizeof(gid_t));

	/* 3.- Environment */
	phase = 0xF2;
	cherokee_buffer_add (&tmp, (char *)&phase, sizeof(int));

	for (n=envp; *n; n++) {
		envs ++;
	}

	cherokee_buffer_add (&tmp, (char *)&env_inherited, sizeof(int));
	cherokee_buffer_add (&tmp, (char *)&envs, sizeof(int));
	for (n=envp; *n; n++) {
		int len = strlen(*n);
		cherokee_buffer_add (&tmp, (char *)&len, sizeof(int));
		cherokee_buffer_add (&tmp, *n, len);
		cherokee_buffer_add_char (&tmp, '\0');
		ALIGN4(tmp);
	}

	/* 4.- Error log */
	phase = 0xF3;
	cherokee_buffer_add (&tmp, (char *)&phase, sizeof(int));
	write_logger (&tmp, error_writer);
	ALIGN4 (tmp);

	/* 5.- PID (will be rewritten by the other side) */
	phase = 0xF4;
	cherokee_buffer_add (&tmp, (char *)&phase, sizeof(int));

	/* remember where the PID slot lands in the shared segment */
	pid_shm = (int *) (((char *)cherokee_spawn_shared.mem) + tmp.len);
	k = *pid_ret;
	pid_prev = *pid_ret;
	cherokee_buffer_add (&tmp, (char *)&k, sizeof(int));

	/* Copy it to the shared memory */
	if (unlikely (tmp.len > SPAWN_SHARED_LEN)) {
		goto error;
	}

	memcpy (cherokee_spawn_shared.mem, tmp.buf, tmp.len);
	cherokee_buffer_mrproper (&tmp);

	/* Wake up the spawning thread */
	sem_unlock (cherokee_spawn_sem, SEM_LAUNCH_START);

	/* Wait for the PID */
	sem_adquire (cherokee_spawn_sem, SEM_LAUNCH_READY);

	if (*pid_shm == -1) {
		TRACE(ENTRIES, "Could not get the PID of: '%s'\n", binary->buf);
		goto error;
	}

	if (*pid_shm == pid_prev) {
		TRACE(ENTRIES, "Could not the new PID, previously it was %d\n", pid_prev);
		goto error;
	}

	TRACE(ENTRIES, "Successfully launched PID=%d\n", *pid_shm);
	*pid_ret = *pid_shm;

	CHEROKEE_MUTEX_UNLOCK (&spawning_mutex);
	return ret_ok;

error:
	/* Fix: release 'tmp' on the error path too — the over-length
	 * bail-out above used to leak it (mrproper is safe to repeat). */
	cherokee_buffer_mrproper (&tmp);
	CHEROKEE_MUTEX_UNLOCK (&spawning_mutex);
	return ret_error;
#else
	return ret_not_found;
#endif /* HAVE_SYSV_SEMAPHORES */
}