// In-place "extra mangle" pass for V2-tagged PRX headers: wraps the
// 0xA0-byte payload in a 0x14-byte command header, runs the KIRK
// decrypt-with-IV-0 command over the whole thing, and copies the result
// back over the caller's buffer.
//
// buffer1   - 0xA0 bytes, transformed in place.
// codeExtra - key-selector value placed in the command header.
//
// NOTE: the KIRK call's return value is ignored, matching the original code.
static void ExtraV2Mangle(u8* buffer1, u8 codeExtra) {
	u8 work[ROUNDUP16(0x14+0xA0)];
	u32_le* header = (u32_le*)work;

	// Payload goes after the 0x14-byte header.
	memcpy(work + 0x14, buffer1, 0xA0);

	header[0] = 5;          // command mode
	header[1] = 0;
	header[2] = 0;
	header[3] = codeExtra;  // key selector — presumably a KIRK keyseed; confirm against KIRK docs
	header[4] = 0xA0;       // payload length in bytes

	sceUtilsBufferCopyWithRange(work, 0x14 + 0xA0, work, 0x14 + 0xA0, KIRK_CMD_DECRYPT_IV_0);

	// Copy the decrypted result back to the caller's buffer.
	memcpy(buffer1, work, 0xA0);
}
// Decrypts a type-2 / type-6 tagged PRX image in place.
//
// The 0x150-byte encrypted header is shuffled (via a series of offset-exact
// memcpys) into the layout the crypto engine expects, a SHA-1 digest is
// verified, the per-tag key material is mixed in, and finally the engine is
// asked to decrypt the body. The statement order below is load-bearing —
// several copies overlap regions written by earlier copies.
//
// inbuf  - encrypted image (may alias outbuf).
// outbuf - receives the decrypted image; also used as scratch.
// size   - total input size in bytes; must be at least 0x160.
// tag    - tag value selecting the TAG_INFO2 key entry.
//
// Returns the decrypted data size (read from offset 0xB0 of the header) on
// success, or a negative error code / MISSING_KEY on failure.
static int DecryptPRX2(const u8 *inbuf, u8 *outbuf, u32 size, u32 tag) {
	const TAG_INFO2 *pti = GetTagInfo2(tag);
	if (!pti) {
		return -1;
	}
	if (!HasKey(pti->code)) {
		return MISSING_KEY;
	}

	// only type2 and type6 can be process by this code.
	if(pti->type!=2 && pti->type!=6)
		return -12;

	// Expected plaintext size, stored little-endian at 0xB0 in the header.
	s32_le retsize = *(const s32_le *)&inbuf[0xB0];
	u8 tmp1[0x150] = {0};
	u8 tmp2[ROUNDUP16(0x90+0x14)] = {0};
	u8 tmp3[ROUNDUP16(0x90+0x14)] = {0};
	u8 tmp4[ROUNDUP16(0x20)] = {0};

	// Work entirely in outbuf from here on.
	// NOTE(review): this copy happens before the size checks below — with a
	// too-small `size` the copy is still performed; confirm callers always
	// pass a buffer of at least `size` bytes.
	if (inbuf != outbuf)
		memcpy(outbuf, inbuf, size);

	if (size < 0x160) {
		return -2;
	}

	if (((int)size - 0x150) < retsize) {
		return -4;
	}

	// Keep an untouched copy of the original 0x150-byte header.
	memcpy(tmp1, outbuf, 0x150);

	int i;
	u8 *p = tmp2 + 0x14;

	// Writes 0x90 bytes to tmp2 + 0x14: nine copies of the 0x10-byte tag key,
	// with the first byte of each copy replaced by its block index.
	for (i = 0; i < 9; i++) {
		memcpy(p+(i<<4), pti->key, 0x10);
		p[(i << 4)] = i;   // really? this is very odd
	}

	// Derive the per-image key stream from the tag key.
	if (Scramble((u32_le *)tmp2, 0x90, pti->code) < 0) {
		return -5;
	}

	// Rearrange header fields from the on-disk layout into the layout the
	// crypto engine expects. Offsets are exact; do not reorder.
	memcpy(outbuf, tmp1+0xD0, 0x5C);
	memcpy(outbuf+0x5C, tmp1+0x140, 0x10);
	memcpy(outbuf+0x6C, tmp1+0x12C, 0x14);
	memcpy(outbuf+0x80, tmp1+0x080, 0x30);
	memcpy(outbuf+0xB0, tmp1+0x0C0, 0x10);
	memcpy(outbuf+0xC0, tmp1+0x0B0, 0x10);
	memcpy(outbuf+0xD0, tmp1+0x000, 0x80);

	// Decrypt the 0x60-byte block at outbuf+0x5C with the tag code.
	memcpy(tmp3+0x14, outbuf+0x5C, 0x60);

	if (Scramble((u32_le *)tmp3, 0x60, pti->code) < 0) {
		return -6;
	}

	memcpy(outbuf+0x5C, tmp3, 0x60);
	// Stash the expected SHA-1 digest (0x14 bytes at outbuf+0x6C) before it
	// gets overwritten below.
	memcpy(tmp3, outbuf+0x6C, 0x14);
	memcpy(outbuf+0x70, outbuf+0x5C, 0x10);

	if(pti->type == 6) {
		// Type 6 preserves an extra 0x20-byte field (restored after decrypt).
		memcpy(tmp4, outbuf+0x3C, 0x20);
		memcpy(outbuf+0x50, tmp4, 0x20);
		memset(outbuf+0x18, 0, 0x38);
	}else
		memset(outbuf+0x18, 0, 0x58);

	memcpy(outbuf+0x04, outbuf, 0x04);
	*((u32_le *)outbuf) = 0x014C;
	memcpy(outbuf+0x08, tmp2, 0x10);

	/* sha-1 */
	// Command 0x0B computes SHA-1 over the buffer in place.
	// NOTE(review): 3000000 looks like an arbitrary "big enough" length
	// passed to the engine; the actual hashed length is taken from the
	// buffer header — confirm against the crypto-engine implementation.
	if (sceUtilsBufferCopyWithRange(outbuf, 3000000, outbuf, 3000000, 0x0B) != 0) {
		return -7;
	}

	// Computed digest (first 0x14 bytes) must match the stored one.
	if (memcmp(outbuf, tmp3, 0x14) != 0) {
		return -8;
	}

	// XOR-decrypt the 0x40-byte key block using the scrambled key stream.
	for (i=0; i<0x40; i++) {
		tmp3[i+0x14] = outbuf[i+0x80] ^ tmp2[i+0x10];
	}

	if (Scramble((u32_le *)tmp3, 0x40, pti->code) != 0) {
		return -9;
	}

	for (i=0; i<0x40; i++) {
		outbuf[i+0x40] = tmp3[i] ^ tmp2[i+0x50];
	}

	if (pti->type == 6) {
		// Restore the preserved type-6 field and set its mode/flag words.
		memcpy(outbuf+0x80, tmp4, 0x20);
		memset(outbuf+0xA0, 0, 0x10);
		*(u32_le*)&outbuf[0xA4] = 1;
		*(u32_le*)&outbuf[0xA0] = 1;
	} else {
		memset(outbuf+0x80, 0, 0x30);
		*(u32_le*)&outbuf[0xA0] = 1;
	}

	memcpy(outbuf+0xB0, outbuf+0xC0, 0x10);
	memset(outbuf+0xC0, 0, 0x10);

	// The real decryption: command 0x1 decrypts the prepared buffer; the
	// plaintext lands at outbuf+0x40 and is shifted to outbuf implicitly by
	// using overlapping in/out ranges.
	if (sceUtilsBufferCopyWithRange(outbuf, size, outbuf + 0x40, size - 0x40, 0x1) != 0) {
		return -1;
	}

	if (retsize < 0x150) {
		// Fill with 0
		memset(outbuf+retsize, 0, 0x150-retsize);
	}

	return retsize;
}
/**
 * Run the vertex shader on all vertices in the vertex queue.
 * Called by the draw module when the vertex cache needs to be flushed.
 *
 * NOTE: the entire implementation below is compiled out (#if 0); the live
 * code path is just assert(0), i.e. this entry point must not be reached
 * in the current build. The disabled body is kept for reference: it uploads
 * attribute-fetch code, per-attribute array info, viewport state and
 * uniforms to the SPU batch buffer, then dispatches the queued vertices in
 * SPU_VERTS_PER_BATCH-sized groups via mailbox messages.
 */
void
cell_vertex_shader_queue_flush(struct draw_context *draw)
{
#if 0
   struct cell_context *const cell = (struct cell_context *) draw->driver_private;
   struct cell_command_vs *const vs = &cell_global.command[0].vs;
   uint64_t *batch;
   struct cell_array_info *array_info;
   unsigned i, j;
   struct cell_attribute_fetch_code *cf;

   assert(draw->vs.queue_nr != 0);

   /* XXX: do this on statechange: */
   draw_update_vertex_fetch(draw);
   cell_update_vertex_fetch(draw);

   /* Upload the generated attribute-fetch code to the SPUs. */
   batch = cell_batch_alloc(cell, sizeof(batch[0]) + sizeof(*cf));
   batch[0] = CELL_CMD_STATE_ATTRIB_FETCH;
   cf = (struct cell_attribute_fetch_code *) (&batch[1]);
   cf->base = (uint64_t) cell->attrib_fetch.store;
   cf->size = ROUNDUP16((unsigned)((void *) cell->attrib_fetch.csr - (void *) cell->attrib_fetch.store));

   /* One CELL_CMD_STATE_VS_ARRAY_INFO command per vertex attribute. */
   for (i = 0; i < draw->vertex_fetch.nr_attrs; i++) {
      const enum pipe_format format = draw->vertex_element[i].src_format;
      /* Number of non-empty components (x/y/z/w) in this format. */
      const unsigned count = ((pf_size_x(format) != 0) + (pf_size_y(format) != 0) + (pf_size_z(format) != 0) + (pf_size_w(format) != 0));
      const unsigned size = pf_size_x(format) * count;

      batch = cell_batch_alloc(cell, sizeof(batch[0]) + sizeof(*array_info));
      batch[0] = CELL_CMD_STATE_VS_ARRAY_INFO;
      array_info = (struct cell_array_info *) &batch[1];
      assert(draw->vertex_fetch.src_ptr[i] != NULL);
      array_info->base = (uintptr_t) draw->vertex_fetch.src_ptr[i];
      array_info->attr = i;
      array_info->pitch = draw->vertex_fetch.pitch[i];
      array_info->size = size;
      array_info->function_offset = cell->attrib_fetch_offsets[i];
   }

   /* Upload viewport state. */
   batch = cell_batch_alloc(cell, sizeof(batch[0]) + sizeof(struct pipe_viewport_state));
   batch[0] = CELL_CMD_STATE_VIEWPORT;
   (void) memcpy(&batch[1], &draw->viewport, sizeof(struct pipe_viewport_state));

   /* Upload the uniform (constant) buffer address. */
   {
      uint64_t uniforms = (uintptr_t) draw->user.constants;
      batch = cell_batch_alloc(cell, 2 *sizeof(batch[0]));
      batch[0] = CELL_CMD_STATE_UNIFORMS;
      batch[1] = uniforms;
   }

   cell_batch_flush(cell);

   vs->opcode = CELL_CMD_VS_EXECUTE;
   vs->nr_attrs = draw->vertex_fetch.nr_attrs;

   (void) memcpy(vs->plane, draw->plane, sizeof(draw->plane));
   vs->nr_planes = draw->nr_planes;

   /* Dispatch queued vertices in fixed-size batches to SPU 0. */
   for (i = 0; i < draw->vs.queue_nr; i += SPU_VERTS_PER_BATCH) {
      const unsigned n = MIN2(SPU_VERTS_PER_BATCH, draw->vs.queue_nr - i);

      for (j = 0; j < n; j++) {
         vs->elts[j] = draw->vs.queue[i + j].elt;
         vs->vOut[j] = (uintptr_t) draw->vs.queue[i + j].vertex;
      }
      /* Pad the final partial batch by repeating the first element. */
      for (/* empty */; j < SPU_VERTS_PER_BATCH; j++) {
         vs->elts[j] = vs->elts[0];
         /* NOTE(review): this reads queue[i + j] with j >= n, i.e. past the
          * end of the queued range — looks like it should be queue[i] or
          * vs->vOut[0]; harmless only because this code is #if 0'd out. */
         vs->vOut[j] = (uintptr_t) draw->vs.queue[i + j].vertex;
      }

      vs->num_elts = n;
      send_mbox_message(cell_global.spe_contexts[0], CELL_CMD_VS_EXECUTE);

      /* Wait for the SPU to finish this batch before reusing vs. */
      cell_flush_int(cell, CELL_FLUSH_WAIT);
   }

   draw->vs.post_nr = draw->vs.queue_nr;
   draw->vs.queue_nr = 0;
#else
   assert(0);
#endif
}