Example #1
void
win32_dealloc(struct event_base *_base)
{
	struct win32op *win32op = _base->evbase;

	evsig_dealloc(_base);
	if (win32op->readset_in)
		mm_free(win32op->readset_in);
	if (win32op->writeset_in)
		mm_free(win32op->writeset_in);
	if (win32op->readset_out)
		mm_free(win32op->readset_out);
	if (win32op->writeset_out)
		mm_free(win32op->writeset_out);
	if (win32op->exset_out)
		mm_free(win32op->exset_out);
	/* XXXXX free the tree. */

	memset(win32op, 0, sizeof(*win32op));
	mm_free(win32op);
}
Example #2
int synth_record_unload(struct query_module *self)
{
	mm_free(self->mm, self->ctx);
	return KNOT_EOK;
}
Example #3
static int32 genericFilterCloseAbort( FILELIST * filter, int32 fAbort )
{
  /* close transform file and dismount transform device if no other
     filter of this type is active */

  int32 result = 0 ;
  DEVICELIST *dlist ;
  FILELIST *uflptr ;

  HQASSERT( filter , "filter NULL in genericFilterCloseAbort." ) ;

  HQTRACE( debug_filters , ( "In genericFilterCloseAbort" )) ;

  dlist = theIDeviceList( filter ) ;

  SetIClosingFlag( filter ) ;

  if ( isIOutputFile( filter ))
    result = ( *theIMyFlushFile( filter ))( filter ) ;

  uflptr = theIUnderFile( filter ) ;

  if ( uflptr && isICST( filter ) &&
       isIOpenFileFilterById( theIUnderFilterId( filter ) , uflptr )) {
    /* While this filter may be being closed implicitly, the closing of the
     * source is explicit */
    if (( *theIMyCloseFile( uflptr ))( uflptr, CLOSE_EXPLICIT ) == EOF )
      result = EOF ;
  }

  if ( dlist ) {
    DEVICE_FILEDESCRIPTOR desc = theIDescriptor( filter ) ;

    HQASSERT( theIBuffer( filter ) != NULL , "encountered bad filter" ) ;

    if ( fAbort )
     (void)( *theIAbortFile( dlist ))( dlist , desc ) ;
    else
     (void)( *theICloseFile( dlist ))( dlist , desc ) ;

    (void)closeReadFileProgress( filter ) ;

    /* call to dismount device unconditional */
    (void)( *theIDevDismount( dlist ))( dlist ) ;

    device_free(dlist) ;

    theIDeviceList( filter ) = NULL ;
  }

  if (theIBuffer(filter)) {
    mm_free( mm_pool_temp ,
             ( mm_addr_t )( theIBuffer( filter ) - 4 ) ,
             ( mm_size_t )( theIBufferSize( filter ) + 4 )) ;
    theIBuffer( filter ) = NULL ;
  }

  ClearIClosingFlag( filter ) ;
  SetIEofFlag( filter ) ;
  if ( ! isIRewindable( filter ))
    ClearIOpenFlag( filter ) ;

  return result ;
}
Example #4
void *mm_realloc(void *ptr, size_t size)
{
    if (size == 0) {
        mm_free(ptr);
        return NULL;
    }
    /* If oldptr is NULL, then this is just malloc. */
    if (ptr == NULL)
    {
        return (mm_malloc(size));
    }
      
    // get the properly aligned size
    size_t asize,currentSize;

    if (size <= DSIZE)
        asize = 2 * DSIZE;
    else
        asize = DSIZE * ((size + (DSIZE) + (DSIZE-1))/ DSIZE);


    currentSize = GET_SIZE(HDRP(ptr));


    if (asize < currentSize)
    {
        size_t splitBlockSize = currentSize-asize;
        if(splitBlockSize>= DSIZE )
        {
            // If, after the realloc, the two blocks
            // are both bigger than the minimum size for
            // a memory block, split the blocks:
            // set the header and footer for the realloc'd block
            // and the header and footer for the smaller split block.
            // This preserves heap consistency.
            // Finally, free the smaller block so that it can be reused.
            PUT(HDRP(ptr), PACK(asize, 1));
            PUT(FTRP(ptr), PACK(asize, 1));

            PUT(HDRP(NEXT_BLKP(ptr)), PACK(currentSize-asize, 1));
            PUT(FTRP(NEXT_BLKP(ptr)), PACK(currentSize-asize, 1));

            mm_free(NEXT_BLKP(ptr));
            return ptr;
        }
        else
        {
            // Otherwise do not touch the size of the malloc'd block;
            // just return the original pointer.
            return ptr;
        }
       
    }
    else if (asize > currentSize)
    {

        if(isNextEiplog(ptr) == 1)
        {

            // If the current block is the last block on the heap,
            // just grow the heap with mem_sbrk and extend
            // the current block in place. This avoids an
            // unnecessary malloc and memcpy.

            size_t payLoadSize = (asize- currentSize) + WSIZE;

            // extend the heap
            mem_sbrk(payLoadSize);
            size_t newSize = currentSize + payLoadSize;
            PUT(HDRP(ptr), PACK(newSize-WSIZE, 1));       // Reset the new header size
            PUT(FTRP(ptr), PACK(newSize-WSIZE, 1));       // Reset the new footer size
            PUT(HDRP(NEXT_BLKP(ptr)), PACK(0, 1));        // new epilogue header
            // return the original pointer
            return ptr;
        }
        else
        {
            void* newptr = mm_malloc(size);
            memcpy(newptr, ptr, currentSize);
            mm_free(ptr);
            return newptr;
        }
    }
    else
    {
        return ptr;
    }
      
}
Example #5
/**
 * @brief	Create a new session for a given web connection.
 * @note	The session stores the following data points: remote IP address, request path, application name, the specified http hostname,
 * 			the remote client's user agent string, the server's host number, a unique session id, the server's current timestamp, a randomly-
 * 			generated session key for authentication, and an encrypted token for the session returned to the user as a cookie.
 * @param	con			a pointer to the connection underlying the web session.
 * @param	path		a pointer to a managed string containing the pathname of the generating request (should be "/portal/camel").
 * @param	application	a pointer to a managed string containing the name of the parent application of the session (should be "portal").
 * @return	NULL on failure or a pointer to a newly allocated session object for the specified connection.
 */
session_t *sess_create(connection_t *con, stringer_t *path, stringer_t *application) {

	session_t *output;
	multi_t key = { .type = M_TYPE_UINT64, .val.u64 = 0 };

	if (!(output = mm_alloc(sizeof(session_t)))) {
		log_pedantic("Unable to allocate %zu bytes for a session context.", sizeof(session_t));
		return NULL;
	}
	else if (pthread_mutex_init(&(output->lock), NULL) != 0) {
		log_pedantic("Unable to initialize reference lock for new user session.");
		mm_free(output);
		return NULL;
	} else if (!(output->compositions = inx_alloc(M_INX_LINKED, &sess_release_composition))) {
		log_pedantic("Unable to allocate space for user session's compositions.");
		mm_free(output);
		return NULL;
	}

	if (!(ip_copy(&(output->warden.ip), con_addr(con, MEMORYBUF(64)))) ||
		(path && !(output->request.path = st_dupe_opts(MANAGED_T | HEAP | CONTIGUOUS, path))) ||
		(application && !(output->request.application = st_dupe_opts(MANAGED_T | HEAP | CONTIGUOUS, application))) ||
		(con->http.host && !(output->request.host = st_dupe_opts(MANAGED_T | HEAP | CONTIGUOUS, con->http.host))) ||
		(con->http.agent && !(output->warden.agent = st_dupe_opts(MANAGED_T | HEAP | CONTIGUOUS, con->http.agent))) ||
		!(output->warden.host = magma.host.number) || !(key.val.u64 = output->warden.number = sess_number()) ||
		!(output->warden.stamp = time(NULL)) || !(output->warden.key = sess_key()) || !(output->warden.token = sess_token(output))) {
		log_pedantic("Unable to initialize the session warden context.");
		sess_destroy(output);
		return NULL;
	}

	output->request.httponly = true;
	output->request.secure = (con_secure(con) == 1 ? true : false);

	sess_ref_add(output);

	if (inx_insert(objects.sessions, key, output) != 1) {
		log_pedantic("Unable to insert the session into the global context.");
		sess_ref_dec(output);
		sess_destroy(output);
		return NULL;
	}

	return output;
}
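
A minimal caller sketch (hypothetical, not from the Magma sources; it assumes con, path, and application were obtained from the surrounding request-handling code):

	session_t *session = sess_create(con, path, application);

	if (!session) {
		/* Allocation, warden initialization, or insertion into the global
		 * session index failed; sess_create() already logged the cause. */
		return;
	}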

/**
 * @brief	Try to retrieve the session associated with a client connection's supplied cookie.
 * @param	con				a pointer to the connection object sending the cookie.
 * @param	application		a managed string containing the application associated with the session.
 * @param	path			a managed string containing the path associated with the session.
 * @param	token			the encrypted user token retrieved from the supplied http cookie.
 * @return	1 if the cookie was found and valid, or one of the following values on failure:
 * 			 0 = Session not found.
 *			-1 = Server error.
 *			-2 = Invalid token.
 *			-3 = Security violation / incorrect user-agent.
 *			-4 = Security violation / incorrect session key.
 *			-5 = Security violation / incorrect source address.
 *			-6 = Session terminated by logout.
 *			-7 = Session timed out.
 */
int_t sess_get(connection_t *con, stringer_t *application, stringer_t *path, stringer_t *token) {

	uint64_t *numbers;
	scramble_t *scramble;
	stringer_t *binary, *encrypted;
	multi_t key = { .type = M_TYPE_UINT64, .val.u64 = 0 };
	int_t result = 1;

	/// Most session attributes need simple equality comparison, except for timeout checking. Make sure not to validate against a stale session that should have already timed out (which will have to be determined dynamically).
	encrypted = zbase32_decode(token);
	scramble = scramble_import(encrypted);
	binary = scramble_decrypt(magma.secure.sessions, scramble);

	st_cleanup(encrypted);

	if (!binary) {
		return 0;
	}

	numbers = st_data_get(binary);

	// QUESTION: Is this necessary? doesn't inx_find() lock the inx?
	inx_lock_read(objects.sessions);

	key.val.u64 = *(numbers + 2);

	if ((con->http.session = inx_find(objects.sessions, key))) {
		sess_ref_add(con->http.session);
	}

	inx_unlock(objects.sessions);
	st_free(binary);

	// Return if we didn't find the session or user.
	if (!con->http.session || !con->http.session->user) {
		return 0;
	}

	// We need to do full validation against the cookie and associated session.
	// First, the cookie.
	if ((*numbers != con->http.session->warden.host) || (*(numbers + 1) != con->http.session->warden.stamp) ||
			(*(numbers + 2) != con->http.session->warden.number)) {
		log_error("Received mismatched cookie for authenticated session { user = %s }", st_char_get(con->http.session->user->username));
		result = -2;
	} else if (*(numbers + 3) != con->http.session->warden.key) {
		log_error("Cookie contained an incorrect session key { user = %s }", st_char_get(con->http.session->user->username));
		result = -4;
	} else if (st_cmp_cs_eq(application, con->http.session->request.application)) {
		log_error("Cookie did not match session's application { user = %s }", st_char_get(con->http.session->user->username));
		result = -2;
	} else if (st_cmp_cs_eq(path, con->http.session->request.path)) {
		log_error("Cookie did not match session's path { user = %s }", st_char_get(con->http.session->user->username));
		result = -2;
	} else if (st_cmp_cs_eq(con->http.agent, con->http.session->warden.agent)) {
		log_error("Cookie contained a mismatched user agent { user = %s }", st_char_get(con->http.session->user->username));
		result = -3;
	} else if (con->http.session->request.secure != (con_secure(con) ? 1 : 0)) {
		log_error("Cookie was submitted from a mismatched transport layer { user = %s }", st_char_get(con->http.session->user->username));
		result = -5;
	} else if (!ip_address_equal(&(con->http.session->warden.ip), (ip_t *)con_addr(con, MEMORYBUF(64)))) {
		log_error("Cookie was submitted from a mismatched IP address { user = %s }", st_char_get(con->http.session->user->username));
		result = -5;
	}

	// Finally, do comparisons to see that we haven't timed out.
	/* Did we expire? */
	if (magma.http.session_timeout <= (time(NULL) - con->http.session->warden.stamp)) {
		log_pedantic("User submitted expired or invalidated cookie; marking for deletion { user = %s }", st_char_get(con->http.session->user->username));
		result = -7;
	}

	// QUESTION: This destruction needs a second look.
	if (result < 0) {

		if (!inx_delete(objects.sessions, key)) {
			log_pedantic("Unexpected error occurred attempting to delete expired cookie { user = %s }", st_char_get(con->http.session->user->username));
		}

		sess_ref_dec(con->http.session);
		//sess_destroy(con->http.session);
		con->http.session = NULL;
	}
	// Otherwise, if the last session status update is more than 10 minutes ago, check now to see if things are current.
	// QUESTION: Why is it 600 here and 120 elsewhere?
	else if ((time(NULL) - sess_refresh_stamp(con->http.session)) > 600) {
		sess_update(con->http.session);
	}

	return result;
}
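
Because sess_get() encodes its outcome in the sign and value of the return code, a caller typically dispatches on the codes documented above. A hypothetical sketch (not from the Magma sources):

	switch (sess_get(con, application, path, token)) {
	case 1:
		/* valid session; con->http.session now holds a referenced session */
		break;
	case 0:
		/* no session found: fall back to creating one via sess_create() */
		break;
	case -7:
		/* timed out: the stale session was deleted, prompt to log in again */
		break;
	default:
		/* -1 server error, -2 invalid token, -3/-4/-5 security violation,
		 * -6 terminated by logout */
		break;
	}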
Example #6
/**
 * @brief	Destroy and free a generic connection object after executing its protocol-specific destructor; update any statistics accordingly.
 * @param	con		a pointer to the connection to be destroyed.
 * @return	This function returns no value.
 */
void con_destroy(connection_t *con) {

	if (con && !con_decrement_refs(con)) {

		switch (con->server->protocol) {
			case (POP):

				if (con->network.ssl) {
					stats_decrement_by_name("pop.connections.secure");
				}

				stats_decrement_by_name("pop.connections.total");
				pop_session_destroy(con);
				break;
			case (IMAP):
				if (con->network.ssl) {
					stats_decrement_by_name("imap.connections.secure");
				}

				stats_decrement_by_name("imap.connections.total");
				imap_session_destroy(con);
				break;
			case (HTTP):
				if (con->network.ssl) {
					stats_decrement_by_name("http.connections.secure");
				}

				stats_decrement_by_name("http.connections.total");
				http_session_destroy(con);
				break;
			case (SMTP):
				if (con->network.ssl) {
					stats_decrement_by_name("smtp.connections.secure");
				}

				stats_decrement_by_name("smtp.connections.total");
				smtp_session_destroy(con);
				break;
			case (SUBMISSION):
				if (con->network.ssl) {
					stats_decrement_by_name("smtp.connections.secure");
				}

				stats_decrement_by_name("smtp.connections.total");
				smtp_session_destroy(con);
				break;
			case (MOLTEN):
				if (con->network.ssl) {
					stats_decrement_by_name("molten.connections.secure");
				}

				stats_decrement_by_name("molten.connections.total");
				molten_session_destroy(con);
				break;
			default:
				break;
		}

		if (con->network.ssl) {
			ssl_free(con->network.ssl);
		}

		if (con->network.sockd != -1) {
			close(con->network.sockd);
		}

		st_cleanup(con->network.buffer);
		st_cleanup(con->network.reverse.domain);
		mutex_destroy(&(con->lock));
		mm_free(con);
	}

	return;
}
Example #7
static void free_value_str(void *value)
{
	mm_free(value);
}
Example #8
static void dc_power_irp_worker(pw_irp_ctx *pwc)
{
	dc_process_power_irp(pwc->hook, pwc->irp);
	mm_free(pwc);
}
Example #9
static double bsr_load_mm(bsr_t **bsr, const char *filename, int b_size) {

	double start_time;
	double end_time;
	int i;

	int k;
	int blk_i; /* block row */
	int blk_j; /* block col */
	int blk_c; /* block count */
	int *blk_start;
	datatype_t **blk_vp;
	start_time = omp_get_wtime();

#if 0 /* COO -> BSR */
	mm_file_t *mm_file;
	int row;

	mm_file = mm_load(filename, 1);

	blk_start = malloc((mm_file->width / b_size + 1) * sizeof(int));
	memset(blk_start, -1, (mm_file->width / b_size + 1) * sizeof(int));
	blk_c = 0;

	for (i = 0; i < mm_file->nnz;) {
		blk_i = mm_file->data[i].row / b_size;

		printf(".");

		for (row = mm_file->data[i].row;
				i < mm_file->nnz && row == mm_file->data[i].row; i++) {

			blk_j = mm_file->data[i].col / b_size;

			if (blk_start[blk_j] != blk_i) {
				blk_start[blk_j] = blk_i;
				blk_c++;
			}
		}
	}
	free(blk_start);

	bsr_init(bsr, mm_file->width, mm_file->height, mm_file->nnz, b_size, blk_c);
	blk_c = 0;
	(*bsr)->rp[0] = 0;
	blk_vp = calloc((mm_file->width / b_size + 1), sizeof(datatype_t *));

	for (i = 0; i < mm_file->nnz;) {
		blk_i = mm_file->data[i].row / b_size;

		k = i;
		for (row = mm_file->data[i].row;
				i < mm_file->nnz && row == mm_file->data[i].row; i++) {

			blk_j = mm_file->data[i].col / b_size;

			if (blk_vp[blk_j] == NULL) {
				blk_vp[blk_j] = (*bsr)->v + blk_c * b_size * b_size;

				(*bsr)->ci[blk_c] = blk_j;
				blk_c++;
			}

			*(blk_vp[blk_j] + (b_size * (mm_file->data[i].row % b_size))
					+ (mm_file->data[i].col % b_size)) +=
					mm_file->data[i].value;
		}

		for (; k < i; k++) {
			blk_vp[mm_file->data[k].col / b_size] = NULL;
		}

		(*bsr)->rp[blk_i + 1] = blk_c;
	}
	free(blk_vp);
	mm_free(mm_file);

#else /* CSR -> BSR */

	/* This code has been inspired by
	 * https://github.com/scipy/scipy/blob/master/scipy/sparse/sparsetools/csr.h
	 */

	csr_t *csr = NULL;
	int j;

	/*
	 * TODO: we are creating BSR from a CSR matrix. We should load COO from
	 * MM and convert it to BSR.
	 */
	csr_from_mm(&csr, filename, 0);

	/* count blocks */
	blk_start = malloc((csr->_.w / b_size + 1) * sizeof(int));
	memset(blk_start, -1, (csr->_.w / b_size + 1) * sizeof(int));
	blk_c = 0;
	for (i = 0; i < csr->_.h; i++) {
		blk_i = i / b_size;
		for (j = csr->rp[i]; j < csr->rp[i + 1]; j++) {
			blk_j = csr->ci[j] / b_size;

			if (blk_start[blk_j] != blk_i) {
				blk_start[blk_j] = blk_i;
				blk_c++;
			}
		}
	}
	free(blk_start);

	bsr_init(bsr, csr->_.w, csr->_.h, csr->_.nnz, b_size, blk_c);
	blk_c = 0;
	(*bsr)->rp[0] = 0;
	blk_vp = calloc((csr->_.w / b_size + 1), sizeof(datatype_t *));
	for (i = 0; i < (csr->_.h / b_size); i++) {
		for (j = 0; j < b_size; j++) {
			blk_i = b_size * i + j;
			for (k = csr->rp[blk_i]; k < csr->rp[blk_i + 1]; k++) {
				blk_j = csr->ci[k] / b_size;

				if (blk_vp[blk_j] == NULL) {
					blk_vp[blk_j] = (*bsr)->v + blk_c * b_size * b_size;
					(*bsr)->ci[blk_c] = blk_j;
					blk_c++;
				}

				*(blk_vp[blk_j] + (b_size * j) + (csr->ci[k] % b_size)) +=
				csr->v[k];
			}
		}

		for (j = csr->rp[i * b_size]; j < csr->rp[(i + 1) * b_size]; j++) {
			blk_vp[csr->ci[j] / b_size] = NULL;
		}

		(*bsr)->rp[i + 1] = blk_c;
	}

	free(blk_vp);
	csr->_.f.free((vm_t*) csr);
#endif

	end_time = omp_get_wtime();
	return end_time - start_time;
}
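
To make the block-counting pass concrete, here is a toy illustration (not from the original project) of a 4x4 matrix with b_size = 2:

	/*
	 *   X . | . X      Row block 0 touches column blocks 0 and 1, and
	 *   . X | . .      row block 1 touches column block 1 only, so the
	 *   ----+----      counting pass ends with blk_c == 3 and three
	 *   . . | X .      dense 2x2 blocks are allocated in (*bsr)->v.
	 *   . . | . X
	 */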
Example #10
/**
 * Recursively loads the files from a directory and stores them using the tank interface.
 *
 * @param location The directory path to search for files.
 * @param check_collection The index that receives a verification object for each stored file.
 * @param opts The check tank options, including the storage engine passed to tank_store().
 * @return Returns false if an error occurs, otherwise true.
 */
bool_t check_tokyo_tank_load(char *location, inx_t *check_collection, check_tank_opt_t *opts) {

	int fd;
	multi_t key;
	DIR *working;
	struct stat info;
	check_tank_obj_t *obj;
	struct dirent *entry;
	char file[1024], *buffer;

	if (!(working = opendir(location))) {
		log_info("Unable to open the data path. {location = %s}", location);
		return false;
	}

	while (status() && (entry = readdir(working))) {

		// Reset.
		errno = 0;
		bzero(file, 1024);
		bzero(&info, sizeof(struct stat));

		// Build an absolute path.
		snprintf(file, 1024, "%s%s%s", location, "/", entry->d_name);

		// If we hit a directory, recursively call the load function.
		if (entry->d_type == DT_DIR && *(entry->d_name) != '.') {
			if (!check_tokyo_tank_load(file, check_collection, opts)) {
				return false;
			}
		}
		// Otherwise, if it's a regular file, try storing it.
		else if (entry->d_type == DT_REG && *(entry->d_name) != '.') {

			// Read the file.
			if ((fd = open(file, O_RDONLY)) < 0) {
				log_info("%s - open error", file);
				closedir(working);
				return false;
			}

			// How big is the file?
			if (fstat(fd, &info) != 0) {
				log_info("%s - stat error", file);
				closedir(working);
				close(fd);
				return false;
			}

			// Allocate a buffer.
			if (!(buffer = mm_alloc(info.st_size + 1))) {
				log_info("%s - malloc error", file);
				closedir(working);
				close(fd);
				return false;
			}

			// Clear the buffer.
			memset(buffer, 0, info.st_size + 1);

			// Read the file.
			if (read(fd, buffer, info.st_size) != info.st_size) {
				log_info("%s - read error", file);
				closedir(working);
				mm_free(buffer);
				close(fd);
				return false;
			}

			close(fd);

			// Data used for verification.
			if (!(obj = mm_alloc(sizeof(check_tank_obj_t)))) {
				log_info("check_tank allocation failed for the file %s", file);
				closedir(working);
				mm_free(buffer);
				return false;
			}

			obj->adler32 = hash_adler32(buffer, info.st_size);
			obj->fletcher32 = hash_fletcher32(buffer, info.st_size);
			obj->crc32 = hash_crc32(buffer, info.st_size);
			obj->crc64 = hash_crc64(buffer, info.st_size);
			obj->murmur32 = hash_murmur32(buffer, info.st_size);
			obj->murmur64 = hash_murmur64(buffer, info.st_size);

			// Request the next storage tank.
			obj->tnum = tank_cycle();

			// Try storing the file data.
			if (!(obj->onum = tank_store(TANK_CHECK_DATA_HNUM, obj->tnum, TANK_CHECK_DATA_UNUM, PLACER(buffer, info.st_size), opts->engine))) {
				log_info("tank_store failed for the file %s", file);
				closedir(working);
				mm_free(buffer);
				mm_free(obj);
				return false;
			}

			mm_free(buffer);

			key = mt_set_type(key, M_TYPE_UINT64);
			key.val.u64 = obj->onum;

			if (!inx_insert(check_collection, key, obj)) {
				log_info("inx_insert failed for the file %s", file);
				closedir(working);
				mm_free(obj);
				return false;
			}
		}
	}

	closedir(working);
	return true;
}
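
A hypothetical driver for the loader above (the path, the zero engine id, and the reuse of mm_free as the index release callback are assumptions, not taken from the actual test suite):

	inx_t *collection = inx_alloc(M_INX_LINKED, &mm_free);
	check_tank_opt_t opts = { .engine = 0 };	/* hypothetical engine id */

	if (!collection || !check_tokyo_tank_load("res/data", collection, &opts)) {
		log_info("Unable to load the sample data.");
	}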
Example #11
void user_free(void *mem)
{
//  printf("user_free: %p\n", mem);

  mm_free(&g_mmheap_user, mem);
}
Example #12
FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
                     size_t size)
{
  FAR struct mm_allocnode_s *oldnode;
  FAR struct mm_freenode_s  *prev;
  FAR struct mm_freenode_s  *next;
  size_t oldsize;
  size_t prevsize = 0;
  size_t nextsize = 0;
  FAR void *newmem;

  /* If oldmem is NULL, then realloc is equivalent to malloc */

  if (!oldmem)
    {
      return mm_malloc(heap, size);
    }

  /* If size is zero, then realloc is equivalent to free */

  if (size < 1)
    {
      mm_free(heap, oldmem);
      return NULL;
    }

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is an even multiple of our granule size.
   */

  size = MM_ALIGN_UP(size + SIZEOF_MM_ALLOCNODE);

  /* Map the memory chunk into an allocated node structure */

  oldnode = (FAR struct mm_allocnode_s *)((FAR char*)oldmem - SIZEOF_MM_ALLOCNODE);

  /* We need to hold the MM semaphore while we muck with the nodelist. */

  mm_takesemaphore(heap);

  /* Check if this is a request to reduce the size of the allocation. */

  oldsize = oldnode->size;
  if (size <= oldsize)
    {
      /* Handle the special case where we are not going to change the size
       * of the allocation.
       */

      if (size < oldsize)
        {
          mm_shrinkchunk(heap, oldnode, size);
        }

      /* Then return the original address */

      mm_givesemaphore(heap);
      return oldmem;
    }

  /* This is a request to increase the size of the allocation.  Get the
   * available sizes before and after the oldnode so that we can make the
   * best decision
   */

  next = (FAR struct mm_freenode_s *)((FAR char*)oldnode + oldnode->size);
  if ((next->preceding & MM_ALLOC_BIT) == 0)
    {
      nextsize = next->size;
    }

  prev = (FAR struct mm_freenode_s *)((FAR char*)oldnode - (oldnode->preceding & ~MM_ALLOC_BIT));
  if ((prev->preceding & MM_ALLOC_BIT) == 0)
    {
      prevsize = prev->size;
    }

  /* Now, check if we can extend the current allocation or not */

  if (nextsize + prevsize + oldsize >= size)
    {
      size_t needed   = size - oldsize;
      size_t takeprev = 0;
      size_t takenext = 0;

      /* Check if we can extend into the previous chunk and if the
       * previous chunk is smaller than the next chunk.
       */

      if (prevsize > 0 && (nextsize >= prevsize || nextsize < 1))
        {
          /* Can we get everything we need from the previous chunk? */

          if (needed > prevsize)
            {
              /* No, take the whole previous chunk and get the
               * rest that we need from the next chunk.
               */

              takeprev = prevsize;
              takenext = needed - prevsize;
            }
          else
            {
              /* Yes, take what we need from the previous chunk */

              takeprev = needed;
              takenext = 0;
            }

          needed = 0;
        }

      /* Check if we can extend into the next chunk and if we still need
       * more memory.
       */

      if (nextsize > 0 && needed)
        {
          /* Can we get everything we need from the next chunk? */

          if (needed > nextsize)
            {
              /* No, take the whole next chunk and get the rest that we
               * need from the previous chunk.
               */

              takeprev = needed - nextsize;
              takenext = nextsize;
            }
          else
            {
              /* Yes, take what we need from the next chunk */

              takeprev = 0;
              takenext = needed;
            }
        }

      /* Extend into the previous free chunk */

      newmem = oldmem;
      if (takeprev)
        {
          FAR struct mm_allocnode_s *newnode;

          /* Remove the previous node.  There must be a predecessor, but
           * there may not be a successor node.
           */

          DEBUGASSERT(prev->blink);
          prev->blink->flink = prev->flink;
          if (prev->flink)
            {
              prev->flink->blink = prev->blink;
            }

          /* Extend the node into the previous free chunk */

          newnode = (FAR struct mm_allocnode_s *)((FAR char*)oldnode - takeprev);

          /* Did we consume the entire preceding chunk? */

          if (takeprev < prevsize)
            {
              /* No.. just take what we need from the previous chunk and put
               * it back into the free list
               */

              prev->size        -= takeprev;
              newnode->size      = oldsize + takeprev;
              newnode->preceding = prev->size | MM_ALLOC_BIT;
              next->preceding    = newnode->size | (next->preceding & MM_ALLOC_BIT);

              /* Return the previous free node to the nodelist (with the new size) */

              mm_addfreechunk(heap, prev);
            }
          else
            {
              /* Yes.. update its size (newnode->preceding is already set) */

              newnode->size      += oldsize;
              newnode->preceding |= MM_ALLOC_BIT;
              next->preceding     = newnode->size | (next->preceding & MM_ALLOC_BIT);
            }

          /* Now we want to return newnode */

          oldnode = newnode;
          oldsize = newnode->size;

          /* Now we have to move the user contents 'down' in memory.  memcpy
           * should be safe for this.
           */

          newmem = (FAR void*)((FAR char*)newnode + SIZEOF_MM_ALLOCNODE);
          memcpy(newmem, oldmem, oldsize - SIZEOF_MM_ALLOCNODE);
        }

      /* Extend into the next free chunk */

      if (takenext)
        {
          FAR struct mm_freenode_s *newnode;
          FAR struct mm_allocnode_s *andbeyond;

          /* Get the chunk following the next node (which could be the tail
           * chunk)
           */

          andbeyond = (FAR struct mm_allocnode_s*)((char*)next + nextsize);

          /* Remove the next node.  There must be a predecessor, but there
           * may not be a successor node.
           */

          DEBUGASSERT(next->blink);
          next->blink->flink = next->flink;
          if (next->flink)
            {
              next->flink->blink = next->blink;
            }

          /* Extend the node into the next chunk */

          oldnode->size = oldsize + takenext;
          newnode       = (FAR struct mm_freenode_s *)((char*)oldnode + oldnode->size);

          /* Did we consume the entire next chunk? */

          if (takenext < nextsize)
            {
              /* No, take what we need from the next chunk and return it to
               * the free nodelist.
               */

              newnode->size        = nextsize - takenext;
              newnode->preceding   = oldnode->size;
              andbeyond->preceding = newnode->size | (andbeyond->preceding & MM_ALLOC_BIT);

              /* Add the new free node to the nodelist (with the new size) */

              mm_addfreechunk(heap, newnode);
            }
          else
            {
              /* Yes, just update some pointers. */

              andbeyond->preceding = oldnode->size | (andbeyond->preceding & MM_ALLOC_BIT);
            }
        }

      mm_givesemaphore(heap);
      return newmem;
    }

  /* The current chunk cannot be extended.  Just allocate a new chunk and copy */

  else
    {
      /* Allocate a new block.  On failure, realloc must return NULL but
       * leave the original memory in place.
       */

      mm_givesemaphore(heap);
      newmem = (FAR void*)mm_malloc(heap, size);
      if (newmem)
        {
          memcpy(newmem, oldmem, oldsize);
          mm_free(heap, oldmem);
        }

      return newmem;
    }
}
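
A short usage sketch against a private heap; it assumes the classic NuttX-style mm_initialize() entry point, which is not shown in this example:

  static struct mm_heap_s g_myheap;
  static char g_arena[64 * 1024];

  void example(void)
  {
    /* mm_initialize() is assumed to carve the arena into a managed heap */

    mm_initialize(&g_myheap, g_arena, sizeof(g_arena));

    FAR void *p = mm_malloc(&g_myheap, 100);
    p = mm_realloc(&g_myheap, p, 300); /* may grow in place into a free neighbor */
    mm_free(&g_myheap, p);
  }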
Example #13
/*
 * mm_realloc - Implemented simply in terms of mm_malloc and mm_free
 * Resizes a block previously allocated by mm_malloc
 * TODO: after switching to the next-fit strategy, this seems to loop forever here
 */
void *mm_realloc(void *bp, size_t size)
{
    if (bp == NULL)
        return mm_malloc(size);
    else if (0 == size) {
        mm_free(bp);
        return NULL;
    }

    /* check if bp is aligned */
    if ((uint32_t)bp % ALIGNMENT != 0)
        return NULL;

    void *new_bp = NULL;
    size_t old_size = GET_SIZE(HDRP(bp));
    size_t new_size = ALIGN(size + DSIZE); /* don't forget the size of the header and footer! */
    size_t frag_size;
    if (new_size > old_size) {
        /*
         * The logic in this IF feels redundant; just finding a larger free
         * block through malloc() would be enough.
         *
         * 2015-9-18
         * Getting a larger block through malloc() does work, but in this
         * implicit-list implementation every block has to be traversed to
         * find a free one, so it is very inefficient.
         * */

        /**
         * Check whether the adjacent next block is a free block big enough
         * to hold (new_size - old_size); if it is, simply extend the
         * current block into the next one.
         */
        if (!GET_ALLOC(HDRP(NEXT_BLKP(bp))) &&
                GET_SIZE(HDRP(NEXT_BLKP(bp))) >= (new_size - old_size)) {
             /* next block is large enough, bp does not need to change */

            frag_size = GET_SIZE(HDRP(NEXT_BLKP(bp))) - (new_size - old_size);

            if (frag_size >= MIN_BLK) {
                PUT(HDRP(bp), PACK(new_size, 1));
                PUT(FTRP(bp), PACK(new_size, 1));

                PUT(HDRP(NEXT_BLKP(bp)), PACK(frag_size, 0));
                PUT(FTRP(NEXT_BLKP(bp)), PACK(frag_size, 0));
            } else {
                new_size = old_size + GET_SIZE(HDRP(NEXT_BLKP(bp)));
                PUT(HDRP(bp), PACK(new_size, 1));
                PUT(FTRP(bp), PACK(new_size, 1));
            }

            new_bp = bp;

        } else {
            /* next block isn't large enough, bp needs to point at a larger region */
            if ((new_bp = find_fit(last_found, new_size)) == NULL)
                new_bp = extend_heap(MAX(new_size, CHUNKSIZE) / WSIZE);

            place(new_bp, new_size);
            /* copy payload from old block to new block */
            memcpy(new_bp, bp, old_size - DSIZE);
            /* free old block */
            mm_free(bp);
        }
    } else if (new_size < old_size) {
        /* if new_size < old_size, check if need to split */
        if (old_size - new_size >= MIN_BLK) {
            PUT(HDRP(bp), PACK(new_size, 1));
            PUT(FTRP(bp), PACK(new_size, 1));
            /* split a new free block */
            PUT(HDRP(NEXT_BLKP(bp)), PACK(old_size - new_size, 0));
            PUT(FTRP(NEXT_BLKP(bp)), PACK(old_size - new_size, 0));
        }

        new_bp = bp;
    }

    /* mm_check(); */

    return new_bp;
}
Example #14
void *
mm_realloc(void *ptr, size_t size){
    if(size <= 0){
        mm_free(ptr);
        return NULL;
    }

    if(ptr == NULL){
        ptr = mm_malloc(size);
        return ptr;
    }

    if(size > 0){
        size_t currentsize = GET_SIZE(ptr);
        size_t newsize = ALIGN(size + OVERHEAD);

        if(newsize <= currentsize){

            /*void *newbp ;
            if ((currentsize - newsize) >= BLKSIZE) {
                SET_HDRP(ptr, PACK(newsize, 1));
                SET_FTRP(ptr, PACK(newsize, 1));
                newbp=ptr;
                mm_remove(ptr);
                ptr = NEXT_BLKP(ptr);
                SET_HDRP(ptr, PACK(currentsize-newsize, 0));
                SET_FTRP(ptr, PACK(currentsize-newsize, 0));
                coalesce(ptr);
                return newbp;
            }
            else {*/
                //return ptr;
            //}
            return ptr;

        } /* Handle the case where the new size is greater than the current size */
        else {
            size_t next_alloc = GET_ALLOC(NEXT_BLKP(ptr));
            size_t csize;
            size_t asize;
            /* next block is free and the size of the two blocks is greater than or equal the new size  */

            if(!next_alloc && ((csize = currentsize + GET_SIZE(NEXT_BLKP(ptr)))) >= newsize){
                mm_delete(NEXT_BLKP(ptr));
                SET_HDRP(ptr, PACK(csize, 1));
                SET_FTRP(ptr, PACK(csize, 1));
                return ptr;
            }
            /* if bp is the last block before epilogue */
            else if(GET_SIZE(NEXT_BLKP(ptr)) == 0){
                csize = newsize - currentsize;
                void *temp = extend_heap(csize);
                asize = currentsize + GET_SIZE(temp);
                SET_HDRP(ptr, PACK(asize, 1));
                SET_FTRP(ptr, PACK(asize, 1));
                return ptr;
            }
            /* next block is free and the block is the last block before the epilogue */

            else if(!next_alloc && ((GET_SIZE(NEXT_BLKP(NEXT_BLKP(ptr)))) == 0)){
                csize = newsize - currentsize + GET_SIZE(NEXT_BLKP(ptr));
                void *temp = extend_heap(csize);
                asize = currentsize + GET_SIZE(temp);
                SET_HDRP(ptr, PACK(asize, 1));
                SET_FTRP(ptr, PACK(asize, 1));
                return ptr;
            }

           /* otherwise there is no choice left instead to increase the heap size */

            else {
                void *newbp = mm_malloc(newsize);
                place(newbp, newsize);
                /* copy only the old payload; copying newsize bytes would
                   read past the end of the smaller old block */
                memcpy(newbp, ptr, currentsize - OVERHEAD);
                mm_free(ptr);
                return newbp;
            }
        }
    }else{
        return NULL;
    }
}
Example #15
struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct sockaddr_storage ss;
	int socklen = sizeof(ss);
	struct evconnlistener_iocp *lev;
	int i;

	flags |= LEV_OPT_THREADSAFE;

	if (!base || !event_base_get_iocp(base))
		goto err;

	/* XXXX duplicate code */
	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			goto err;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			goto err;
	}
	if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
		event_sock_warn(fd, "getsockname");
		goto err;
	}
	lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
	if (!lev) {
		event_warn("calloc");
		goto err;
	}
	lev->base.ops = &evconnlistener_iocp_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;
	lev->base.enabled = 1;

	lev->port = event_base_get_iocp(base);
	lev->fd = fd;
	lev->event_base = base;


	if (event_iocp_port_associate(lev->port, fd, 1) < 0)
		goto err_free_lev;

	EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);

	lev->n_accepting = N_SOCKETS_PER_LISTENER;
	lev->accepting = mm_calloc(lev->n_accepting,
	    sizeof(struct accepting_socket *));
	if (!lev->accepting) {
		event_warn("calloc");
		goto err_delete_lock;
	}
	for (i = 0; i < lev->n_accepting; ++i) {
		lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
		if (!lev->accepting[i]) {
			event_warnx("Couldn't create accepting socket");
			goto err_free_accepting;
		}
		if (cb && start_accepting(lev->accepting[i]) < 0) {
			event_warnx("Couldn't start accepting on socket");
			EnterCriticalSection(&lev->accepting[i]->lock);
			free_and_unlock_accepting_socket(lev->accepting[i]);
			goto err_free_accepting;
		}
		++lev->base.refcnt;
	}

	iocp_listener_event_add(lev);

	return &lev->base;

err_free_accepting:
	mm_free(lev->accepting);
	/* XXXX free the other elements. */
err_delete_lock:
	EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
err_free_lev:
	mm_free(lev);
err:
	/* Don't close the fd, it is caller's responsibility. */
	return NULL;
}
Example #16
unsigned long fs_elf_check_prepare(struct file *file,unsigned char **argv, unsigned char **env,unsigned long *t_argc, unsigned long *t_argv,unsigned long  *stack_len, unsigned long *aux_addr,unsigned char **elf_interpreter, unsigned long *tmp_stackp) {
	struct elf_phdr *elf_phdata=0;
	struct elf_phdr *eppnt;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	Elf64_Addr p_entry;
	unsigned long tmp_stack_top=0;

	error = 0;
	fs_lseek(file, 0, 0);
	retval = fs_read(file, (unsigned char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex)) {
		error = -1;
		return 0;
	}
	if (ut_memcmp((unsigned char *) elf_ex.e_ident, (unsigned char *) ELFMAG, SELFMAG) != 0) {
		error = -2;
		return 0;
	}

	if (elf_ex.e_type == ET_DYN)  elf_ex.e_type=ET_EXEC;
	if (elf_ex.e_type != ET_EXEC || !elf_check_arch(&elf_ex)) {
		DEBUG("error:(not executable type or mismatch in architecture %x  %x %x \n",elf_ex.e_type,elf_ex.e_phnum,elf_check_arch(&elf_ex));
		error = -3;
		return 0;
	}

	/* Now read in all of the header information */
	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	elf_phdata = mm_malloc(j, 0);
	if (!elf_phdata) {
		error = -4;
		return 0;
	}

	eppnt = elf_phdata;
	fs_lseek(file, (unsigned long) elf_ex.e_phoff, 0);
	retval = fs_read(file, (unsigned char *) eppnt, j);
	if (retval != j) {
		goto out;
	}

	p_entry = elf_ex.e_entry;
	*elf_interpreter=0;
	for (i = 0; i < elf_ex.e_phnum; i++, eppnt++) /* mmap all loadable program headers */
	{
		if (eppnt->p_type == PT_INTERP){
			*elf_interpreter = (char *) ut_calloc(eppnt->p_filesz+1);
			fs_lseek(file, (unsigned long) eppnt->p_offset, 0);
			retval = fs_read(file, (unsigned char *) *elf_interpreter, eppnt->p_filesz);
			//ut_printf(" interpreter :%s: \n",*elf_interpreter);
			break;
		}
	}

	tmp_stack_top = setup_userstack(argv, env, stack_len, t_argc, t_argv, aux_addr, *elf_interpreter);
	*tmp_stackp=tmp_stack_top;
	if (tmp_stack_top == 0) {
		goto out;
	}
	tmp_stack_top = tmp_stack_top + (MAX_USERSPACE_STACK_TEMPLEN - *stack_len);

out:
	if (elf_phdata) {
	 	mm_free(elf_phdata);
	}
	if (tmp_stack_top==0 && *elf_interpreter!=0){
		ut_free(*elf_interpreter);
	}
	return tmp_stack_top;
}
Example #17
static void *
epoll_init(struct event_base *base)
{
	int epfd = -1;
	struct epollop *epollop;

#ifdef EVENT__HAVE_EPOLL_CREATE1
	/* First, try the shiny new epoll_create1 interface, if we have it. */
	epfd = epoll_create1(EPOLL_CLOEXEC);
#endif
	if (epfd == -1) {
		/* Initialize the kernel queue using the old interface.  (The
		   size field is ignored since 2.6.8.) */
		if ((epfd = epoll_create(32000)) == -1) {
			if (errno != ENOSYS)
				event_warn("epoll_create");
			return (NULL);
		}
		evutil_make_socket_closeonexec(epfd);
	}

	if (!(epollop = mm_calloc(1, sizeof(struct epollop)))) {
		close(epfd);
		return (NULL);
	}

	epollop->epfd = epfd;

	/* Initialize fields */
	epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event));
	if (epollop->events == NULL) {
		mm_free(epollop);
		close(epfd);
		return (NULL);
	}
	epollop->nevents = INITIAL_NEVENT;

	if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 ||
	    ((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 &&
		evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL)) {

		base->evsel = &epollops_changelist;
	}

#ifdef USING_TIMERFD
	/*
	  The epoll interface ordinarily gives us one-millisecond precision,
	  so on Linux it makes perfect sense to use the CLOCK_MONOTONIC_COARSE
	  timer.  But when the user has set the new PRECISE_TIMER flag for an
	  event_base, we can try to use timerfd to give them finer granularity.
	*/
	if ((base->flags & EVENT_BASE_FLAG_PRECISE_TIMER) &&
	    base->monotonic_timer.monotonic_clock == CLOCK_MONOTONIC) {
		int fd;
		fd = epollop->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
		if (epollop->timerfd >= 0) {
			struct epoll_event epev;
			memset(&epev, 0, sizeof(epev));
			epev.data.fd = epollop->timerfd;
			epev.events = EPOLLIN;
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, fd, &epev) < 0) {
				event_warn("epoll_ctl(timerfd)");
				close(fd);
				epollop->timerfd = -1;
			}
		} else {
			if (errno != EINVAL && errno != ENOSYS) {
				/* These errors probably mean that we were
				 * compiled with timerfd/TFD_* support, but
				 * we're running on a kernel that lacks those.
				 */
				event_warn("timerfd_create");
			}
			epollop->timerfd = -1;
		}
	} else {
		epollop->timerfd = -1;
	}
#endif

	evsig_init_(base);

	return (epollop);
}
Example #18
//unsigned long fs_loadElfLibrary(struct file *file, unsigned long tmp_stack, unsigned long stack_len, unsigned long aux_addr) {
unsigned long fs_elf_load(struct file *file,unsigned long tmp_stack, unsigned long stack_len, unsigned long aux_addr) {
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss_start, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	Elf64_Addr p_entry;
	unsigned long *aux_vec, aux_index, load_addr;
	struct task_struct *task=g_current_task;

	error = 0;
	fs_lseek(file, 0, 0);
	retval = fs_read(file, (unsigned char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex)) {
		error = -1;
		goto out;
	}

	if (ut_memcmp((unsigned char *) elf_ex.e_ident, (unsigned char *) ELFMAG, SELFMAG) != 0) {
		error = -2;
		goto out;
	}

	if (elf_ex.e_type == ET_DYN)  elf_ex.e_type=ET_EXEC;
	/* First of all, some simple consistency checks */
	//if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	if (elf_ex.e_type != ET_EXEC || !elf_check_arch(&elf_ex)) {
		DEBUG("error:(not executable type or mismatch in architecture %x  %x %x \n",elf_ex.e_type,elf_ex.e_phnum,elf_check_arch(&elf_ex));
		error = -3;
		goto out;
	}

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	elf_phdata = mm_malloc(j, 0);
	if (!elf_phdata) {
		error = -4;
		goto out;
	}

	eppnt = elf_phdata;
	fs_lseek(file, (unsigned long) elf_ex.e_phoff, 0);
	retval = fs_read(file, (unsigned char *) eppnt, j);
	if (retval != j) {
		error = -5;
		goto out;
	}
	DEBUG("START address : %x offset :%x \n",ELF_PAGESTART(eppnt->p_vaddr),eppnt->p_offset);
	for (j = 0, i = 0; i < elf_ex.e_phnum; i++){
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	}
	if (j == 0) {
		error = -6;
		goto out;
	}
	load_addr = ELF_PAGESTART(eppnt->p_vaddr);
	p_entry = elf_ex.e_entry;
	task->mm->start_code = 0;
	task->mm->end_code =0;
	for (i = 0; i < elf_ex.e_phnum; i++, eppnt++) /* mmap all loadable program headers */
	{
		if (eppnt->p_type != PT_LOAD)
			continue;
		//ut_log("%d: LOAD section: vaddr:%x filesz:%x offset:%x flags:%x  \n",i,ELF_PAGESTART(eppnt->p_vaddr),eppnt->p_filesz,eppnt->p_offset,eppnt->p_flags);
		/* Now use mmap to map the library into memory. */
		error = 1;
		if (eppnt->p_filesz > 0) {
			unsigned long addr;
			unsigned long start_addr = ELF_PAGESTART(eppnt->p_vaddr);
			unsigned long end_addr= eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
			addr = vm_mmap(file, start_addr, end_addr, eppnt->p_flags, 0, (eppnt->p_offset
					- ELF_PAGEOFFSET(eppnt->p_vaddr)),"text");
			if (addr == 0)
				error = 0;
			if (task->mm->start_code ==0  || task->mm->start_code > start_addr ) task->mm->start_code = start_addr;
			if (task->mm->end_code < end_addr ) task->mm->end_code = end_addr;
		}
		//if (error != ELF_PAGESTART(eppnt->p_vaddr))
		if (error != 1) {
			error = -6;
			goto out;
		}

		elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
		//	padzero(elf_bss);

		/* TODO: the bss start address is not at PAGE_ALIGN or ELF_MIN_ALIGN; need to club this partial page with the data */
	//	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
		bss_start = eppnt->p_filesz + eppnt->p_vaddr;
		bss = eppnt->p_memsz + eppnt->p_vaddr;
		//ut_log(" bss start :%x end:%x memsz:%x elf_bss:%x \n",bss_start, bss,eppnt->p_memsz,elf_bss);
		if (bss > bss_start) {
			vm_setupBrk(bss_start, bss - bss_start);
		}
		error = 0;
	}

 out:
 	if (elf_phdata) {
 		mm_free(elf_phdata);
 	}
	if (error != 0) {
		ut_log(" ERROR in elf loader filename :%s :%d\n",file->filename,-error);
	} else {
		task->mm->stack_bottom = USERSTACK_ADDR+USERSTACK_LEN;
		 elf_initialize_userspace_stack(elf_ex,aux_addr,tmp_stack, stack_len,load_addr);

		vm_mmap(0, USER_SYSCALL_PAGE, 0x1000, PROT_READ | PROT_EXEC |PROT_WRITE, MAP_ANONYMOUS, 0,"fst_syscal");
			//ut_memset((unsigned char *)SYSCALL_PAGE,(unsigned char )0xcc,0x1000);
		ut_memcpy((unsigned char *)USER_SYSCALL_PAGE,(unsigned char *)&__vsyscall_page,0x1000);
		if (g_conf_syscall_debug==1){
			//pagetable_walk(4,g_current_task->mm->pgd,1,0);
		}
	}
	DEBUG(" Program start address(autod) : %x \n",elf_ex.e_entry);

	if (error == 0)
		return p_entry;
	else
		return 0;
}
Example #19
File: mm.c Project: horf31/ece454
/**********************************************************
 * mm_realloc
 * Shrinks in place, grows into a free neighbor or the end
 * of the heap when possible, and otherwise falls back to
 * mm_malloc and mm_free
 *********************************************************/
void *mm_realloc(void *ptr, size_t size) {

	/* Case 1: If size == 0 then this is just free, and we return NULL. */
	if (size == 0) {
		mm_free(ptr);
		return NULL;
	}

	/* Case 2: If oldptr is NULL, then this is just malloc. */
	if (ptr == NULL) {
		return (mm_malloc(size));
	}

	/* Case 3: Size is equal or smaller than the original size, return or split */
	size_t old_size = GET_SIZE(HDRP(ptr));          /* current block size */
	size_t asize;                                   /* adjusted requested block size */

	// Adjust block size to include overhead and alignment reqs.
	if (size <= DSIZE)
		asize = 2 * DSIZE;
	else
		asize = DSIZE * ((size + (DSIZE) + (DSIZE - 1)) / DSIZE);

	// If old_size is greater than the minimum block size and new size, then split
	if (old_size >= asize + 2 * DSIZE) {
		split(ptr, asize, old_size);
		return ptr;
	} else if (old_size >= asize) { // If not enough space to split, just return
		return ptr;
	}

	/* Case 4: Check next neighboring block, if free, and the combined size is equal to or greater than size, combine */
	size_t next_alloc = GET_ALLOC(HDRP(NEXT_BLKP(ptr)));      /* allocative state of the next block */
	size_t next_size = GET_SIZE(HDRP(NEXT_BLKP(ptr)));        /* size of the next block */
	size_t remainder_size;

	if ((int) next_alloc == 0 && old_size + next_size >= asize) {
		delete_from_list(NEXT_BLKP(ptr)); // Remove from the free list

		// If the combined space is too large, split
		if (old_size + next_size > asize + 2 * DSIZE) {
			PUT(HDRP(ptr), PACK(asize, 1));  // Mark the footer and header of the realloc'd block
			PUT(FTRP(ptr), PACK(asize, 1));
			remainder_size = old_size + next_size - asize; // Calculate the remainder size of the split block
			PUT(HDRP(NEXT_BLKP(ptr)), PACK(remainder_size, 0));  // Mark the footer and header for the split block
			PUT(FTRP(NEXT_BLKP(ptr)), PACK(remainder_size, 0));
			insert_freelist((intptr_t *) NEXT_BLKP(ptr));
		} else { // Not enough space to split
			PUT(HDRP(ptr), PACK(old_size+next_size, 1));  // Mark the footer and header of the realloc'd block
			PUT(FTRP(ptr), PACK(old_size+next_size, 1));
		}
		return ptr;
	}

	/* Case 5: Current block is at the end of the heap, just extend heap */
	// Check if the next block ptr is at the end of the heap
	if (NEXT_BLKP(ptr) > (char*)mem_heap_hi() ) {
		size_t extendsize = asize - old_size; // Calculate the size needed to extend
		intptr_t * bp;
		if ((bp = extend_heap(extendsize / WSIZE)) == NULL) // Extend the heap
			return NULL;
		PUT(HDRP(ptr), PACK(asize, 1));  // Mark the footer and header
		PUT(FTRP(ptr), PACK(asize, 1));
		return ptr;
	}

	/* Case 6: Check previous neighboring block, if free, and the combined size is equal to or greater than size, combine and move the content */
	intptr_t * prev_p = (intptr_t *)PREV_BLKP(ptr); /* previous block's ptr */
	size_t prev_alloc = GET_ALLOC(HDRP(prev_p));    /* allocative state of the prev block */
	size_t prev_size = GET_SIZE(HDRP(prev_p));      /* size of the prev block */

	if ((int) prev_alloc == 0 && old_size + prev_size >= asize) {
		delete_from_list(PREV_BLKP(ptr));  // Remove from the free list
		memmove(prev_p, ptr, asize);  // Move the content to the new starting ptr

		// If the combined space is too large, split
		if (old_size + prev_size > asize + 2 * DSIZE) {
			PUT(HDRP(prev_p), PACK(asize, 1));  // Mark the footer and header of the realloc'd block
			PUT(FTRP(prev_p), PACK(asize, 1));
			remainder_size = old_size + prev_size - asize;  // Calculate the remainder size of the split block
			PUT(HDRP(NEXT_BLKP(prev_p)), PACK(remainder_size, 0));  // Mark the footer and header for the split block
			PUT(FTRP(NEXT_BLKP(prev_p)), PACK(remainder_size, 0));
			insert_freelist((intptr_t *) NEXT_BLKP(prev_p));
		} else { // Not enough space to split
			PUT(HDRP(prev_p), PACK(old_size+prev_size, 1));  // Mark the footer and header of the realloc'd block
			PUT(FTRP(prev_p), PACK(old_size+prev_size, 1));
		}
		return prev_p;
	}

	/* Case 7: Malloc a new block, copy the content, free the old block */

	void *oldptr = ptr;
	void *newptr;

	newptr = mm_malloc(size); // Malloc a new block
	if (newptr == NULL)
		return NULL;

	// Copy the old data.
	if (size < old_size)
		old_size = size;
	memcpy(newptr, oldptr, old_size);
	mm_free(oldptr);  // Free the old block
	return newptr;
}
Example #20
void deferred_alloc_finish(deferred_alloc_t *dalloc)
{
  mm_free(mm_pool_temp, dalloc->requirements,
          dalloc->requirements_size * sizeof(memory_requirement_t));
  mm_free(mm_pool_temp, dalloc, sizeof(deferred_alloc_t));
}
Example #21
/**********************************************************
 * mm_realloc
 * Deals with a few cases:
 * 1. if the realloc size is smaller than the current size,
 * we split the current block and then put the extra part
 * on the free list
 * 2. if the next block of the current block is free, we check whether
 * merging the two blocks yields a block that fits the realloc
 * 3. if the current block is at the end of the heap,
 * we just grow the heap by the required amount and then merge
 * that amount into the block
 * 4. if the new size is the same as the old size, we do nothing
 * 5. if the new size is 0, this is the same as free
 * 6. otherwise we malloc a new block and copy the data over from
 * the old block (a short exercise of these cases follows the function)
 *********************************************************/
void *mm_realloc(void *ptr, size_t size) {
    /* If size == 0 then this is just free, and we return NULL. */
    //case 5
    if (size == 0) {
        mm_free(ptr);
        return NULL;
    }
    /* If oldptr is NULL, then this is just malloc. */
    if (ptr == NULL)
        return (mm_malloc(size));

    void *oldptr = ptr;
    void *newptr;
    size_t copySize;
    size_t oldSize = GET_SIZE(HDRP(oldptr));
    size_t asize;

    /* Adjust block size to include overhead and alignment reqs. */
    if (size <= DSIZE)
        asize = 2 * DSIZE;
    else
        asize = DSIZE * ((size + (DSIZE) + (DSIZE - 1)) / DSIZE);

    //case 4 (see above)
    if (oldSize == asize) {
        return ptr;
    }        //case 1
    else if (oldSize > asize) {
        void* newptr = splitBlock(ptr, asize);
        place(newptr, asize);
        return newptr;
    }        //case 2
    else if (GET_SIZE(HDRP(NEXT_BLKP(ptr))) != 0) {

        if (GET_ALLOC(HDRP(NEXT_BLKP(ptr))) == 0) {
            //get the merge size after merge with next block
            size_t msize = oldSize + GET_SIZE(HDRP(NEXT_BLKP(ptr)));
            if (msize >= asize) {
                //coalesce next block with current block
                removeFromFreeList(NEXT_BLKP(ptr));
                PUT(HDRP(ptr), PACK(msize, 0));
                PUT(FTRP(ptr), PACK(msize, 0));

                //split block if there is extra space
                void* newptr = splitBlock(ptr, asize);
                place(newptr, asize);
                return newptr;
            }
        }
    }        //case 3
    else if (GET_SIZE(HDRP(NEXT_BLKP(ptr))) == 0) {
        //new size larger than old size and next block is epilogue
        //we can extend the heap and then coalesce

        size_t esize = asize - oldSize; //calculate sufficient space to extend
        //extend heap by the sufficient amount
        void* ebp = extend_heap(esize / WSIZE);

        if (ebp != NULL) {
            //coalesce the extend space into current block
            PUT(HDRP(ptr), PACK(asize, 1));
            PUT(FTRP(ptr), PACK(asize, 1));
            return ptr;
        }
    }

    //case 6
    newptr = mm_malloc(size);
    if (newptr == NULL)
        return NULL;

    /* Copy the old data. */
    copySize = GET_SIZE(HDRP(oldptr));

    memcpy(newptr, oldptr, copySize);
    mm_free(oldptr);
    return newptr;
}
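
A brief exercise of the cases enumerated above, using only the driver entry points that appear elsewhere in these examples (mm_init, mm_malloc, mm_realloc, mm_free); which branch fires for the growing call depends on the state of the neighboring blocks:

    mm_init();
    char *p = mm_malloc(64);

    p = mm_realloc(p, 32);      /* case 1: shrink, splitting off the tail */
    p = mm_realloc(p, 32);      /* case 4: same adjusted size, nothing to do */
    p = mm_realloc(p, 4096);    /* case 2, 3, or 6, depending on the neighbors */

    mm_realloc(p, 0);           /* case 5: equivalent to mm_free(p) */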
Example #22
static void rbtsimple_free( /*@out@*/ /*@only@*/ mm_addr_t what ,
                            mm_size_t size , /*@null@*/ void *data )
{
  UNUSED_PARAM( void * , data ) ;
  mm_free( mm_pool_temp , what , size ) ;
}
Example #23
void CAuthorization::Free()
{
	mm_free(auth_type);
	mm_free(username);
	mm_free(realm);
	mm_free(nonce);
	mm_free(uri);
	mm_free(response);
	mm_free(digest);
	mm_free(algorithm);
	mm_free(cnonce);
	mm_free(opaque);
	mm_free(message_qop);
	mm_free(nonce_count);

	auth_type = NULL;
	username = NULL;
	realm = NULL;
	nonce = NULL;
	uri = NULL;
	response = NULL;
	digest = NULL; 		
	algorithm = NULL;   
	cnonce = NULL; 		
	opaque = NULL; 		
	message_qop = NULL; 
	nonce_count = NULL; 
	auth_param = NULL;  

}
Example #24
/*
 * eval_mm_valid - Check the mm malloc package for correctness
 */
static int eval_mm_valid(trace_t *trace, int tracenum, range_t **ranges) {
    int i, j;
    int index;
    int size;
    int oldsize;
    char *newp;
    char *oldp;
    char *p;
    
    /* Reset the heap and free any records in the range list */
    mem_reset_brk();
    clear_ranges(ranges);

    /* Call the mm package's init function */
    if (mm_init() < 0) {
	malloc_error(tracenum, 0, "mm_init failed.");
	return 0;
    }

    /* Interpret each operation in the trace in order */
    for (i = 0;  i < trace->num_ops;  i++) {
	index = trace->ops[i].index;
	size = trace->ops[i].size;

        switch (trace->ops[i].type) {

        case ALLOC: /* mm_malloc */

	    /* Call the student's malloc */
	    if ((p = mm_malloc(size)) == NULL) {
		malloc_error(tracenum, i, "mm_malloc failed.");
		return 0;
	    }
	    
	    /* 
	     * Test the range of the new block for correctness and add it 
	     * to the range list if OK. The block must be aligned properly,
	     * and must not overlap any currently allocated block. 
	     */ 
	    if (add_range(ranges, p, size, tracenum, i) == 0)
		return 0;
	    
	    /* ADDED: cgw
	     * fill range with low byte of index.  This will be used later
	     * if we realloc the block and wish to make sure that the old
	     * data was copied to the new block
	     */
	    memset(p, index & 0xFF, size);

	    /* Remember region */
	    trace->blocks[index] = p;
	    trace->block_sizes[index] = size;
	    break;

        case REALLOC: /* mm_realloc */
	    
	    /* Call the student's realloc */
	    oldp = trace->blocks[index];
	    if ((newp = mm_realloc(oldp, size)) == NULL) {
		malloc_error(tracenum, i, "mm_realloc failed.");
		return 0;
	    }
	    
	    /* Remove the old region from the range list */
	    remove_range(ranges, oldp);
	    
	    /* Check new block for correctness and add it to range list */
	    if (add_range(ranges, newp, size, tracenum, i) == 0)
		return 0;
	    
	    /* ADDED: cgw
	     * Make sure that the new block contains the data from the old 
	     * block and then fill in the new block with the low order byte
	     * of the new index
	     */
	    oldsize = trace->block_sizes[index];
	    if (size < oldsize) oldsize = size;
	    for (j = 0; j < oldsize; j++) {
	      if (newp[j] != (index & 0xFF)) {
		malloc_error(tracenum, i, "mm_realloc did not preserve the "
			     "data from old block");
		return 0;
	      }
	    }
	    memset(newp, index & 0xFF, size);

	    /* Remember region */
	    trace->blocks[index] = newp;
	    trace->block_sizes[index] = size;
	    break;

        case FREE: /* mm_free */
	    
	    /* Remove region from list and call student's free function */
	    p = trace->blocks[index];
	    remove_range(ranges, p);
	    mm_free(p);
	    break;

	default:
	    app_error("Nonexistent request type in eval_mm_valid");
        }

    }

    /* As far as we know, this is a valid malloc package */
    return 1;
}
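The ADDED comments above describe a fill-and-verify trick: stamp each block with a known byte so a realloc that fails to copy the payload is caught on the spot. A self-contained sketch of the same check, using the standard allocator purely for illustration:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Stamp a block, grow it, and verify the old bytes survived the move. */
int main(void)
{
    size_t oldsize = 64, newsize = 256, j;
    unsigned char *p = malloc(oldsize);
    assert(p != NULL);
    memset(p, 0xAB, oldsize);              /* known fill byte */

    unsigned char *q = realloc(p, newsize);
    assert(q != NULL);
    for (j = 0; j < oldsize; j++)          /* only the old bytes are guaranteed */
        assert(q[j] == 0xAB);

    free(q);
    return 0;
}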
Example #25
/* mm_realloc - Grow a block in place when the neighboring space allows it
 * (absorbing the next free block or extending the heap); otherwise fall
 * back to allocate-copy-free. */
void *mm_realloc(void *ptr, size_t size)
{
    void *bp;

    /* Degenerate cases, per the standard realloc contract */
    if (ptr == NULL)
        return mm_malloc(size);
    if (size == 0) {
        mm_free(ptr);
        return NULL;
    }

    size_t asize = ADJUSTSIZE(size);

    if(!GETSIZE(NEXT_BLKP(ptr)))
    {
        size_t extendsize = MAX(asize, CHUNKSIZE);
        if ((bp = extend_heap(extendsize/4)) == NULL)
            return NULL;
        size_t nsize = extendsize + GETSIZE(ptr) - asize;

        PUT(HDRP(ptr), PACK(asize,1));
        PUT(FTRP(ptr), PACK(asize,1));

        void *blk = NEXT_BLKP(ptr);
        PUT(HDRP(blk), PACK(nsize,0));
        PUT(FTRP(blk), PACK(nsize, 0));
        tree_root = mm_insert(tree_root, blk);

        return ptr;
    }

    if(!(GET_ALLOC(HDRP(NEXT_BLKP(ptr)))))
    {
        bp = NEXT_BLKP(ptr);

        size_t total = GETSIZE(ptr) + GETSIZE(bp);

        if(total >= asize)
        {
            size_t nsize = total - asize;
            tree_root = mm_remove(tree_root,bp);

            if(nsize < 16)
            {
                PUT(HDRP(ptr), PACK(total, 1));
                PUT(FTRP(ptr), PACK(total, 1));
                return ptr;
            }
            else
            {
                PUT(HDRP(ptr), PACK(asize, 1));
                PUT(FTRP(ptr), PACK(asize, 1));

                void *blk = NEXT_BLKP(ptr);
                PUT(HDRP(blk), PACK(nsize,0));
                PUT(FTRP(blk), PACK(nsize,0));
                tree_root = mm_insert(tree_root, blk);

                return ptr;
            }
        }

        else if(!GETSIZE(NEXT_BLKP(bp)))
        {
            size_t extendsize = MAX(asize, CHUNKSIZE);
            if (extend_heap(extendsize/4) == NULL)
                return NULL;
            size_t nsize = extendsize + total - asize;

            PUT(HDRP(ptr), PACK(asize,1));
            PUT(FTRP(ptr), PACK(asize,1));

            void *blk = NEXT_BLKP(ptr);
            PUT(HDRP(blk), PACK(nsize,0));
            PUT(FTRP(blk), PACK(nsize,0));
            tree_root = mm_insert(tree_root, blk);
            return ptr;
        }
    }

    if ((bp = mm_malloc(size)) == NULL)
        return NULL;

    /* Copy no more than the requested size; a shrinking realloc can reach
     * this path with an old payload larger than the new block. */
    size_t copysize = GETSIZE(ptr) - DSIZE;
    if (copysize > size)
        copysize = size;
    memcpy(bp, ptr, copysize);
    mm_free(ptr);
    return bp;
}
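The absorb-and-split branch above is easiest to see with concrete numbers (illustrative only, assuming the 16-byte minimum block size implied by the nsize < 16 test):

/* Illustrative arithmetic for the absorb-and-split branch:
 * current block = 48 bytes, adjacent free block = 32 bytes -> total = 80.
 * A request needing asize = 56 leaves nsize = 80 - 56 = 24 >= 16, so the
 * block is split: 56 bytes stay allocated and a 24-byte free block is
 * re-inserted into the tree. Had nsize been below 16, the full 80 bytes
 * would remain allocated to avoid creating an unusable sliver. */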
Example #26
/* 
 * eval_mm_util - Evaluate the space utilization of the student's package
 *   The idea is to remember the high water mark "hwm" of the heap for 
 *   an optimal allocator, i.e., no gaps and no internal fragmentation.
 *   Utilization is the ratio hwm/heapsize, where heapsize is the 
 *   size of the heap in bytes after running the student's malloc 
 *   package on the trace. Note that our implementation of mem_sbrk() 
 *   doesn't allow the students to decrement the brk pointer, so brk
 *   is always the high water mark of the heap. 
 *   
 */
static double eval_mm_util(trace_t *trace, int tracenum, range_t **ranges) {
    (void)tracenum;   /* parameters unused here; silence compiler warnings */
    (void)ranges;

    int i;
    int index;
    int size, newsize, oldsize;
    int max_total_size = 0;
    int total_size = 0;
    char *p;
    char *newp, *oldp;

    /* initialize the heap and the mm malloc package */
    mem_reset_brk();
    if (mm_init() < 0)
	app_error("mm_init failed in eval_mm_util");

    for (i = 0;  i < trace->num_ops;  i++) {
        switch (trace->ops[i].type) {

        case ALLOC: /* mm_malloc */
	    index = trace->ops[i].index;
	    size = trace->ops[i].size;

	    if ((p = mm_malloc(size)) == NULL) 
		app_error("mm_malloc failed in eval_mm_util");
	    
	    /* Remember region and size */
	    trace->blocks[index] = p;
	    trace->block_sizes[index] = size;
	    
	    /* Keep track of current total size
	     * of all allocated blocks */
	    total_size += size;
	    
	    /* Update statistics */
	    max_total_size = (total_size > max_total_size) ?
		total_size : max_total_size;
	    break;

	case REALLOC: /* mm_realloc */
	    index = trace->ops[i].index;
	    newsize = trace->ops[i].size;
	    oldsize = trace->block_sizes[index];

	    oldp = trace->blocks[index];
	    if ((newp = mm_realloc(oldp,newsize)) == NULL)
		app_error("mm_realloc failed in eval_mm_util");

	    /* Remember region and size */
	    trace->blocks[index] = newp;
	    trace->block_sizes[index] = newsize;
	    
	    /* Keep track of current total size
	     * of all allocated blocks */
	    total_size += (newsize - oldsize);
	    
	    /* Update statistics */
	    max_total_size = (total_size > max_total_size) ?
		total_size : max_total_size;
	    break;

        case FREE: /* mm_free */
	    index = trace->ops[i].index;
	    size = trace->block_sizes[index];
	    p = trace->blocks[index];
	    
	    mm_free(p);
	    
	    /* Keep track of current total size
	     * of all allocated blocks */
	    total_size -= size;
	    
	    break;

	default:
	    app_error("Nonexistent request type in eval_mm_util");

        }
    }

    return ((double)max_total_size / (double)mem_heapsize());
}
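Plugging made-up numbers into the ratio described in the header comment: a peak aggregate payload (the high water mark) of 6000 bytes against a final heap of 8192 bytes yields a utilization of about 0.73. A trivial check:

#include <stdio.h>

/* Worked example of the utilization metric; both figures are hypothetical. */
int main(void)
{
    int max_total_size = 6000;   /* high water mark of aggregate payload */
    int heapsize = 8192;         /* final heap size, as mem_heapsize() would report */
    printf("util = %.2f\n", (double)max_total_size / (double)heapsize);  /* 0.73 */
    return 0;
}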
Example #27
int
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	struct bufferevent *underlying;

	EVUTIL_ASSERT(bufev_private->refcnt > 0);

	if (--bufev_private->refcnt) {
		BEV_UNLOCK(bufev);
		return 0;
	}

	underlying = bufferevent_get_underlying(bufev);

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* XXX what happens if refcnt for these buffers is > 1?
	 * The buffers can share a lock with this bufferevent object,
	 * but the lock might be destroyed below. */
	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	if (bufev_private->rate_limiting) {
		if (bufev_private->rate_limiting->group)
			bufferevent_remove_from_rate_limit_group_internal(bufev,0);
		if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event))
			event_del(&bufev_private->rate_limiting->refill_bucket_event);
		event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event);
		mm_free(bufev_private->rate_limiting);
		bufev_private->rate_limiting = NULL;
	}

	event_debug_unassign(&bufev->ev_read);
	event_debug_unassign(&bufev->ev_write);

	BEV_UNLOCK(bufev);
	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

	/* Release the reference to underlying now that we no longer need the
	 * reference to it.  We wait this long mainly in case our lock is
	 * shared with underlying.
	 *
	 * The 'destruct' function will also drop a reference to underlying
	 * if BEV_OPT_CLOSE_ON_FREE is set.
	 *
	 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
	 * It would probably save us some headaches.
	 */
	if (underlying)
		bufferevent_decref(underlying);

	return 1;
}
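EVUTIL_UPCAST above recovers the enclosing bufferevent_private from its embedded bev member. A sketch of the container-of idiom behind it; the macro and struct here are illustrative stand-ins, not libevent's actual definitions:

#include <stddef.h>

/* Illustrative container-of macro in the spirit of EVUTIL_UPCAST. */
#define UPCAST(ptr, type, field) \
    ((type *)(((char *)(ptr)) - offsetof(type, field)))

struct outer {
    int refcnt;
    struct inner { int x; } member;   /* embedded struct, like bev above */
};

int main(void)
{
    struct outer o = { 1, { 42 } };
    struct outer *back = UPCAST(&o.member, struct outer, member);
    return back == &o ? 0 : 1;        /* round-trips to the containing object */
}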
Example #28
inline void Free(void* addr)
{
    mm_free(m_raw_pool, addr);
}
Example #29
void
mm_zfree(struct mm_master *mm, void *address)
{
	mm_free(mm, address);
}
Example #30
/*
 * mm_realloc - Reallocate a block in place, extending the heap if necessary.
 *              The new block is padded with a buffer to guarantee that the
 *              next reallocation can be done without extending the heap,
 *              assuming that the block is expanded by a constant number of bytes
 *              per reallocation.
 *
 *              If the buffer is not large enough for the next reallocation,
 *              mark the next block with the reallocation tag. Free blocks
 *              marked with this tag cannot be used for allocation or
 *              coalescing. The tag is cleared when the marked block is
 *              consumed by reallocation, when the heap is extended, or when
 *              the reallocated block is freed.
 */
void *mm_realloc(void *ptr, size_t size)
{
    void *new_ptr = ptr;    /* Pointer to be returned */
    size_t new_size = size; /* Size of new block */
    int remainder;          /* Space left over once the request is met */
    int extendsize;         /* Size of heap extension */
    int block_buffer;       /* Slack between block size and requested size */

    /* Filter invalid block size */
    if (size == 0)
        return NULL;

    /* Adjust block size to include boundary tag and alignment requirements */
    if (new_size <= DSIZE) {
        new_size = 2 * DSIZE;
    } else {
        new_size = DSIZE * ((new_size + (DSIZE) + (DSIZE - 1)) / DSIZE);
    }

    /* Add overhead requirements to block size */
    new_size += BUFFER;

    /* Calculate block buffer */
    block_buffer = GET_SIZE(HEAD(ptr)) - new_size;

    /* Allocate more space if overhead falls below the minimum */
    if (block_buffer < 0) {
        /* Check if next block is a free block or the epilogue block */
        if (!GET_ALLOC(HEAD(NEXT(ptr))) || !GET_SIZE(HEAD(NEXT(ptr)))) {
            remainder = GET_SIZE(HEAD(ptr)) + GET_SIZE(HEAD(NEXT(ptr))) - new_size;
            if (remainder < 0) {
                extendsize = MAX(-remainder, CHUNKSIZE);
                if (extend_heap(extendsize) == NULL)
                    return NULL;
                remainder += extendsize;
            }

            delete_node(NEXT(ptr));

            /* Do not split block */
            PUT_NOTAG(HEAD(ptr), PACK(new_size + remainder, 1)); /* Block header */
            PUT_NOTAG(FOOT(ptr), PACK(new_size + remainder, 1)); /* Block footer */
        } else {
            new_ptr = mm_malloc(new_size - DSIZE);
            if (new_ptr == NULL)
                return NULL;
            /* Copy only the old payload; copying 'size' bytes could read
             * past the end of a smaller original block. */
            memmove(new_ptr, ptr, MIN(size, GET_SIZE(HEAD(ptr)) - DSIZE));
            mm_free(ptr);
        }
        block_buffer = GET_SIZE(HEAD(new_ptr)) - new_size;
    }

    /* Tag the next block if block overhead drops below twice the overhead */
    if (block_buffer < 2 * BUFFER)
        SET_TAG(HEAD(NEXT(new_ptr)));

    /* Return reallocated block */
    return new_ptr;
}
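The payoff described in the header comment is easiest to see in a growth loop: once a block carries BUFFER bytes of slack, a constant-step realloc sequence should resolve mostly in place. A hypothetical driver (the loop and constants are illustrative, not part of the original code):

/* Hypothetical stress loop: each call grows the block by a constant
 * 100 bytes, so after at most one move the BUFFER padding should absorb
 * subsequent growth without copying. */
static void realloc_growth_demo(void)
{
    char *p = mm_malloc(64);
    int i;
    for (i = 1; p != NULL && i <= 50; i++)
        p = mm_realloc(p, 64 + i * 100);   /* constant growth per call */
    if (p != NULL)
        mm_free(p);
}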