Example 1
// No. 2: best-fit algorithm
void * ts_malloc(size_t size)
{
    t_block b = NULL;
    size_t s;
    //s = align4(size);
    s = ALIGN_SIZE(size, sizeof(char *));
    // find the best-fitting free block
    b = _find_best_block(s);
    if (b) {
        // split only if the remainder can hold a header plus one aligned word
        if ((b->size - s) >= (BLOCK_SIZE + sizeof(char *))) {
            _split_block(b, s);
        }
    }
    
    if (!b) {
        // no free block fits: grow the heap
        b = _extend_heap(s);
        if (!b) {
            return NULL;
        }
    }
    return b->data;
}
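For context, the function above relies on a handful of declarations it never shows. The following is a minimal sketch of what they could look like, in the style of the classic malloc tutorials; everything beyond the names size, data, BLOCK_SIZE and ALIGN_SIZE that appear above is an assumption, not taken from the original.

#include <stddef.h>

/* Hypothetical block header; only size and data are used above,
 * the remaining fields are assumed. */
typedef struct s_block *t_block;
struct s_block {
    size_t          size;    /* usable bytes in this block */
    struct s_block *next;    /* assumed: next block in the list */
    struct s_block *prev;    /* assumed: previous block */
    int             free;    /* assumed: nonzero if available */
    char            data[1]; /* first byte of the user area */
};

/* header size up to (but not including) the data area */
#define BLOCK_SIZE offsetof(struct s_block, data)

/* round sz up to the next multiple of align (a power of two) */
#define ALIGN_SIZE(sz, align) (((sz) + (align) - 1) & ~((size_t)(align) - 1))

With these definitions, the split test reads naturally: a block is only split when the leftover space can hold a new header (BLOCK_SIZE) plus at least one pointer-sized word.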
Example 2
// Allocates a data block of at least size bytes and returns it
void* _kmalloc(size_t size, uint32_t align, uint32_t* phys) {
	uint32_t res;
	//If no heap exists
	if (blocks == NULL) {
		return NULL;
	}
	_kmalloc_header* cur = blocks; 
	_kmalloc_header* start = blocks; 
	_kmalloc_header* end = blocks;
	uint32_t align_addr;
	uint32_t cur_size;
	// Search for the first FREE block
	_kmalloc_main:
	cur_size = 0;
	while (cur->type != FREE) {
		// Extend the heap if we run out of blocks
		if (cur->next == NULL) {
			_extend_heap(size);
		}
		cur = cur->next;
	}
	// Check whether this starts a chain of free blocks long enough to
	// work with; otherwise, go back to the search loop above.
	start = cur;
	while (cur_size < (size * 2)) {
		cur_size += cur->size + sizeof(_kmalloc_header);
		if (cur->next == NULL) {
			_extend_heap(size);
		}
		else if (cur->next->type != FREE) {
			cur = cur->next;
			goto _kmalloc_main;
		}
		cur = cur->next;
	}
	//Since we went one block too far ahead, correct for it. 
	end = cur->prev;
	_kmalloc_align_check:
	align_addr = _find_align_addr(start, cur, size, align);
	if (align_addr == 0) {
		if (cur->next == NULL) {
			_extend_heap(size);
			goto _kmalloc_align_check;
		}
		else {
			cur = cur->next;
			goto _kmalloc_main;
		}
	}
	else if (align_addr != 1) {
		// Split off the space before the aligned address so that the
		// next block's data area starts exactly at align_addr.
		uint32_t dist = align_addr - (uint32_t)start;
		_merge_blocks(start, end);
		_split_block(start, dist - (2 * sizeof(_kmalloc_header)));
		start = start->next;
		end = start;
	}
	
	//If it is more than one block, merge them all.
	if (start != end) {
		_merge_blocks(start, end);
	}
	// Otherwise, if the block is too big, split it.
	else if ((start->size - size) > KMALLOC_MAX_DIFF) {
		_split_block(start, size);
	}
	//Mark the block as used
	start->type = USED;
	//Return the address right after the header. 
	res = (((uint32_t)start) + sizeof(_kmalloc_header));
	if (phys != NULL) {
		// Translate the returned virtual address to a physical one
		uint32_t p_n = res - _kmalloc_d_start_addr;
		p_n = p_n / 4096;  // page index within the heap
		p_n--;
		p_n = p_list[p_n]; // physical frame number for that page
		*phys = (p_n * 4096) + (res % 4096);
	}
	return (void*)res;
} 
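The header type and block states are never shown in this example. Here is a plausible sketch, inferred purely from the fields the function dereferences (type, size, next, prev) and the FREE/USED constants it compares against; the exact layout is an assumption:

#include <stdint.h>

/* Assumed block states; the original only ever tests FREE and USED. */
enum { FREE = 0, USED = 1 };

/* Hypothetical header layout inferred from the accesses above;
 * not taken from the original source. */
typedef struct _kmalloc_header {
	uint32_t                type; /* FREE or USED */
	uint32_t                size; /* payload bytes after this header */
	struct _kmalloc_header *next; /* next block in address order */
	struct _kmalloc_header *prev; /* previous block */
} _kmalloc_header;

Note that sizeof(_kmalloc_header) appears in both the chain-length accounting and the alignment split, so whatever the real layout is, the allocator's arithmetic only depends on the header size being constant.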
Example 3
static int _breakup_blocks(List block_list, List new_blocks,
			   select_ba_request_t *request, List my_block_list,
			   bool only_free, bool only_small)
{
	int rc = SLURM_ERROR;
	bg_record_t *bg_record = NULL;
	ListIterator itr = NULL, bit_itr = NULL;
	int total_cnode_cnt = 0;
	char start_char[SYSTEM_DIMENSIONS+1];
	bitstr_t *ionodes = bit_alloc(bg_conf->ionodes_per_mp);
	int cnodes = request->procs / bg_conf->cpu_ratio;
	int curr_mp_bit = -1;
	int dim;

	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
		info("cpu_count=%d cnodes=%d o_free=%d o_small=%d",
		     request->procs, cnodes, only_free, only_small);

	switch(cnodes) {
	case 16:
		/* a 16 can go anywhere */
		break;
	case 32:
		bit_itr = list_iterator_create(bg_lists->valid_small32);
		break;
	case 64:
		bit_itr = list_iterator_create(bg_lists->valid_small64);
		break;
	case 128:
		bit_itr = list_iterator_create(bg_lists->valid_small128);
		break;
	case 256:
		bit_itr = list_iterator_create(bg_lists->valid_small256);
		break;
	default:
		error("We shouldn't be here with this size %d", cnodes);
		goto finished;
	}

	/* First try with free blocks of a midplane or less.  Then try with
	 * the smallest blocks.
	 */
	itr = list_iterator_create(block_list);
	while ((bg_record = list_next(itr))) {
		if (bg_record->free_cnt) {
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
				info("%s being freed by other job(s), skipping",
				     bg_record->bg_block_id);
			continue;
		}
		/* never look at a block if a job is running */
		if (bg_record->job_running != NO_JOB_RUNNING)
			continue;
		/* on the third time through look for just a block
		 * that isn't used */

		/* check for free blocks on the first and second time */
		if (only_free && (bg_record->state != BG_BLOCK_FREE))
			continue;

		/* check small blocks first */
		if (only_small
		    && (bg_record->cnode_cnt > bg_conf->mp_cnode_cnt))
			continue;

		if (request->avail_mp_bitmap &&
		    !bit_super_set(bg_record->bitmap,
				   request->avail_mp_bitmap)) {
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
				info("bg block %s has nodes not usable "
				     "by this job",
				     bg_record->bg_block_id);
			continue;
		}

		if (bg_record->cnode_cnt == cnodes) {
			ba_mp_t *ba_mp = NULL;
			if (bg_record->ba_mp_list)
				ba_mp = list_peek(bg_record->ba_mp_list);
			if (!ba_mp) {
				for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
					start_char[dim] = alpha_num[
						bg_record->start[dim]];
				start_char[dim] = '\0';
				request->save_name = xstrdup(start_char);
			} else
				request->save_name = xstrdup(ba_mp->coord_str);

			rc = SLURM_SUCCESS;
			goto finished;
		}
		/* let's see if we can combine some small ones */
		if (bg_record->cnode_cnt < cnodes) {
			char bitstring[BITSIZE];
			bitstr_t *bitstr = NULL;
			int num_over = 0;
			int num_cnodes = bg_record->cnode_cnt;
			int rec_mp_bit = bit_ffs(bg_record->bitmap);

			if (curr_mp_bit != rec_mp_bit) {
				/* We've hit a different midplane than
				 * before.  Since the list is ordered by
				 * midplane for small blocks, the previous
				 * one has no more blocks, so clear the
				 * accumulated ionodes here. */
				curr_mp_bit = rec_mp_bit;
				bit_nclear(ionodes, 0,
					   (bg_conf->ionodes_per_mp-1));
				total_cnode_cnt = 0;
			}

			/* On really busy systems we can get overlapping
			   blocks here.  If that is the case, only add
			   the part that doesn't overlap.
			*/
			if ((num_over = bit_overlap(
				     ionodes, bg_record->ionode_bitmap))) {
				/* Since the smallest block size is
				   the number of cnodes in an io node,
				   just multiply the num_over by that to
				   get the number of cnodes to remove.
				*/
				if ((num_cnodes -=
				     num_over * bg_conf->smallest_block) <= 0)
					continue;
			}
			bit_or(ionodes, bg_record->ionode_bitmap);

			/* check and see if the bits set are a valid
			   combo */
			if (bit_itr) {
				while ((bitstr = list_next(bit_itr))) {
					if (bit_super_set(ionodes, bitstr))
						break;
				}
				list_iterator_reset(bit_itr);
			}
			if (!bitstr) {
				bit_nclear(ionodes, 0,
					   (bg_conf->ionodes_per_mp-1));
				bit_or(ionodes, bg_record->ionode_bitmap);
				total_cnode_cnt = num_cnodes =
					bg_record->cnode_cnt;
			} else
				total_cnode_cnt += num_cnodes;

			bit_fmt(bitstring, BITSIZE, ionodes);
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
				info("combine adding %s %s %d got %d set "
				     "ionodes %s total is %s",
				     bg_record->bg_block_id, bg_record->mp_str,
				     num_cnodes, total_cnode_cnt,
				     bg_record->ionode_str, bitstring);
			if (total_cnode_cnt == cnodes) {
				ba_mp_t *ba_mp = NULL;
				if (bg_record->ba_mp_list)
					ba_mp = list_peek(
						bg_record->ba_mp_list);
				if (!ba_mp) {
					for (dim=0; dim<SYSTEM_DIMENSIONS;
					     dim++)
						start_char[dim] = alpha_num[
							bg_record->start[dim]];
					start_char[dim] = '\0';
					request->save_name =
						xstrdup(start_char);
				} else
					request->save_name =
						xstrdup(ba_mp->coord_str);

				if (!my_block_list) {
					rc = SLURM_SUCCESS;
					goto finished;
				}

				bg_record = create_small_record(bg_record,
								ionodes,
								cnodes);
				list_append(new_blocks, bg_record);

				rc = SLURM_SUCCESS;
				goto finished;
			}
			continue;
		}
		/* we found a block that is bigger than requested */
		break;
	}

	if (bg_record) {
		ba_mp_t *ba_mp = NULL;
		if (bg_record->ba_mp_list)
			ba_mp = list_peek(bg_record->ba_mp_list);
		if (!ba_mp) {
			for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
				start_char[dim] = alpha_num[
					bg_record->start[dim]];
			start_char[dim] = '\0';
			request->save_name = xstrdup(start_char);
		} else
			request->save_name = xstrdup(ba_mp->coord_str);
		/* It appears we don't need this original record
		 * anymore, just work off the copy if indeed it is a copy. */

		/* bg_record_t *found_record = NULL; */

		/* if (bg_record->original) { */
		/* 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) */
		/* 		info("1 This was a copy %s", */
		/* 		     bg_record->bg_block_id); */
		/* 	found_record = bg_record->original; */
		/* } else { */
		/* 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) */
		/* 		info("looking for original"); */
		/* 	found_record = find_org_in_bg_list( */
		/* 		bg_lists->main, bg_record); */
		/* } */
		if ((bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
		    && bg_record->original
		    && (bg_record->original->magic != BLOCK_MAGIC)) {
			info("This record %s has bad magic, it must be "
			     "getting freed.  No worries it will all be "
			     "figured out later.",
			     bg_record->bg_block_id);
		}

		/* if (!found_record || found_record->magic != BLOCK_MAGIC) { */
		/* 	error("this record wasn't found in the list!"); */
		/* 	rc = SLURM_ERROR; */
		/* 	goto finished; */
		/* } */

		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
			char tmp_char[256];
			format_node_name(bg_record, tmp_char,
					 sizeof(tmp_char));
			info("going to split %s, %s",
			     bg_record->bg_block_id,
			     tmp_char);
		}

		if (!my_block_list) {
			rc = SLURM_SUCCESS;
			goto finished;
		}
		_split_block(block_list, new_blocks, bg_record, cnodes);
		rc = SLURM_SUCCESS;
		goto finished;
	}

finished:
	if (bit_itr)
		list_iterator_destroy(bit_itr);

	FREE_NULL_BITMAP(ionodes);
	if (itr)
		list_iterator_destroy(itr);

	return rc;
}
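The heart of the combining loop above is the ionode-bitmap accounting: each small block's ionodes are OR-ed into a running bitmap, and any cnodes whose ionodes were already counted are discounted first. A simplified sketch of just that step (not SLURM source; it reuses only the bitstring calls that appear in _breakup_blocks itself):

/* Add one small block's ionodes to the running bitmap and return how
 * many of its cnodes are newly contributed (0 if fully overlapped). */
static int _accumulate_ionodes(bitstr_t *ionodes, bitstr_t *block_ionodes,
			       int block_cnodes, int smallest_block)
{
	int num_over = bit_overlap(ionodes, block_ionodes);

	if (num_over) {
		/* Each overlapping ionode accounts for smallest_block
		 * cnodes that are already in the running total. */
		block_cnodes -= num_over * smallest_block;
		if (block_cnodes <= 0)
			return 0;
	}
	bit_or(ionodes, block_ionodes);
	return block_cnodes;
}

In _breakup_blocks this running total is compared against the requested cnode count, and the accumulated ionode set is additionally checked against the valid_small* lists so that only legal combinations are accepted.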