Example #1
0
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
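The heart of this allocator is the claim loop: scan for a clear bit at or above the starting offset and set it atomically, retrying when another CPU wins the race for the same bit. Below is a minimal single-page, user-space sketch of that pattern; the names (MAP_BITS, claim_bit, next_clear, alloc_id) are illustrative only, and GCC's __atomic_fetch_or builtin stands in for the kernel's test_and_set_bit().

#include <limits.h>
#include <stdio.h>

#define MAP_BITS 4096			/* stand-in for BITS_PER_PAGE */
#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long bitmap[MAP_BITS / WORD_BITS];

/* Atomically set bit 'nr' and return its previous value, like test_and_set_bit(). */
static int claim_bit(unsigned long *map, unsigned int nr)
{
	unsigned long mask = 1UL << (nr % WORD_BITS);
	unsigned long old = __atomic_fetch_or(&map[nr / WORD_BITS], mask, __ATOMIC_ACQ_REL);

	return (old & mask) != 0;
}

/* Linear scan for the next clear bit at or after 'offset'. */
static unsigned int next_clear(const unsigned long *map, unsigned int offset)
{
	while (offset < MAP_BITS &&
	       (map[offset / WORD_BITS] & (1UL << (offset % WORD_BITS))))
		offset++;
	return offset;
}

/* Allocate an ID >= start from the single bitmap page, or return -1 if it is full. */
static int alloc_id(unsigned int start)
{
	unsigned int offset = next_clear(bitmap, start);

	while (offset < MAP_BITS) {
		if (!claim_bit(bitmap, offset))
			return (int)offset;		/* the bit was clear and is now ours */
		offset = next_clear(bitmap, offset);	/* lost the race, keep scanning */
	}
	return -1;
}

int main(void)
{
	printf("%d\n", alloc_id(300));	/* 300 */
	printf("%d\n", alloc_id(300));	/* 301: bit 300 is already taken */
	printf("%d\n", alloc_id(0));	/* 0 */
	return 0;
}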
Example #2
0
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			} while (offset < BITS_PER_PAGE && pid < pid_max);
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
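The DIV_ROUND_UP() form of max_scan above computes the same quantity as the open-coded (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE in the other versions, and the trailing "- !offset" drops the extra pass when the search already starts at the front of a bitmap block (the for loop runs max_scan + 1 times). A quick check with illustrative numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values: one 4 KiB page holds 32768 bits. */
	unsigned int bits_per_page = 4096 * 8;
	unsigned int pid_max = 32768;
	unsigned int offset;

	/* Start in the middle of the block: two passes (the tail first, then from the front). */
	offset = 300;
	printf("max_scan = %u\n", DIV_ROUND_UP(pid_max, bits_per_page) - !offset);	/* 1 */

	/* Start exactly at a block boundary: a single full pass is enough. */
	offset = 0;
	printf("max_scan = %u\n", DIV_ROUND_UP(pid_max, bits_per_page) - !offset);	/* 0 */

	return 0;
}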
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
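Both alloc_pidmap() variants above install the bitmap page lazily: the page is allocated with pidmap_lock dropped, installed under the lock only if nobody else got there first, and the loser's copy is freed (the second variant frees it while still holding the lock). A hedged user-space sketch of that install-or-free pattern, with a pthread mutex standing in for the kernel spinlock, calloc() for kzalloc(), and made-up names (pidmap_stub, ensure_page):

#include <pthread.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct pidmap_stub {			/* illustrative stand-in for struct pidmap */
	void *page;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Make sure map->page exists; returns 0 on success, -1 if the allocation failed. */
static int ensure_page(struct pidmap_stub *map)
{
	void *page;

	if (map->page)
		return 0;

	/* Allocate with the lock dropped, so we never allocate while holding it. */
	page = calloc(1, PAGE_SIZE);

	pthread_mutex_lock(&map_lock);
	if (!map->page) {
		map->page = page;	/* we won the race: install our page */
		page = NULL;
	}
	pthread_mutex_unlock(&map_lock);

	free(page);			/* NULL if we installed it, else drop the duplicate */

	return map->page ? 0 : -1;	/* still NULL only if calloc() itself failed */
}

int main(void)
{
	struct pidmap_stub map = { 0 };

	return ensure_page(&map);
}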
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
Example #5
0
/*
 * Read address file into a linked list. Based on JPilot's
 * jp_read_DB_files (from libplugin.c)
 */
static gint jpilot_read_db_files( JPilotFile *pilotFile, GList **records ) {
	FILE *in, *pc_in;
	char *buf;
	GList *temp_list;
	int num_records, recs_returned, i, num, r;
	unsigned int offset, prev_offset, next_offset, rec_size;
	int out_of_order;
	long fpos;  /* file position indicator */
	unsigned char attrib;
	unsigned int unique_id;
	mem_rec_header *mem_rh, *temp_mem_rh, *last_mem_rh;
	record_header rh;
	RawDBHeader rdbh;
	DBHeader dbh;
	buf_rec *temp_br;
	gchar *pcFile;

	mem_rh = last_mem_rh = NULL;
	*records = NULL;
	recs_returned = 0;

	if( pilotFile->path == NULL ) {
		return MGU_BAD_ARGS;
	}

	in = g_fopen( pilotFile->path, "rb" );
	if (!in) {
		return MGU_OPEN_FILE;
	}

	/* Read the database header */
	num = fread(&rdbh, sizeof(RawDBHeader), 1, in);
	if (num != 1) {
		if (ferror(in)) {
			fclose(in);
			return MGU_ERROR_READ;
		}
		if (feof(in)) {
			fclose(in);
			return MGU_EOF;
		}
	}
	raw_header_to_header(&rdbh, &dbh);

	/* Read each record entry header */
	num_records = dbh.number_of_records;
	out_of_order = 0;
	prev_offset = 0;

	for (i = 1; i < num_records + 1; i++) {
		num = fread(&rh, sizeof(record_header), 1, in);
		if (num != 1) {
			if (ferror(in)) {
				break;
			}
			if (feof(in)) {
				free_mem_rec_header(&mem_rh);
				fclose(in);
				return MGU_EOF;
			}
		}

		offset = ((rh.Offset[0]*256+rh.Offset[1])*256+rh.Offset[2])*256+rh.Offset[3];
		if (offset < prev_offset) {
			out_of_order = 1;
		}
		prev_offset = offset;
		temp_mem_rh = (mem_rec_header *)malloc(sizeof(mem_rec_header));
		if (!temp_mem_rh) {
			break;
		}
		temp_mem_rh->next = NULL;
		temp_mem_rh->rec_num = i;
		temp_mem_rh->offset = offset;
		temp_mem_rh->attrib = rh.attrib;
		temp_mem_rh->unique_id = (rh.unique_ID[0]*256+rh.unique_ID[1])*256+rh.unique_ID[2];
		if (mem_rh == NULL) {
			mem_rh = temp_mem_rh;
			last_mem_rh = temp_mem_rh;
		} else {
			last_mem_rh->next = temp_mem_rh;
			last_mem_rh = temp_mem_rh;
		}
	}

	temp_mem_rh = mem_rh;

	if (num_records) {
		attrib = unique_id = 0;
		if (out_of_order) {
			find_next_offset(mem_rh, 0, &next_offset, &attrib, &unique_id);
		} else {
			next_offset = 0xFFFFFF;
			if (mem_rh) {
				next_offset = mem_rh->offset;
				attrib = mem_rh->attrib;
				unique_id = mem_rh->unique_id;
			}
		}
		fseek(in, next_offset, SEEK_SET);
		while(!feof(in)) {
			fpos = ftell(in);
			if (out_of_order) {
				find_next_offset(mem_rh, fpos, &next_offset, &attrib, &unique_id);
			} else {
				next_offset = 0xFFFFFF;
				if (temp_mem_rh) {
					attrib = temp_mem_rh->attrib;
					unique_id = temp_mem_rh->unique_id;
					if (temp_mem_rh->next) {
						temp_mem_rh = temp_mem_rh->next;
						next_offset = temp_mem_rh->offset;
					}
				}
			}
			rec_size = next_offset - fpos;
			buf = malloc(rec_size);
			if (!buf) break;
			num = fread(buf, rec_size, 1, in);
			if ((num != 1)) {
				if (ferror(in)) {
					free(buf);
					break;
				}
			}

			temp_br = malloc(sizeof(buf_rec));
			if (!temp_br) {
				free(buf);
				break;
			}
			temp_br->rt = PALM_REC;
			temp_br->unique_id = unique_id;
			temp_br->attrib = attrib;
			temp_br->buf = buf;
			temp_br->size = rec_size;

			*records = g_list_append(*records, temp_br);

			recs_returned++;
		}
	}
	fclose(in);
	free_mem_rec_header(&mem_rh);

	/* Read the PC3 file, if present */
	pcFile = jpilot_get_pc3_file( pilotFile );
	if( pcFile == NULL ) return MGU_SUCCESS;
	pc_in = g_fopen( pcFile, "rb");
	g_free( pcFile );

	if( pc_in == NULL ) {
		return MGU_SUCCESS;
	}

	while( ! feof( pc_in ) ) {
		temp_br = malloc(sizeof(buf_rec));
		if (!temp_br) {
			break;
		}
		r = jpilot_read_next_pc( pc_in, temp_br );
		if ( r != MGU_SUCCESS ) {
			free(temp_br);
			break;
		}
		if ((temp_br->rt!=DELETED_PC_REC)
			&&(temp_br->rt!=DELETED_PALM_REC)
			&&(temp_br->rt!=MODIFIED_PALM_REC)
			&&(temp_br->rt!=DELETED_DELETED_PALM_REC)) {
				*records = g_list_append(*records, temp_br);
				recs_returned++;
		}
		if ((temp_br->rt==DELETED_PALM_REC) || (temp_br->rt==MODIFIED_PALM_REC)) {
			temp_list=*records;
			if (*records) {
				while(temp_list->next) {
					temp_list=temp_list->next;
				}
			}
			for (; temp_list; temp_list=temp_list->prev) {
				if (((buf_rec *)temp_list->data)->unique_id == temp_br->unique_id) {
					((buf_rec *)temp_list->data)->rt = temp_br->rt;
				}
			}
		}
		free(temp_br);
	}
	fclose(pc_in);

	return MGU_SUCCESS;
}
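The loop above reassembles each record header's 4-byte offset and 3-byte unique ID from big-endian byte arrays with repeated *256 multiplications, and later derives a record's size as the distance from its offset to the next record's offset. A small stand-alone sketch of that decoding, using made-up sample bytes:

#include <stdio.h>

/* Reassemble a big-endian 4-byte offset, as jpilot_read_db_files() does. */
static unsigned int be32(const unsigned char b[4])
{
	return ((b[0] * 256u + b[1]) * 256u + b[2]) * 256u + b[3];
}

/* Unique IDs are only 3 bytes wide in the record header. */
static unsigned int be24(const unsigned char b[3])
{
	return (b[0] * 256u + b[1]) * 256u + b[2];
}

int main(void)
{
	unsigned char off_this[4] = { 0x00, 0x00, 0x01, 0x20 };	/* sample: this record at 0x120 */
	unsigned char off_next[4] = { 0x00, 0x00, 0x01, 0x80 };	/* sample: next record at 0x180 */
	unsigned char uid[3]      = { 0x00, 0x0a, 0x2b };

	unsigned int this_off = be32(off_this);
	unsigned int rec_size = be32(off_next) - this_off;		/* size = gap to the next record */

	printf("offset=%u size=%u unique_id=%u\n", this_off, rec_size, be24(uid));
	return 0;
}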
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
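The block comment above notes that alloc_qpn(), unlike alloc_pidmap(), grows its bitmap table on demand: the existing blocks are scanned (with one wrap back to block 0) before a new block is appended, up to QPNMAP_ENTRIES. Below is a simplified sketch of that scan-then-grow visiting order, not a line-for-line replica of alloc_qpn()'s pointer arithmetic; the cap value and the helper name are illustrative.

#include <stdio.h>

#define QPNMAP_ENTRIES 8	/* illustrative cap on the number of bitmap blocks */

/*
 * Print the order in which blocks are visited when every block is full:
 * walk the existing blocks starting at 'start' (wrapping once to block 0),
 * then append fresh blocks one at a time until the table hits its cap.
 */
static void visit_order(unsigned int nmaps, unsigned int start)
{
	unsigned int cur = start;
	unsigned int max_scan = nmaps;	/* assume a non-zero start offset, so no "- !offset" */
	unsigned int i;

	for (i = 0;;) {
		printf("block %u\n", cur);
		if (++i > max_scan) {
			if (nmaps == QPNMAP_ENTRIES)
				break;		/* table already at its maximum size */
			cur = nmaps++;		/* grow: move on to a brand-new block */
		} else if (cur + 1 < nmaps) {
			cur++;			/* next existing block */
		} else {
			cur = 0;		/* wrap around to the first block */
		}
	}
}

int main(void)
{
	visit_order(3, 1);	/* e.g. 3 existing blocks, search started in block 1 */
	return 0;
}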