Example #1
LM_STATUS
MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
	PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
	LM_BOOL cached /* we ignore this */)
{
	struct be_b57_dev *dev;
	void *pvirt = NULL;
	area_id area_desc;
	physical_entry entry;

	dev = (struct be_b57_dev *)(pDevice);
	area_desc = dev->lockmem_list[dev->lockmem_list_num++] = create_area("broadcom_shared_mem",
		&pvirt, B_ANY_KERNEL_ADDRESS, ROUND_UP_TO_PAGE(BlockSize),
		B_CONTIGUOUS, 0);

	if (area_desc < B_OK)
		return LM_STATUS_FAILURE;

	memset(pvirt, 0, BlockSize);
	*pMemoryBlockVirt = (PLM_VOID) pvirt;

	get_memory_map(pvirt,BlockSize,&entry,1);
	pMemoryBlockPhy->Low = (uint32)entry.address;
	pMemoryBlockPhy->High = (uint32)(entry.address >> 32);
		/* We only support 32 bit */

	return LM_STATUS_SUCCESS;
}
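The pattern in this example recurs throughout the rest of this page: create a B_CONTIGUOUS area, call get_memory_map() once to learn its physical address, and hand that address to the hardware. A minimal sketch of the idiom, assuming current Haiku headers where physical_entry::address is a phys_addr_t (the function and area names are illustrative, not from any of the projects above):

#include <string.h>

#include <KernelExport.h>
#include <OS.h>

/* Sketch only: allocate a physically contiguous, zeroed DMA block. */
static area_id
alloc_dma_block(size_t size, void **virt, phys_addr_t *phys)
{
	physical_entry entry;
	void *address = NULL;

	size = (size + B_PAGE_SIZE - 1) & ~(size_t)(B_PAGE_SIZE - 1);
	area_id area = create_area("dma block", &address, B_ANY_KERNEL_ADDRESS,
		size, B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	if (get_memory_map(address, size, &entry, 1) != B_OK) {
		delete_area(area);
		return B_ERROR;
	}

	memset(address, 0, size);
	*virt = address;
	*phys = entry.address;	/* one run, since the area is contiguous */
	return area;
}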
Example #2
area_id
Stack::AllocateArea(void **logicalAddress, void **physicalAddress, size_t size,
                    const char *name)
{
    TRACE("allocating %ld bytes for %s\n", size, name);

    void *logAddress;
    size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
    area_id area = create_area(name, &logAddress, B_ANY_KERNEL_ADDRESS, size,
                               B_CONTIGUOUS, 0);

    if (area < B_OK) {
        TRACE_ERROR("couldn't allocate area %s\n", name);
        return B_ERROR;
    }

    physical_entry physicalEntry;
    status_t result = get_memory_map(logAddress, size, &physicalEntry, 1);
    if (result < B_OK) {
        delete_area(area);
        TRACE_ERROR("couldn't map area %s\n", name);
        return B_ERROR;
    }

    memset(logAddress, 0, size);
    if (logicalAddress)
        *logicalAddress = logAddress;

    if (physicalAddress)
        *physicalAddress = physicalEntry.address;

    TRACE("area = %ld, size = %ld, log = %p, phy = %p\n",
          area, size, logAddress, physicalEntry.address);
    return area;
}
Example #3
status_t
prepare_sleep_state(uint8 state, void (*wakeFunc)(void), size_t size)
{
    ACPI_STATUS acpiStatus;

    TRACE("prepare_sleep_state %d, %p, %ld\n", state, wakeFunc, size);

    if (state != ACPI_POWER_STATE_OFF) {
        physical_entry wakeVector;
        status_t status;

        // Note: The supplied code must already be locked into memory.
        status = get_memory_map((const void*)wakeFunc, size, &wakeVector, 1);
        if (status != B_OK)
            return status;

#	if B_HAIKU_PHYSICAL_BITS > 32
        if (wakeVector.address >= 0x100000000LL) {
            ERROR("prepare_sleep_state(): ACPI 2.0c says use 32 bit "
                  "vector, but we have a physical address >= 4 GB\n");
        }
#	endif
        acpiStatus = AcpiSetFirmwareWakingVector(wakeVector.address,
                     wakeVector.address);
        if (acpiStatus != AE_OK)
            return B_ERROR;
    }

    acpiStatus = AcpiEnterSleepStatePrep(state);
    if (acpiStatus != AE_OK)
        return B_ERROR;

    return B_OK;
}
Example #4
area_id
alloc_mem(void **phy, void **log, size_t size, const char *name)
{
    physical_entry pe;
    void * logadr;
    area_id areaid;
    status_t rv;

    TRACE("allocating %#08X bytes for %s\n", (int)size, name);

    size = round_to_pagesize(size);
    areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size,
                         B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
    if (areaid < B_OK) {
        TRACE("couldn't allocate area %s\n",name);
        return B_ERROR;
    }
    rv = get_memory_map(logadr,size,&pe,1);
    if (rv < B_OK) {
        delete_area(areaid);
        TRACE("couldn't map %s\n",name);
        return B_ERROR;
    }
    memset(logadr,0,size);
    if (log)
        *log = logadr;
    if (phy)
        *phy = pe.address;
    TRACE("area = %d, size = %#08X, log = %#08X, phy = %#08X\n", (int)areaid, (int)size, (int)logadr, (int)pe.address);
    return areaid;
}
Example #5
void MC_take_snapshot(mc_snapshot_t snapshot)
{
  unsigned int i = 0;
  char copy = 0;
  s_map_region reg;
  memory_map_t maps = get_memory_map();

  /* Save the std heap and the writable mapped pages of libsimgrid */
    while(i < maps->mapsize
          && (maps->regions[i].pathname == NULL
              || memcmp(maps->regions[i].pathname, "/lib/ld", 7))){
      reg = maps->regions[i];
      if((reg.prot & PROT_WRITE)){
        if(reg.start_addr == std_heap){
          MC_snapshot_add_region(snapshot, reg.start_addr,
                                 (char*)reg.end_addr - (char*)reg.start_addr);

        }else if(copy || (reg.pathname != NULL
                 && !memcmp(basename(maps->regions[i].pathname), "libsimgrid", 10))){
          MC_snapshot_add_region(snapshot, reg.start_addr,
                                 (char*)reg.end_addr - (char*)reg.start_addr);
          /* This will force saving the regions in the next iterations,
           * but we assume that ld will be found mapped and will break the loop
           * before saving a wrong region. (This is ugly, I know.) */
          copy = TRUE;
        }
      }
      i++;
    }

  /* FIXME: free the memory map */
}
Example #6
File: util.c Project: luciang/haiku
area_id
alloc_mem(void **virt, void **phy, size_t size, uint32 protection,
	const char *name)
{
// TODO: phy should be phys_addr_t*!
	physical_entry pe;
	void * virtadr;
	area_id areaid;
	status_t rv;

	TRACE("allocating %ld bytes for %s\n", size, name);

	size = ROUNDUP(size, B_PAGE_SIZE);
	areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
		B_32_BIT_CONTIGUOUS, protection);
		// TODO: The rest of the code doesn't deal correctly with physical
		// addresses > 4 GB, so we have to force 32 bit addresses here.
	if (areaid < B_OK) {
		TRACE("couldn't allocate area %s\n", name);
		return B_ERROR;
	}
	rv = get_memory_map(virtadr, size, &pe, 1);
	if (rv < B_OK) {
		delete_area(areaid);
		TRACE("couldn't map %s\n", name);
		return B_ERROR;
	}
	if (virt)
		*virt = virtadr;
	if (phy)
		*phy = (void*)(addr_t)pe.address;
	TRACE("area = %ld, size = %ld, virt = %p, phy = %p\n", areaid, size, virtadr, pe.address);
	return areaid;
}
Example #7
area_id
alloc_contiguous(void **virt, void **phy, size_t size, uint32 protection,
	const char *name)
{
	physical_entry pe;
	void * virtadr;
	area_id areaid;
	status_t rv;

	TRACE("allocating %ld bytes for %s\n", size, name);

	size = round_to_pagesize(size);
	areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
		B_CONTIGUOUS, protection);
	if (areaid < B_OK) {
		ERROR("couldn't allocate area %s\n", name);
		return B_ERROR;
	}
	rv = get_memory_map(virtadr, size, &pe, 1);
	if (rv < B_OK) {
		delete_area(areaid);
		ERROR("couldn't get mapping for %s\n", name);
		return B_ERROR;
	}
	memset(virtadr, 0, size);
	if (virt)
		*virt = virtadr;
	if (phy)
		*phy = pe.address;
	TRACE("area = %ld, size = %ld, virt = %p, phy = %p\n", areaid, size, virtadr, pe.address);
	return areaid;
}
Example #8
void
VirtioSCSIRequest::FillRequest(uint32 inCount, uint32 outCount,
	physical_entry *entries)
{
	CALLED();
	fRequest->task_attr = VIRTIO_SCSI_S_SIMPLE;
	fRequest->tag = (addr_t)fCCB;
	fRequest->lun[0] = 1;
	fRequest->lun[1] = fCCB->target_id;
	// we don't support lun >= 256
	fRequest->lun[2] = 0x40;
	fRequest->lun[3] = fCCB->target_lun & 0xff;

	memcpy(fRequest->cdb, fCCB->cdb, min_c(fCCB->cdb_length,
		min_c(sizeof(fRequest->cdb), sizeof(fCCB->cdb))));

	get_memory_map(fBuffer, sizeof(struct virtio_scsi_cmd_req)
		+ sizeof(struct virtio_scsi_cmd_resp), &entries[0], 1);
	entries[0].size = sizeof(struct virtio_scsi_cmd_req);
	if (outCount > 1) {
		memcpy(entries + 1, fCCB->sg_list, fCCB->sg_count
			* sizeof(physical_entry));
	}

	entries[outCount].address = entries[0].address
		+ sizeof(struct virtio_scsi_cmd_req);
	entries[outCount].size = sizeof(struct virtio_scsi_cmd_resp);

	if (inCount > 1) {
		memcpy(entries + outCount + 1, fCCB->sg_list, fCCB->sg_count
			* sizeof(physical_entry));
	}
}
Example #9
static phys_addr_t
physicalAddress(volatile void *address, uint32 length)
{
	physical_entry table;

	get_memory_map((void *)address, length, &table, 1);
	return table.address;
}
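A hedged usage sketch for a helper like the one above: programming a 32-bit DMA address-register pair, the way Example #1 splits the address. writeReg32() and the REG_* offsets are invented for illustration; they belong to no real driver here:

/* Illustrative only: writeReg32() and the REG_* offsets are invented. */
static void
program_dma_address(volatile void *buffer, uint32 length)
{
	phys_addr_t phys = physicalAddress(buffer, length);
	writeReg32(REG_DMA_ADDR_LOW, (uint32)phys);
	writeReg32(REG_DMA_ADDR_HIGH, (uint32)(phys >> 32));
}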
Example #10
static status_t createGARTBuffer( GART_info *gart, size_t size )
{
	physical_entry map[1];
	void *unaligned_addr, *aligned_phys;

	SHOW_FLOW0( 3, "" );

	gart->buffer.size = size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// we allocate a contiguous area of twice the size
	// to be able to find an aligned, contiguous range within it;
	// the graphics card doesn't care, but the CPU cannot
	// make an arbitrary area WC'ed, at least on older CPUs
	// question: is this necessary for a PCI GART because of bus snooping?
	gart->buffer.unaligned_area = create_area( "Radeon PCI GART buffer",
		&unaligned_addr, B_ANY_KERNEL_ADDRESS,
		2 * size, B_CONTIGUOUS/*B_FULL_LOCK*/, B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA );
		// TODO: Physical aligning can be done without waste using the
		// private create_area_etc().
	if (gart->buffer.unaligned_area < 0) {
		SHOW_ERROR( 1, "cannot create PCI GART buffer (%s)",
			strerror( gart->buffer.unaligned_area ));
		return gart->buffer.unaligned_area;
	}

	get_memory_map( unaligned_addr, B_PAGE_SIZE, map, 1 );

	aligned_phys =
		(void **)((map[0].address + size - 1) & ~(size - 1));

	SHOW_FLOW( 3, "aligned_phys=%p", aligned_phys );

	gart->buffer.area = map_physical_memory( "Radeon aligned PCI GART buffer",
		(addr_t)aligned_phys,
		size, B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr );

	if( gart->buffer.area < 0 ) {
		SHOW_ERROR0( 3, "cannot map buffer with WC" );
		gart->buffer.area = map_physical_memory( "Radeon aligned PCI GART buffer",
			(addr_t)aligned_phys,
			size, B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr );
	}

	if( gart->buffer.area < 0 ) {
		SHOW_ERROR0( 1, "cannot map GART buffer" );
		delete_area( gart->buffer.unaligned_area );
		gart->buffer.unaligned_area = -1;
		return gart->buffer.area;
	}

	memset( gart->buffer.ptr, 0, size );

	return B_OK;
}
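The masking trick above only yields a size-aligned address when size is a power of two; allocating 2 * size then guarantees the range contains a size-aligned block, which (address + size - 1) & ~(size - 1) locates. The round-up in isolation, as a standalone sketch (align must be a nonzero power of two):

#include <assert.h>
#include <stdint.h>

/* Round addr up to the next multiple of align (a power of two). */
static inline uint64_t
round_up_pow2(uint64_t addr, uint64_t align)
{
	assert(align != 0 && (align & (align - 1)) == 0);
	return (addr + align - 1) & ~(align - 1);
}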
Example #11
void memory_map_get_regions(Context * ctx, MemoryRegion ** regions, unsigned * cnt) {
    MemoryMap * map = get_memory_map(ctx);
    if (map == NULL) {
        *regions = NULL;
        *cnt = 0;
    }
    else {
        *regions = map->regions;
        *cnt = map->region_cnt;
    }
}
Example #12
status_t
AHCIPort::FillPrdTable(volatile prd *prdTable, int *prdCount, int prdMax,
                       const void *data, size_t dataSize)
{
    int peMax = prdMax + 1;
    physical_entry pe[peMax];
    if (get_memory_map(data, dataSize, pe, peMax ) < B_OK) {
        TRACE("AHCIPort::FillPrdTable get_memory_map failed\n");
        return B_ERROR;
    }
    int peUsed;
    for (peUsed = 0; pe[peUsed].size; peUsed++)
        ;
    return FillPrdTable(prdTable, prdCount, prdMax, pe, peUsed, dataSize);
}
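The peUsed loop works because get_memory_map() does not return how many entries it filled; the scan relies on the unused trailing entries being left zeroed (Example #19 below works around the same limitation). A bounded version of the scan, as a minimal sketch:

/* Count the entries get_memory_map() actually filled in. */
static int
count_physical_entries(const physical_entry *pe, int peMax)
{
	int used = 0;
	while (used < peMax && pe[used].size != 0)
		used++;
	return used;
}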
Example #13
File: main.c Project: blm768/arm-os
//Currently, kmain uses the interrupt stack.
//It might overwrite ATAGs, etc. if care is not used.
//TODO: allocate a stack elsewhere?
void kmain(KMAIN_ARGS) {
	get_memory_map();
	map_io_area();
	bool status = init_console();
	if(status) {
		init_phys_allocators();
		//Drivers must be loaded before any heap allocations
		//because driver images are stored at the beginning of the heap.
		load_drivers();

		interrupt_init();
		enable_irqs();
		//enable_irq(cpu_timer);
	} else {
		//To do: error handling.
	}
}
Example #14
status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	phys_addr_t physicalBase = 0;
	area_id area = -1;
	phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fRestrictions.low_address;
	physicalRestrictions.high_address = fRestrictions.high_address;
	physicalRestrictions.alignment = fRestrictions.alignment;
	physicalRestrictions.boundary = fRestrictions.boundary;
	area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0, &virtualRestrictions,
		&physicalRestrictions, &bounceBuffer);
	if (area < B_OK)
		return area;

	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = entry.address;

	ASSERT(fRestrictions.high_address >= physicalBase + size);

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}
Example #15
/**
 * memory_map - Allocate and fill out an array of memory descriptors
 * @map_buf: buffer containing the memory map
 * @map_size: size of the buffer containing the memory map
 * @map_key: key for the current memory map
 * @desc_size: size of the desc
 * @desc_version: memory descriptor version
 *
 * On success, @map_size contains the size of the memory map pointed
 * to by @map_buf and @map_key, @desc_size and @desc_version are
 * updated.
 */
EFI_STATUS
memory_map(EFI_MEMORY_DESCRIPTOR **map_buf, UINTN *map_size,
           UINTN *map_key, UINTN *desc_size, UINT32 *desc_version)
{
        EFI_STATUS err;

        *map_size = sizeof(**map_buf) * 31;
get_map:

        /*
         * Because we're about to allocate memory, we may
         * potentially create a new memory descriptor, thereby
         * increasing the size of the memory map. So increase
         * the buffer size by the size of one memory
         * descriptor, just in case.
         */
        *map_size += sizeof(**map_buf);

        err = allocate_pool(EfiLoaderData, *map_size,
                            (void **)map_buf);
        if (err != EFI_SUCCESS) {
                error(L"Failed to allocate pool for memory map");
                goto failed;
        }

        err = get_memory_map(map_size, *map_buf, map_key,
                             desc_size, desc_version);
        if (err != EFI_SUCCESS) {
                if (err == EFI_BUFFER_TOO_SMALL) {
                        /*
                         * 'map_size' has been updated to reflect the
                         * required size of a map buffer.
                         */
                        free_pool((void *)*map_buf);
                        goto get_map;
                }

                error(L"Failed to get memory map");
                goto failed;
        }

failed:
        return err;
}
Example #16
void main(void)
{
	memset(&boot_params, 0, sizeof(boot_params));

	/* processor */
	processor = getprocessor();

	/* memory map */
	get_memory_map(processor);

	/* Get environment variables from the parameter sector. */
	get_parameters();

	if (boot_nucleos() < 0)
		printf("Error while booting kernel\n");

	/* @nucleos: only in case of error */
	while (1) halt_cpu();
}
Example #17
status_t
Stream::Init()
{
	if (fStatus == B_OK)
		Free();

	fHWChannel = fIsInput ? 0 : 1;

	if (_HWId() == SiS7018)
			fHWChannel += 0x20; // bank B optimized for PCM
	else if (_HWId() == ALi5451 && fIsInput)
			fHWChannel = 31;
	
	// assume maximal possible buffers size
	fBuffersAreaSize = 1024; // samples
	fBuffersAreaSize *= 2 * 2 * 2; // stereo + 16-bit samples + 2 buffers
	fBuffersAreaSize = (fBuffersAreaSize + (B_PAGE_SIZE - 1)) &~ (B_PAGE_SIZE - 1);
	fBuffersArea = create_area(
			(fIsInput) ? DRIVER_NAME "_record_area" : DRIVER_NAME "_playback_area",
				&fBuffersAddress, B_ANY_KERNEL_ADDRESS, fBuffersAreaSize,
				B_32_BIT_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
	if (fBuffersArea < 0) {
		ERROR("failed to create %#lx-byte buffer area: %#010x\n",
												fBuffersAreaSize, fBuffersArea);
		fStatus = fBuffersArea;
		return fStatus;
	}

	physical_entry PhysEntry;
	get_memory_map(fBuffersAddress, fBuffersAreaSize, &PhysEntry, 1);

	fBuffersPhysAddress = PhysEntry.address;

	TRACE("Created area id %d with size: %#x at address %#x[phys:%#lx]\n",
		fBuffersArea, fBuffersAreaSize, fBuffersAddress, fBuffersPhysAddress);

	// back to samples - half of buffer for 16-bit stereo data
	fBufferSamplesCount = fBuffersAreaSize / (2 * 2 * 2);

	fStatus = B_OK;
	return fStatus;
}
Example #18
VirtioRNGDevice::VirtioRNGDevice(device_node *node)
	:
	fNode(node),
	fVirtio(NULL),
	fVirtioDevice(NULL),
	fStatus(B_NO_INIT),
	fOffset(BUFFER_SIZE)
{
	CALLED();

	B_INITIALIZE_SPINLOCK(&fInterruptLock);
	fInterruptCondition.Init(this, "virtio rng transfer");

	get_memory_map(fBuffer, BUFFER_SIZE, &fEntry, 1);

	// get the Virtio device from our parent's parent
	device_node *parent = gDeviceManager->get_parent_node(node);
	device_node *virtioParent = gDeviceManager->get_parent_node(parent);
	gDeviceManager->put_node(parent);

	gDeviceManager->get_driver(virtioParent, (driver_module_info **)&fVirtio,
		(void **)&fVirtioDevice);
	gDeviceManager->put_node(virtioParent);

	fVirtio->negociate_features(fVirtioDevice,
		0, &fFeatures, &get_feature_name);

	fStatus = fVirtio->alloc_queues(fVirtioDevice, 1, &fVirtioQueue);
	if (fStatus != B_OK) {
		ERROR("queue allocation failed (%s)\n", strerror(fStatus));
		return;
	}

	fStatus = fVirtio->setup_interrupt(fVirtioDevice, NULL, this);
	if (fStatus != B_OK) {
		ERROR("interrupt setup failed (%s)\n", strerror(fStatus));
		return;
	}
}
Example #19
status_t
get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
	physical_entry *map, size_t max_entries, size_t *num_entries, size_t *mapped_len)
{
	size_t cur_idx;
	size_t left_len;

	SHOW_FLOW(3, "vec_count=%lu, vec_offset=%lu, len=%lu, max_entries=%lu",
		vec_count, vec_offset, len, max_entries);

	// skip iovec blocks if needed
	while (vec_count > 0 && vec_offset > vec->iov_len) {
		vec_offset -= vec->iov_len;
		--vec_count;
		++vec;
	}

	for (left_len = len, cur_idx = 0; left_len > 0 && vec_count > 0 && cur_idx < max_entries;) {
		char *range_start;
		size_t range_len;
		status_t res;
		size_t cur_num_entries, cur_mapped_len;
		uint32 tmp_idx;

		SHOW_FLOW( 3, "left_len=%d, vec_count=%d, cur_idx=%d",
			(int)left_len, (int)vec_count, (int)cur_idx );

		// map one iovec
		range_start = (char *)vec->iov_base + vec_offset;
		range_len = std::min(vec->iov_len - vec_offset, left_len);

		SHOW_FLOW( 3, "range_start=%x, range_len=%x",
			(int)range_start, (int)range_len );

		vec_offset = 0;

		if ((res = get_memory_map(range_start, range_len, &map[cur_idx],
				max_entries - cur_idx)) != B_OK) {
			// according to the documentation, no error is ever reported - argh!
			SHOW_ERROR(1, "invalid io_vec passed (%s)", strerror(res));
			return res;
		}

		// stupid: get_memory_map does neither tell how many sg blocks
		// are used nor whether there were enough sg blocks at all;
		// -> determine that manually
		// TODO: Use get_memory_map_etc()!
		cur_mapped_len = 0;
		cur_num_entries = 0;

		for (tmp_idx = cur_idx; tmp_idx < max_entries; ++tmp_idx) {
			if (map[tmp_idx].size == 0)
				break;

			cur_mapped_len += map[tmp_idx].size;
			++cur_num_entries;
		}

		if (cur_mapped_len == 0) {
			panic("get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			SHOW_ERROR(2, "get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			return B_ERROR;
		}

		SHOW_FLOW( 3, "cur_num_entries=%d, cur_mapped_len=%x",
			(int)cur_num_entries, (int)cur_mapped_len );

		// try to combine with previous sg block
		if (cur_num_entries > 0 && cur_idx > 0
			&& map[cur_idx].address
				== map[cur_idx - 1].address + map[cur_idx - 1].size) {
			SHOW_FLOW0( 3, "combine with previous chunk" );
			map[cur_idx - 1].size += map[cur_idx].size;
			memcpy(&map[cur_idx], &map[cur_idx + 1], (cur_num_entries - 1) * sizeof(map[0]));
			--cur_num_entries;
		}

		cur_idx += cur_num_entries;
		left_len -= cur_mapped_len;

		// advance iovec if current one is described completely
		if (cur_mapped_len == range_len) {
			++vec;
			--vec_count;
		}
	}

	*num_entries = cur_idx;
	*mapped_len = len - left_len;

	SHOW_FLOW( 3, "num_entries=%d, mapped_len=%x",
		(int)*num_entries, (int)*mapped_len );

	return B_OK;
}
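A hedged usage sketch for the helper above, assuming two already-locked buffers described by an iovec pair (the buffer names and sizes are invented for illustration):

/* Sketch: dump the physical runs behind two locked buffers. */
static void
dump_iovec_map(void *buf0, void *buf1)
{
	iovec vec[2] = { { buf0, 2048 }, { buf1, 4096 } };
	physical_entry map[8];
	size_t num_entries = 0, mapped_len = 0;

	if (get_iovec_memory_map(vec, 2, 0, 6144, map, 8,
			&num_entries, &mapped_len) != B_OK)
		return;

	for (size_t i = 0; i < num_entries; i++)
		dprintf("chunk %d: phys %#lx, %lu bytes\n", (int)i,
			(unsigned long)map[i].address, map[i].size);
}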
Example #20
static status_t init_ring_buffers(dp83815_properties_t *data)
{
	uint32			i;
	area_info		info;
	physical_entry	map[2];
	uint32 pages;

	descriptor_t	*RxDescRing = NULL;
	descriptor_t	*TxDescRing = NULL;

	descriptor_t	*desc_base_virt_addr;
	uint32			desc_base_phys_addr;

	void			*buff_base_virt_addr;
	uint32			buff_base_phys_addr;


	data->mem_area = 0;

#define NUM_BUFFS	2*MAX_DESC

	pages = pages_needed(2*MAX_DESC*sizeof(descriptor_t) + NUM_BUFFS*BUFFER_SIZE);

	data->mem_area = create_area(kDevName " desc buffer", (void**)&RxDescRing,
		B_ANY_KERNEL_ADDRESS, pages * B_PAGE_SIZE, B_CONTIGUOUS,
		B_READ_AREA | B_WRITE_AREA);
	if( data->mem_area < 0 )
		return -1;

	get_area_info(data->mem_area, &info);
	get_memory_map(info.address, info.size, map, 2);	/* map[] only holds two entries */

	desc_base_phys_addr = (int)map[0].address + NUM_BUFFS*BUFFER_SIZE;
	desc_base_virt_addr = (info.address + NUM_BUFFS*BUFFER_SIZE);

	buff_base_phys_addr = (int)map[0].address;
	buff_base_virt_addr = info.address;

	RxDescRing = desc_base_virt_addr;
	for( i = 0; i < MAX_DESC; i++ ) {
		RxDescRing[i].link = desc_base_phys_addr + ((i+1)%MAX_DESC)*sizeof(descriptor_t);
		RxDescRing[i].cmd = MAX_PACKET_SIZE;
		RxDescRing[i].ptr = buff_base_phys_addr +i*BUFFER_SIZE;
		RxDescRing[i].virt_next = &RxDescRing[(i+1)%MAX_DESC];
		RxDescRing[i].virt_buff = buff_base_virt_addr + i*BUFFER_SIZE;
	}

	TxDescRing = desc_base_virt_addr + MAX_DESC;
	for( i = 0; i < MAX_DESC; i++ ) {
		TxDescRing[i].link = desc_base_phys_addr + MAX_DESC*sizeof(descriptor_t)+ ((i+1)%MAX_DESC)*sizeof(descriptor_t);
		TxDescRing[i].cmd = MAX_PACKET_SIZE;
		TxDescRing[i].ptr = buff_base_phys_addr + ((i+MAX_DESC)*BUFFER_SIZE);
		TxDescRing[i].virt_next = &TxDescRing[(i+1)%MAX_DESC];
		TxDescRing[i].virt_buff = buff_base_virt_addr + ((i+MAX_DESC)*BUFFER_SIZE);
	}

	data->Rx.Curr = RxDescRing;
	data->Tx.Curr = TxDescRing;

	data->Rx.CurrInt = RxDescRing;
	data->Tx.CurrInt = TxDescRing;


	write32(REG_RXDP, desc_base_phys_addr);		/* set the initial rx descriptor	*/

	i = desc_base_phys_addr+MAX_DESC*sizeof(descriptor_t);
	write32(REG_TXDP, i);							/* set the initial tx descriptor	*/

	return B_OK;
}
Example #21
// init GATT (could be used for both PCI and AGP)
static status_t initGATT( GART_info *gart )
{
	area_id map_area;
	uint32 map_area_size;
	physical_entry *map;
	physical_entry PTB_map[1];
	size_t map_count;
	uint32 i;
	uint32 *gatt_entry;
	size_t num_pages;

	SHOW_FLOW0( 3, "" );

	num_pages = (gart->buffer.size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// GART must be contiguous
	gart->GATT.area = create_area("Radeon GATT", (void **)&gart->GATT.ptr,
		B_ANY_KERNEL_ADDRESS,
		(num_pages * sizeof( uint32 ) + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1),
		B_32_BIT_CONTIGUOUS,
			// TODO: Physical address is cast to 32 bit below! Use B_CONTIGUOUS,
			// when that is (/can be) fixed!
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
		// TODO: really user read/write?
		B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA
#else
		0
#endif
		);

	if (gart->GATT.area < 0) {
		SHOW_ERROR(1, "cannot create GATT table (%s)",
			strerror(gart->GATT.area));
		return gart->GATT.area;
	}

	get_memory_map(gart->GATT.ptr, B_PAGE_SIZE, PTB_map, 1);
	gart->GATT.phys = PTB_map[0].address;

	SHOW_INFO(3, "GATT_ptr=%p, GATT_phys=%p", gart->GATT.ptr,
		(void *)gart->GATT.phys);

	// get address mapping
	memset(gart->GATT.ptr, 0, num_pages * sizeof(uint32));

	map_count = num_pages + 1;

	// align size to B_PAGE_SIZE
	map_area_size = map_count * sizeof(physical_entry);
	if ((map_area_size / B_PAGE_SIZE) * B_PAGE_SIZE != map_area_size)
		map_area_size = ((map_area_size / B_PAGE_SIZE) + 1) * B_PAGE_SIZE;

	// temporary area where we fill in the memory map (deleted below)
	map_area = create_area("pci_gart_map_area", (void **)&map, B_ANY_ADDRESS,
		map_area_size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA);
		// TODO: We actually have a working malloc() in the kernel. Why create
		// an area?
	dprintf("pci_gart_map_area: %ld\n", map_area);

	get_memory_map( gart->buffer.ptr, gart->buffer.size, map, map_count );

	// the following looks a bit strange as the kernel
	// combines successive entries
	gatt_entry = gart->GATT.ptr;

	for( i = 0; i < map_count; ++i ) {
		phys_addr_t addr = map[i].address;
		size_t size = map[i].size;

		if( size == 0 )
			break;

		while( size > 0 ) {
			*gatt_entry++ = addr;
			//SHOW_FLOW( 3, "%lx", *(gart_entry-1) );
			addr += ATI_PCIGART_PAGE_SIZE;
			size -= ATI_PCIGART_PAGE_SIZE;
		}
	}

	delete_area(map_area);

	if( i == map_count ) {
		// this case should never happen
		SHOW_ERROR0( 0, "memory map of GART buffer too large!" );
		delete_area( gart->GATT.area );
		gart->GATT.area = -1;
		return B_ERROR;
	}

	// this might be a bit more than needed, as
	// 1. Intel CPUs have "processor order", i.e. writes appear to external
	//    devices in program order, so a simple final write should be sufficient
	// 2. if it is a PCI GART, bus snooping should provide cache coherence
	// 3. this function is a no-op :(
	clear_caches( gart->GATT.ptr, num_pages * sizeof( uint32 ),
		B_FLUSH_DCACHE );

	// back to real life - some chipsets have write buffers that
	// prove all previous assumptions wrong
	// (don't know whether this really helps though)
	asm volatile ( "wbinvd" ::: "memory" );
	return B_OK;
}
Example #22
PhysicalMemoryAllocator::PhysicalMemoryAllocator(const char *name,
	size_t minSize, size_t maxSize, uint32 minCountPerBlock)
	:	fOverhead(0),
		fStatus(B_NO_INIT)
{
	fName = strdup(name);
	mutex_init_etc(&fLock, fName, MUTEX_FLAG_CLONE_NAME);

	fArrayCount = 1;
	size_t biggestSize = minSize;
	while (biggestSize < maxSize) {
		fArrayCount++;
		biggestSize *= 2;
	}

	size_t size = fArrayCount * sizeof(uint8 *);
	fArray = (uint8 **)malloc(size);
	fOverhead += size;

	size = fArrayCount * sizeof(size_t);
	fBlockSize = (size_t *)malloc(size);
	fArrayLength = (size_t *)malloc(size);
	fArrayOffset = (size_t *)malloc(size);
	fOverhead += size * 3;

	size_t arraySlots = biggestSize / minSize;
	for (int32 i = 0; i < fArrayCount; i++) {
		size = arraySlots * minCountPerBlock * sizeof(uint8);
		fArrayLength[i] = arraySlots * minCountPerBlock;
		fBlockSize[i] = biggestSize / arraySlots;
		fArrayOffset[i] = fArrayLength[i] - 1;

		fArray[i] = (uint8 *)malloc(size);
		memset(fArray[i], 0, fArrayLength[i]);

		fOverhead += size;
		arraySlots /= 2;
	}

	fManagedMemory = fBlockSize[0] * fArrayLength[0];

	size_t roundedSize = biggestSize * minCountPerBlock;
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
	fDebugBase = roundedSize;
	fDebugChunkSize = 64;
	fDebugUseMap = 0;
	roundedSize += sizeof(fDebugUseMap) * 8 * fDebugChunkSize;
#endif
	roundedSize = (roundedSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	fArea = create_area(fName, &fLogicalBase, B_ANY_KERNEL_ADDRESS,
		roundedSize, B_32_BIT_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
		// TODO: Use B_CONTIGUOUS when the TODOs regarding 64 bit physical
		// addresses are fixed (if possible).
	if (fArea < B_OK) {
		TRACE_ERROR(("PMA: failed to create memory area\n"));
		return;
	}

	physical_entry physicalEntry;
	if (get_memory_map(fLogicalBase, roundedSize, &physicalEntry, 1) < B_OK) {
		TRACE_ERROR(("PMA: failed to get memory map\n"));
		return;
	}

	fPhysicalBase = physicalEntry.address;
	fStatus = B_OK;
}
Example #23
/*static*/ status_t
TracingMetaData::Create(TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	area_id area;
	TracingMetaData* metaData;
	status_t error = _CreateMetaDataArea(true, area, metaData);
	if (error == B_OK) {
		if (metaData->_InitPreviousTracingData()) {
			_metaData = metaData;
			return B_OK;
		}

		dprintf("Found previous tracing meta data, but failed to init.\n");

		// invalidate the meta data
		metaData->fMagic1 = 0;
		metaData->fMagic2 = 0;
		metaData->fMagic3 = 0;
		delete_area(area);
	} else
		dprintf("No previous tracing meta data found.\n");

	// no previous tracing data found -- create new one
	error = _CreateMetaDataArea(false, area, metaData);
	if (error != B_OK)
		return error;

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&metaData->fTraceOutputBuffer);
	if (area < 0)
		return area;

	// get the physical address
	physical_entry physicalEntry;
	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
			&physicalEntry, 1) == B_OK) {
		metaData->fPhysicalAddress = physicalEntry.address;
	} else {
		dprintf("TracingMetaData::Create(): failed to get physical address "
			"of tracing buffer\n");
		metaData->fPhysicalAddress = 0;
	}

	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
		+ kTraceOutputBufferSize);
	metaData->fFirstEntry = metaData->fBuffer;
	metaData->fAfterLastEntry = metaData->fBuffer;

	metaData->fEntries = 0;
	metaData->fEntriesEver = 0;
	B_INITIALIZE_SPINLOCK(&metaData->fLock);

	metaData->fMagic1 = kMetaDataMagic1;
	metaData->fMagic2 = kMetaDataMagic2;
	metaData->fMagic3 = kMetaDataMagic3;

	_metaData = metaData;
	return B_OK;
}
Example #24
File: 53c8xx.c Project: DonCN/haiku
/*
 * actually execute an io transaction via SCRIPTS
 * you MUST hold st->sem_targ before calling this
 *
 */
static void exec_io(SymTarg *st, void *cmd, int cmdlen, void *msg, int msglen,
				   void *data, int datalen, int sg)
{
	cpu_status former;
	Symbios *s = st->adapter;

	memcpy((void *) &(st->priv->device.count), st->device, 4);

	st->priv->sendmsg.count = LE(msglen);
	memcpy(st->priv->_sendmsg, msg, msglen);
	st->priv->command.count = LE(cmdlen);
	memcpy(st->priv->_command, cmd, cmdlen);

	st->table_phys = st->priv_phys + ADJUST_PRIV_TO_TABLE;

	if(datalen){
		int i,sgcount;
		uint32 opcode;
		SymInd *t = st->priv->table;
		physical_entry *pe = (physical_entry *) &(st->priv->table[1]);

		if(st->inbound){
			opcode = s->op_in;
			st->datain_phys = st->table_phys;
			st->dataout_phys = s->sram_phys + Ent_phase_dataerr;
		} else {
			opcode = s->op_out;
			st->dataout_phys = st->table_phys;
			st->datain_phys = s->sram_phys + Ent_phase_dataerr;
		}

		if(sg) {
			iovec *vec = (iovec *) data;
			for(sgcount=0,i=0;i<datalen;i++){
				get_memory_map(vec[i].iov_base, vec[i].iov_len, &pe[sgcount], 130-sgcount);
				while(pe[sgcount].size && (sgcount < 130)){
					t[sgcount].address = LE((uint32) pe[sgcount].address);
					t[sgcount].count = LE(opcode | pe[sgcount].size);
					sgcount++;
				}
				if((sgcount == 130) && pe[sgcount].size){
					panic("symbios: sg list overrun");
				}
			}
		} else {
			get_memory_map(data, datalen, pe, 130);
			for(i=0;pe[i].size;i++){
				t[i].address = LE((uint32) pe[i].address);
				t[i].count = LE(opcode | pe[i].size);
			}
			sgcount = i;
		}
		t[sgcount].count = LE(OP_END);
		t[sgcount].address = LE(ARG_END);

//		for(i=0;i<=sgcount;i++){
//			dprintf("sym: %04d - %08x %08x\n",i,t[i].address,t[i].count);
//		}
	} else {
		st->datain_phys = s->sram_phys + Ent_phase_dataerr;
		st->dataout_phys = s->sram_phys + Ent_phase_dataerr;
	}

//	dprintf("sym: pp = %08x  di = %08x  do = %08x\n",st->priv_phys,st->datain_phys,st->dataout_phys);

	st->status = status_queued;

/*	dprintf("symbios: enqueueing %02x %02x %02x ... for %d (%d bytes %s)\n",
			((uchar *)cmd)[0],((uchar *)cmd)[1],((uchar *)cmd)[2],
			st->device[2],datalen,st->inbound?"IN":"OUT");
*/
	former = disable_interrupts();
	acquire_spinlock(&(s->hwlock));

	/* enqueue the request */
	if(s->startqueuetail){
		s->startqueuetail->next = st;
	} else {
		s->startqueue = st;
	}
	st->next = NULL;
	s->startqueuetail = st;

	/* If the adapter is idle, signal it so that this request may be started */
	if(s->status == IDLE) outb(sym_istat, sym_istat_sigp);

	release_spinlock(&(s->hwlock));
	restore_interrupts(former);

	/* wait for completion */
	acquire_sem(st->sem_done);

#if 0
	if(acquire_sem_etc(st->sem_done, 1, B_TIMEOUT, 10*1000000) != B_OK){
		kprintf("sym: targ %d never finished,  argh...\n",st->device[2]);
		init_symbios(st->adapter,1);
		st->state = sTIMEOUT;
		return;
	}
#endif
}
Example #25
int main(int argc, char **argv)
{
	pid_t pid;
	int i = 0;
	int j = 0;
	int regions = 1;
	int state = 0;
	char *mem;
	mach_vm_address_t base;
	mach_vm_address_t past = 0;
	mach_vm_address_t addr;
	vm_region_t **vm_region_list;
	mach_port_t task;

	if (geteuid() != 0) {
		fprintf(stderr, "[-] are you root?\n");
		exit(1);
	}
	set_signal_handler();
	printf("[+] Looking for Self Service\n");
	pid = get_pid();
	if (!pid) {
		fprintf(stderr, "[-] Not found. exiting\n");
		exit(2);
	}
	printf("[+] Self Service found: %d\n", pid);

	task = attach(pid);
	printf("[+] ATTACHED TO PROCESS %d WITH TASK %d\n", pid, task);

	base = get_base_address(task);
	addr = base;
	while (regions) {
		vm_region_list = get_memory_map(task, addr, &regions);
		printf("[+] Found %d regions\n", regions);
		for (i = 0; i < regions; ++i) {
			if (past > vm_region_list[i]->address_start) {
				printf("\n[!] Looped around somehow, exiting gracefully\n\n");
				exit(256);
			}
			printf("[+] Region %d:%d;\n[+]\tType:\t%s\n[+]\tBase Address:\t%016llx\n[+]\tEnd Address:\t%016llx\n[+]\tSize:\t0x%llx (%lld bytes)\n[+]\tPermissions:\t%s \n",
			       j, i, user_tag_to_string(vm_region_list[i]->region_type), vm_region_list[i]->address_start, vm_region_list[i]->address_start + vm_region_list[i]->size,
			       vm_region_list[i]->size, vm_region_list[i]->size, get_protection(vm_region_list[i]->protection));
			if ((vm_region_list[i]->protection) & 1) {
				printf("[+]\tMaking Local Copy of Memory\n");
				mem = (char *)read_memory_allocate(task, vm_region_list[i]->address_start, vm_region_list[i]->size);
			} else {
				printf("[+]\t\tChanging memory permissions\n");
				state = vm_region_list[i]->protection;
				change_page_protection(task, vm_region_list[i]->address_start, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
				printf("[+]\t\tMaking Local Copy of Memory\n");
				mem = (char *)read_memory_allocate(task, vm_region_list[i]->address_start, vm_region_list[i]->size);
				printf("[+]\t\tChanging memory permissions back\n");
				change_page_protection(task, vm_region_list[i]->address_start, state);
			}
			printf("[+]\tSearching local copy for username and password string\n");
			checkit(mem, vm_region_list[i]->size);
			free(mem);
		}
		printf("[+] Completed searching %d regions\n", i);
		past = addr;
		addr += base;
		if (setjmp(buf)) {
			fprintf(stderr, "\n[!] Segfault at 0x%llX\n\n", addr);
			past = addr;
			addr = vm_region_list[i]->address_start + vm_region_list[i]->size;
		}
		j++;
	}
	printf("\n[+] Completed searching through allocated memory\n");
	return 0;

}
Example #26
// prepare DMA engine to copy data from graphics mem to other mem
static status_t Radeon_PrepareDMA( 
	device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
{
	physical_entry map[16];
	status_t res;
	DMA_descriptor *cur_desc;
	int num_desc;

	if( lock_mem && !contiguous ) {
		res = lock_memory( target, size, B_DMA_IO | B_READ_DEVICE );
	
		if( res != B_OK ) {
			SHOW_ERROR( 2, "Cannot lock memory (%s)", strerror( res ));
			return res;
		}
	}

	// adjust virtual address for graphics card
	src += di->si->memory[mt_local].virtual_addr_start;
	
	cur_desc = (DMA_descriptor *)(di->si->local_mem + di->dma_desc_offset);
	num_desc = 0;
	
	// memory may be fragmented, so we create S/G list
	while( size > 0 ) {
		int i;
	
		if( contiguous ) {
			// if memory is contiguous, ask for start address only to reduce work
			get_memory_map( target, 1, map, 16 );
			// replace received size with total size
			map[0].size = size;
		} else {
			get_memory_map( target, size, map, 16 );
		}
		
		for( i = 0; i < 16; ++i ) {
			uint32 address = (uint32)map[i].address;
			size_t contig_size = map[i].size;
			
			if( contig_size == 0 )
				break;
				
			target += contig_size;
				
			while( contig_size > 0 ) {
				size_t cur_size;
				
				cur_size = min( contig_size, RADEON_DMA_DESC_MAX_SIZE );
				
				if( ++num_desc > (int)di->dma_desc_max_num ) {
					SHOW_ERROR( 2, "Overflow of DMA descriptors, %ld bytes left", size );
					res = B_BAD_VALUE;
					goto err;
				}
				
				cur_desc->src_address = src;
				cur_desc->dest_address = address;
				cur_desc->command = cur_size;
				cur_desc->res = 0;
			
				++cur_desc;
				address += cur_size;
				contig_size -= cur_size;
				src += cur_size;
				size -= cur_size;
			}
		}
	}
	
	// mark last descriptor as being last one	
	(cur_desc - 1)->command |= RADEON_DMA_COMMAND_EOL;
	
	return B_OK;
	
err:
	if( lock_mem && !contiguous )
		unlock_memory( target, size, B_DMA_IO| B_READ_DEVICE );
		
	return res;
}
Example #27
File: 53c8xx.c Project: DonCN/haiku
/*
** Allocate the actual memory for the cardinfo object
*/
static Symbios *create_cardinfo(int num, pci_info *pi, int flags)
{
	char name[32];
	Symbios *s;
	int i,scf;
	area_id aid;
	uint32 stest2,stest4;

	if((pi->u.h0.interrupt_line == 0) || (pi->u.h0.interrupt_line > 128)) {
		return NULL; /* invalid IRQ */
	}

	if(!(s = (Symbios *) malloc(sizeof(Symbios)))) return NULL;

	s->num = num;
	s->iobase = pi->u.h0.base_registers[0];
	s->irq = pi->u.h0.interrupt_line;
	s->hwlock = 0;
	s->startqueue = NULL;
	s->startqueuetail = NULL;
	s->active = NULL;

	sprintf(name,"sym%d:sram",num);
	if(flags & symf_sram){
		unsigned char *c;
		s->sram_phys = pi->u.h0.base_registers[2];
		if((aid=map_physical_memory(name, s->sram_phys, 4096,
									 B_ANY_KERNEL_ADDRESS, B_READ_AREA + B_WRITE_AREA,
									 (void **) &(s->script))) < 0){
			free(s);
			return NULL;
		}
		/* memory io test */
		c = (unsigned char *) s->script;
		for(i=0;i<4096;i++) c[i] = (255 - (i & 0xff));
		for(i=0;i<4096;i++) {
			if(c[i] != (255 - (i & 0xff))) {
				d_printf("symbios%d: scripts ram io error @ %d\n",num,i);
				goto err;
			}
		}
	} else {
		uchar *a;
		physical_entry entries[2];
		aid = create_area(name, (void **)&a, B_ANY_KERNEL_ADDRESS, 4096*5,
			B_32_BIT_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
		if(aid == B_ERROR || aid == B_BAD_VALUE || aid == B_NO_MEMORY){
			free(s);
		    return NULL;
		}
		get_memory_map(a, 4096, entries, 2);
		s->sram_phys = (uint32) entries[0].address;
		s->script = (uint32 *) a;
	}

	d_printf("symbios%d: scripts ram @ 0x%08lx, mapped to 0x%08lx (%s)\n",
			 num, s->sram_phys, (uint32) s->script,
			 flags & symf_sram ? "onboard" : "offboard" );

	/* what are we set at now? */
	s->host_targ_id = inb(sym_scid) & 0x07;
	dprintf("symbios%ld: host id %ld\n",s->num,s->host_targ_id);

	s->host_targ_id = 7;  /* XXX figure this out somehow... */
	s->max_targ_id = (flags & symf_wide) ? 15 : 7;

	stest2 = inb(sym_stest2);
	stest4 = inb(sym_stest4);

	/* software reset */
	outb(sym_istat, sym_istat_srst);
	spin(10000);
	outb(sym_istat, 0);
	spin(10000);

	/* initiator mode, full arbitration */
	outb(sym_scntl0, sym_scntl0_arb0 | sym_scntl0_arb1);

	outb(sym_scntl1, 0);
	outb(sym_scntl2, 0);

	/* initiator id=7, respond to reselection */
	/* respond to reselect of id 7 */
	outb(sym_respid, id_bits[s->host_targ_id]);
	outb(sym_scid, sym_scid_rre | s->host_targ_id);

	outb(sym_dmode, 0);

	dprintf("symbios%ld: stest2 = 0x%02lx, stest4 = 0x%02lx\n",s->num,stest2,stest4);

	/* no differential, no loopback, no hiz, no always-wide, no filter, no lowlevel */
	outb(sym_stest2, 0); // save diff bit
	outb(sym_stest3, 0);

//	if(flags & symf_quadrupler){
//		outb(sym_stest4, sym_stest4_lvd);
//	}

	outb(sym_stest1, 0);    /* make sure clock doubler is OFF  */

	s->sclk = sym_readclock(s);
	dprintf("symbios%ld: clock is %ldKHz\n",s->num,s->sclk);

	if(flags & symf_doubler){
		/* if we have a doubler and we don't already have an 80MHz clock */
		if((s->sclk > 35000) && (s->sclk < 45000)){
			dprintf("symbios%ld: enabling clock doubler...\n",s->num);
			outb(sym_stest1, 0x08);  /* enable doubler */
			spin(200);                /* wait 20us      */
			outb(sym_stest3, 0xa0);  /* halt sclk, enable TolerANT*/
			outb(sym_scntl3, 0x05);  /* SCLK/4         */
			outb(sym_stest1, 0x0c);  /* engage doubler */
			outb(sym_stest3, 0x80);  /* reenable sclk, leave TolerANT on  */

			spin(3000);

			s->sclk = sym_readclock(s);
			dprintf("symbios%ld: clock is %ldKHz\n",s->num,s->sclk);
		}
	}
	if(flags & symf_quadrupler){
		if((s->sclk > 35000) && (s->sclk < 45000)){
			dprintf("symbios%ld: enabling clock quadrupler...\n",s->num);
			outb(sym_stest1, 0x08);  /* enable doubler */
			spin(200);                /* wait 20us      */
			outb(sym_stest3, 0xa0);  /* halt sclk, enable TolerANT*/
			outb(sym_scntl3, 0x05);  /* SCLK/4         */
			outb(sym_stest1, 0x0c);  /* engage doubler */
			outb(sym_stest3, 0x80);  /* reenable sclk, leave TolerANT on  */

			spin(3000);

			s->sclk = sym_readclock(s);
			dprintf("symbios%ld: clock is %ldKHz\n",s->num,s->sclk);
			s->sclk = 160000;
		}
	}
	outb(sym_stest3, 0x80);  /* leave TolerANT on  */

	scf = 0;
	/* set CCF / SCF according to specs */
	if(s->sclk < 25010) {
		dprintf("symbios%ld: unsupported clock frequency\n",s->num);
		goto err;  //		s->scntl3 = 0x01;
	} else if(s->sclk < 37510){
		dprintf("symbios%ld: unsupported clock frequency\n",s->num);
		goto err;  //		s->scntl3 = 0x02;
	} else if(s->sclk < 50010){
		/* 40MHz - divide by 1, 2 */
		scf = 0x10;
		s->scntl3 = 0x03;
	} else if(s->sclk < 75010){
		dprintf("symbios%ld: unsupported clock frequency\n",s->num);
		goto err; //		s->scntl3 = 0x04;
	} else if(s->sclk < 85000){
		/* 80 MHz - divide by 2, 4*/
		scf = 0x30;
		s->scntl3 = 0x05;
	} else {
		/* 160 MHz - divide by 4, 8 */
		scf = 0x50;
		s->scntl3 = 0x07;
	}


	s->maxoffset = (flags & symf_short) ? 8 : 15 ;
	s->syncsize = 0;

	if(scf == 0x50){
		/* calculate values for 160MHz clock */
		for(i=0;i<4;i++){
			s->syncinfo[s->syncsize].sxfer = i << 5;
			s->syncinfo[s->syncsize].scntl3 = s->scntl3 | 0x90; /* /2, Ultra2 */
			s->syncinfo[s->syncsize].period_ns = (625 * (i+4)) / 100;
			s->syncinfo[s->syncsize].period = 4 * (s->syncinfo[s->syncsize].period_ns / 4);
			s->syncsize++;
		}
	}

	if(scf >= 0x30){
		/* calculate values for 80MHz clock */
		for(i=0;i<4;i++){
			s->syncinfo[s->syncsize].sxfer = i << 5;
			if(scf == 0x30){
				s->syncinfo[s->syncsize].scntl3 = s->scntl3 | 0x90; /* /2, Ultra */
			} else {
				s->syncinfo[s->syncsize].scntl3 = s->scntl3 | 0xb0; /* /4, Ultra2 */
			}

			s->syncinfo[s->syncsize].period_ns = (125 * (i+4)) / 10;
			s->syncinfo[s->syncsize].period = 4 * (s->syncinfo[s->syncsize].period_ns / 4);
			s->syncsize++;
		}
	}

	/* calculate values for 40MHz clock */
	for(i=0;i<8;i++){
		s->syncinfo[s->syncsize].sxfer = i << 5;
		s->syncinfo[s->syncsize].scntl3 = s->scntl3 | scf;
		s->syncinfo[s->syncsize].period_ns = 25 * (i+4);
		s->syncinfo[s->syncsize].period = 4 * (s->syncinfo[s->syncsize].period_ns / 4);
		s->syncsize++;
	}

	for(i=0;i<s->syncsize;i++){
		dprintf("symbios%ld: syncinfo[%d] = { %02x, %02x, %d ns, %d ns }\n",
				s->num, i,
				s->syncinfo[i].sxfer, s->syncinfo[i].scntl3,
				s->syncinfo[i].period_ns, s->syncinfo[i].period);
	}

	for(i=0;i<16;i++){
		s->targ[i].id = i;
		s->targ[i].adapter = s;
		s->targ[i].wide = 0;
		s->targ[i].offset = 0;
		s->targ[i].status = status_inactive;

		if((i == s->host_targ_id) || (i > s->max_targ_id)){
			s->targ[i].flags = tf_ignore;
		} else {
			s->targ[i].flags = tf_ask_sync;
			if(flags & symf_wide) s->targ[i].flags |= tf_ask_wide;
//			s->targ[i].flags = 0;

			setparams(s->targ + i, 0, 0, 0);

			sprintf(name,"sym%ld:%02d:lock",s->num,i);
			s->targ[i].sem_targ = create_sem(1,name);

			sprintf(name,"sym%ld:%02d:done",s->num,i);
			s->targ[i].sem_done = create_sem(0,name);
		}
	}

	if(flags & symf_wide){
		s->idmask = 15;
		s->op_in = OP_WDATA_IN;
		s->op_out = OP_WDATA_OUT;
	} else {
		s->idmask = 7;
		s->op_in = OP_NDATA_IN;
		s->op_out = OP_NDATA_OUT;
	}

	reloc_script(s);
    return s;

err:
	free(s);
	delete_area(aid);
	return NULL;
}
Example #28
static long sim_execute_scsi_io(BusLogic *bl, CCB_HEADER *ccbh)
{
    CCB_SCSIIO *ccb;
    int cdb_len;
    BL_CCB32 *bl_ccb;
    BL_PRIV *priv;
    uint32 priv_phys;
    uint32 bl_ccb_phys;
    physical_entry entries[2];
    physical_entry *scratch;
    uint32 tmp;
    int i,t,req;

    ccb = (CCB_SCSIIO *) ccbh;

#ifdef DEBUG_BUSLOGIC
    req = atomic_add(&(bl->reqid),1);
#endif

    /* valid cdb len? */
    cdb_len = ccb->cam_cdb_len;
    if (cdb_len != 6 && cdb_len != 10 && cdb_len != 12) {
        ccb->cam_ch.cam_status = CAM_REQ_INVALID;
        return B_ERROR;
    }

    /* acquire a CCB32 block */
    acquire_sem(bl->ccb_count);

    /* protect the freelist and unchain the CCB32 from it */
    acquire_sem(bl->ccb_lock);
    bl_ccb = bl->first_ccb;
    bl->first_ccb = bl_ccb->next;
    release_sem(bl->ccb_lock);

    bl_ccb_phys = VirtToPhys(bl_ccb);


    /* get contiguous area for bl_ccb in the private data area */
    get_memory_map((void *)ccb->cam_sim_priv, 4096, entries, 2);

    priv_phys = (uint32) entries[0].address;
    priv = (BL_PRIV *) ccb->cam_sim_priv;

    /* copy over the CDB */
    if(ccb->cam_ch.cam_flags & CAM_CDB_POINTER) {
        memcpy(bl_ccb->cdb, ccb->cam_cdb_io.cam_cdb_ptr, cdb_len);
    } else {
        memcpy(bl_ccb->cdb, ccb->cam_cdb_io.cam_cdb_bytes, cdb_len);
    }

    /* fill out the ccb header */
    bl_ccb->direction = BL_CCB_DIR_DEFAULT;
    bl_ccb->length_cdb = cdb_len;
    bl_ccb->length_sense = ccb->cam_sense_len;
    bl_ccb->_reserved1 = bl_ccb->_reserved2 = 0;
    bl_ccb->target_id = ccb->cam_ch.cam_target_id;
    bl_ccb->lun_tag = ccb->cam_ch.cam_target_lun & 0x07;
    bl_ccb->ccb_control = 0;
    bl_ccb->link_id = 0;
    bl_ccb->link = 0;
    bl_ccb->sense = toLE(priv_phys);

    /* okay, this is really disgusting and could potentially
       break if physical_entry{} changes format... we use the
       sg list as a scratchpad.  Disgusting, but a start */

    scratch = (physical_entry *) priv->sg;


    if(ccb->cam_ch.cam_flags & CAM_SCATTER_VALID) {
        /* we're using scatter gather -- things just got trickier */
        iovec *iov = (iovec *) ccb->cam_data_ptr;
        int j,sgcount = 0;

        /* dprintf("buslogic: sg count = %d\n",ccb->cam_sglist_cnt);*/
        /* multiple entries, use SG */
        bl_ccb->opcode = BL_CCB_OP_INITIATE_RETLEN_SG;
        bl_ccb->data = toLE(priv_phys + 256);

        /* for each entry in the sglist we were given ... */
        for(t=0,i=0; i<ccb->cam_sglist_cnt; i++) {
            /* map it ... */
            get_memory_map(iov[i].iov_base, iov[i].iov_len, &(scratch[sgcount]),
                           MAX_SCATTER - sgcount);

            /* and make a bl sgentry for each chunk ... */
            for(j=sgcount; scratch[j].size && j<MAX_SCATTER; j++) {
                t += scratch[j].size;
                sgcount++;
                dt_printf("buslogic/%d: SG %03d - 0x%08x (%d)\n",req,
                          j, (uint32) scratch[j].address, scratch[j].size);

                tmp = priv->sg[j].length;
                priv->sg[j].length = toLE(priv->sg[j].phys);
                priv->sg[j].phys = toLE(tmp);
            }

            if(scratch[j].size) panic("egads! sgseg overrun in BusLogic SIM");
        }
        if(t != ccb->cam_dxfer_len) {
            dt_printf("buslogic/%d: error, %d != %d\n",req,t,ccb->cam_dxfer_len);
            ccb->cam_ch.cam_status = CAM_REQ_INVALID;

            /* put the CCB32 back on the freelist and release our lock */
            acquire_sem(bl->ccb_lock);
            bl_ccb->next = bl->first_ccb;
            bl->first_ccb = bl_ccb;
            release_sem(bl->ccb_lock);
            release_sem(bl->ccb_count);
            return B_ERROR;
        }
        /* total bytes in DataSegList */
        bl_ccb->length_data = toLE(sgcount * 8);
    } else {
        get_memory_map((void *)ccb->cam_data_ptr, ccb->cam_dxfer_len, scratch,
                       MAX_SCATTER);

        if(scratch[1].size) {
            /* multiple entries, use SG */
            bl_ccb->opcode = BL_CCB_OP_INITIATE_RETLEN_SG;
            bl_ccb->data = toLE(priv_phys + 256);
            for(t=0,i=0; scratch[i].size && i<MAX_SCATTER; i++) {
                t += scratch[i].size;
                dt_printf("buslogic/%d: SG %03d - 0x%08x (%d)\n",req,
                          i, (uint32) scratch[i].address, scratch[i].size);

                tmp = priv->sg[i].length;
                priv->sg[i].length = toLE(priv->sg[i].phys);
                priv->sg[i].phys = toLE(tmp);
            }
            if(t != ccb->cam_dxfer_len) {
                dt_printf("buslogic/%d: error, %d != %d\n",req,t,ccb->cam_dxfer_len);
                ccb->cam_ch.cam_status = CAM_REQ_INVALID;

                /* put the CCB32 back on the freelist and release our lock */
                acquire_sem(bl->ccb_lock);
                bl_ccb->next = bl->first_ccb;
                bl->first_ccb = bl_ccb;
                release_sem(bl->ccb_lock);
                release_sem(bl->ccb_count);
                return B_ERROR;
            }
            /* total bytes in DataSegList */
            bl_ccb->length_data = toLE(i * 8);

        } else {
            bl_ccb->opcode = BL_CCB_OP_INITIATE_RETLEN;
            /* single entry, use direct */
            t = bl_ccb->length_data = toLE(ccb->cam_dxfer_len);
            bl_ccb->data = toLE((uint32) scratch[0].address);
        }
    }

    dt_printf("buslogic/%d: targ %d, dxfr %d, scsi op = 0x%02x\n",req,
              bl_ccb->target_id, t, bl_ccb->cdb[0]);

    acquire_sem(bl->hw_lock);

    /* check for box in use state XXX */
    bl->out_boxes[bl->out_nextbox].ccb_phys = toLE(bl_ccb_phys);
    bl->out_boxes[bl->out_nextbox].action_code = BL_ActionCode_Start;
    bl->out_nextbox++;
    if(bl->out_nextbox == bl->box_count) bl->out_nextbox = 0;
    outb(BL_COMMAND_REG, 0x02);

#ifndef SERIALIZE_REQS
    release_sem(bl->hw_lock);
#endif
    /*    d_printf("buslogic/%d: CCB %08x (%08xv) waiting on done\n",
    	  req, bl_ccb_phys, (uint32) bl_ccb);*/
    acquire_sem(bl_ccb->done);
    /*    d_printf("buslogic/%d: CCB %08x (%08xv) done\n",
    	  req, bl_ccb_phys, (uint32) bl_ccb);*/

#ifdef SERIALIZE_REQS
    release_sem(bl->hw_lock);
#endif

    if(bl_ccb->btstat) {
        /* XXX - error state xlat goes here */
        switch(bl_ccb->btstat) {
        case 0x11:
            ccb->cam_ch.cam_status = CAM_SEL_TIMEOUT;
            break;
        case 0x12:
            ccb->cam_ch.cam_status = CAM_DATA_RUN_ERR;
            break;
        case 0x13:
            ccb->cam_ch.cam_status = CAM_UNEXP_BUSFREE;
            break;
        case 0x22:
        case 0x23:
            ccb->cam_ch.cam_status = CAM_SCSI_BUS_RESET;
            break;
        case 0x34:
            ccb->cam_ch.cam_status = CAM_UNCOR_PARITY;
            break;
        default:
            ccb->cam_ch.cam_status = CAM_REQ_INVALID;
        }
        dt_printf("buslogic/%d: error stat %02x\n",req,bl_ccb->btstat);
    } else {
        dt_printf("buslogic/%d: data %d/%d, sense %d/%d\n", req,
                  bl_ccb->length_data, ccb->cam_dxfer_len,
                  bl_ccb->length_sense, ccb->cam_sense_len);

        ccb->cam_resid = bl_ccb->length_data;

        /* under what condition should we do this? */
        memcpy(ccb->cam_sense_ptr, priv->sensedata, ccb->cam_sense_len);

        ccb->cam_scsi_status = bl_ccb->sdstat;

        if(bl_ccb->sdstat == 02) {
            ccb->cam_ch.cam_status = CAM_REQ_CMP_ERR | CAM_AUTOSNS_VALID;
            ccb->cam_sense_resid = 0;
            dt_printf("buslogic/%d: error scsi\n",req);
        } else {
            ccb->cam_ch.cam_status = CAM_REQ_CMP;
            ccb->cam_sense_resid = bl_ccb->length_sense;
            dt_printf("buslogic/%d: success scsi\n",req);
            /* put the CCB32 back on the freelist and release our lock */
            acquire_sem(bl->ccb_lock);
            bl_ccb->next = bl->first_ccb;
            bl->first_ccb = bl_ccb;
            release_sem(bl->ccb_lock);
            release_sem(bl->ccb_count);
            return 0;

        }
    }

    /* put the CCB32 back on the freelist and release our lock */
    acquire_sem(bl->ccb_lock);
    bl_ccb->next = bl->first_ccb;
    bl->first_ccb = bl_ccb;
    release_sem(bl->ccb_lock);
    release_sem(bl->ccb_count);
    return B_ERROR;
}
Example #29
File: 53c8xx.c Project: DonCN/haiku
/* Convert a CCB_SCSIIO into a BL_CCB32 and (possibly SG array).
**
*/
static long sim_execute_scsi_io(Symbios *s, CCB_HEADER *ccbh)
{
	CCB_SCSIIO *ccb = (CCB_SCSIIO *) ccbh;
	uchar *cdb;
	physical_entry pe[2];
	SymTarg *targ;
	uchar msg[8];

	targ = s->targ + ccb->cam_ch.cam_target_id;

	if(targ->flags & tf_ignore){
		ccbh->cam_status = CAM_SEL_TIMEOUT;
		return B_OK;
	}

	if(ccb->cam_ch.cam_flags & CAM_CDB_POINTER) {
		cdb = ccb->cam_cdb_io.cam_cdb_ptr;
	} else {
		cdb = ccb->cam_cdb_io.cam_cdb_bytes;
	}

	get_memory_map((void*) (ccb->cam_sim_priv), 1536, pe, 2);

	/* identify message */
	msg[0] = 0xC0 | (ccb->cam_ch.cam_target_lun & 0x07);

	/* fill out table */
	prep_io((SymPriv *) ccb->cam_sim_priv, (uint32) pe[0].address);

	/* ensure only one transaction at a time for any given target */
	acquire_sem(targ->sem_targ);

	targ->priv = (SymPriv *) ccb->cam_sim_priv;
	targ->priv_phys = (uint32) pe[0].address;

	targ->inbound = (ccb->cam_ch.cam_flags & CAM_DIR_IN) ? 1 : 0;

	if(ccb->cam_ch.cam_flags & CAM_SCATTER_VALID){
		exec_io(targ, cdb, ccb->cam_cdb_len, msg, 1,
				ccb->cam_data_ptr, ccb->cam_sglist_cnt, 1);
	} else {
		exec_io(targ, cdb, ccb->cam_cdb_len, msg, 1,
				ccb->cam_data_ptr, ccb->cam_dxfer_len, 0);
	}

/*	dprintf("symbios%d: state = 0x%02x, status = 0x%02x\n",
			s->num,targ->state,targ->priv->status[0]);*/

	/* decode status */
	switch(targ->status){
	case status_complete:
		if((ccb->cam_scsi_status=targ->priv->_status[0]) != 0) {
			ccbh->cam_status = CAM_REQ_CMP_ERR;

			/* nonzero status is an error ... 0x02 = check condition */
			if((ccb->cam_scsi_status == 0x02) &&
			   !(ccb->cam_ch.cam_flags & CAM_DIS_AUTOSENSE) &&
			   ccb->cam_sense_ptr && ccb->cam_sense_len){
				   uchar command[6];

				   command[0] = 0x03;		/* request_sense */
				   command[1] = ccb->cam_ch.cam_target_lun << 5;
				   command[2] = 0;
				   command[3] = 0;
				   command[4] = ccb->cam_sense_len;
				   command[5] = 0;

				   targ->inbound = 1;
				   exec_io(targ, command, 6, msg, 1,
						   ccb->cam_sense_ptr, ccb->cam_sense_len, 0);

				   if(targ->priv->_status[0]){
					   ccb->cam_ch.cam_status |= CAM_AUTOSENSE_FAIL;
				   } else {
					   ccb->cam_ch.cam_status |= CAM_AUTOSNS_VALID;
				   }
			}
		} else {
			ccbh->cam_status = CAM_REQ_CMP;

			if(cdb[0] == 0x12) {
				/* inquiry just succeeded ... is it non SG and with enough data to
				   snoop the support bits? */
				if(!(ccb->cam_ch.cam_flags & CAM_SCATTER_VALID) && (ccb->cam_dxfer_len>7)){
					negotiator(s, targ, ccb->cam_data_ptr, msg);
				}
			}
		}
		break;

	case status_timeout:
		ccbh->cam_status = CAM_SEL_TIMEOUT;
		break;

	default: // XXX
		ccbh->cam_status = CAM_SEL_TIMEOUT;
	}

	targ->status = status_inactive;
//	dprintf("symbios%d: releasing targ @ 0x%08x\n",s->num,targ);
	release_sem(targ->sem_targ);
	return B_OK;
}
Example #30
/*
** Allocate the actual memory for the cardinfo object
*/
static BusLogic *create_cardinfo(int num, int iobase, int irq)
{
    uchar *a;
    area_id aid;
    int i;
    physical_entry entries[5];
    char name[9] = { 'b', 'l', '_', 'c', 'c', 'b', '0', '0', 0 };

    BusLogic *bl = (BusLogic *) malloc(sizeof(BusLogic));

#ifndef __INTEL__
    i = map_physical_memory("bl_regs",(void*) iobase,  4096,
                            B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, &a);
    iobase = (uint32) a;
    if(i < 0) {
        dprintf("buslogic: can't map registers...\n");
    }
#endif

    bl->id = num;
    bl->iobase = iobase;
    bl->irq = irq;
    bl->out_nextbox = bl->in_nextbox = 0;

    /* create our 20k workspace.  First 4k goes to 510 mailboxes.
    ** remaining 16k is used for up to 256 CCB32's
    */
    a = NULL;

#if BOOT
    /* life in the bootstrap is a bit different... */
    /* can't be sure of getting contig pages -- scale
       stuff down so we can live in just one page */
    bl->box_count = 4;
    if(!(a = malloc(4096*2))) {
        free(bl);
        return NULL;
    }
    a = (uchar *) ((((uint32) a) & 0xFFFFF000) + 0x1000);
    get_memory_map(a, 4096, entries, 2);
#else
    bl->box_count = MAX_CCB_COUNT;
    aid = create_area("bl_workspace", (void **)&a, B_ANY_KERNEL_ADDRESS, 4096*5,
                      B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
    if(aid == B_ERROR || aid == B_BAD_VALUE || aid == B_NO_MEMORY) {
        free(bl);
        return NULL;
    }
    get_memory_map(a, 4096*5, entries, 2);
#endif

    /* figure virtual <-> physical translations */
    bl->phys_to_virt = ((uint) a) - ((uint) entries[0].address);
    bl->virt_to_phys = (((uint) entries[0].address - (uint) a));
    bl->phys_mailboxes = (uint) entries[0].address;

    /* initialize all mailboxes to empty */
    bl->out_boxes = (BL_Out_Mailbox32 *) a;
    bl->in_boxes = (BL_In_Mailbox32 *) (a + (8 * bl->box_count));
    for(i=0; i<bl->box_count; i++) {
        bl->out_boxes[i].action_code = BL_ActionCode_NotInUse;
        bl->in_boxes[i].completion_code = BL_CompletionCode_NotInUse;
    }

    /* setup the CCB32 cache */
#if BOOT
    bl->ccb = (BL_CCB32 *) (((uchar *)a) + 1024);
#else
    bl->ccb = (BL_CCB32 *) (((uchar *)a) + 4096);
#endif
    bl->first_ccb = NULL;
    for(i=0; i<bl->box_count; i++) {
        name[6] = hextab[(i & 0xF0) >> 4];
        name[7] = hextab[i & 0x0F];
        bl->ccb[i].done = create_sem(0, name);
        bl->ccb[i].next = bl->first_ccb;
        bl->first_ccb = &(bl->ccb[i]);
    }

    bl->hw_lock = create_sem(1, "bl_hw_lock");
    bl->ccb_lock = create_sem(1, "bl_ccb_lock");
    bl->ccb_count = create_sem(MAX_CCB_COUNT, "bl_ccb_count");
    bl->reqid = 0;

    return bl;
}