Example #1
Status	FF_Controller::Initialize(FF_Interface *p_flash)
{
	TRACE_ENTRY(FF_Controller::Initialize);
 	
	LIST_INITIALIZE(&m_context_list_wait_buss);

	m_buss_context = 0;
	m_p_flash = p_flash;
	
	// Initialize lists.
	for (U32 unit_index = 0; unit_index < Flash_Address::Num_Units(); unit_index++)
	{
		LIST_INITIALIZE(&m_context_list_wait_unit[unit_index]);
		m_p_controller_context[unit_index] = 0;
	}
 	
	// Can units overlap?
	// With the SSD, units can overlap operations.  It is possible to
	// erase unit 0 while unit 1 is performing another operation.
	// With the HBC, only one unit can be active at any one time.
	if (Flash_Address::Banks_Per_Array() == 1)
		m_can_units_overlap = 0;
	else
		m_can_units_overlap = 1;

	return OK;

} // FF_Controller::Initialize
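All three examples center on LIST_INITIALIZE, which sets up an empty, circular, intrusive doubly linked list. In this example (and in Example #3) the macro is applied to a pointer to an already-declared list member. The sketch below shows what that initialization amounts to, using simplified link_t/list_t stand-ins rather than the project's actual list definitions, and a hypothetical num_units in place of Flash_Address::Num_Units().

#include <cassert>
#include <cstddef>

// Simplified stand-in for an intrusive, circular doubly linked list.
struct link_t {
	link_t *prev;
	link_t *next;
};

struct list_t {
	link_t head;	// Sentinel; the list is empty when the head points to itself.
};

// Pointer-taking initializer in the spirit of LIST_INITIALIZE(&list) as used
// above (an assumption about its semantics, not the project's actual macro).
static inline void list_initialize(list_t *list)
{
	list->head.prev = &list->head;
	list->head.next = &list->head;
}

static inline bool list_empty(const list_t *list)
{
	return list->head.next == &list->head;
}

int main()
{
	// Per-unit wait lists, mirroring m_context_list_wait_unit[].
	const std::size_t num_units = 4;	// hypothetical stand-in for Flash_Address::Num_Units()
	list_t wait_unit[num_units];

	for (std::size_t i = 0; i < num_units; i++)
		list_initialize(&wait_unit[i]);

	// Every freshly initialized list is empty.
	for (std::size_t i = 0; i < num_units; i++)
		assert(list_empty(&wait_unit[i]));

	return 0;
}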
Example #2
/** Take action based on the interrupt cause.
 *
 * @param[in] instance UHCI structure to use.
 * @param[in] status Value of the status register at the time of interrupt.
 *
 * Interrupt might indicate:
 * - transaction completed, either by triggering IOC, SPD, or an error
 * - some kind of device error
 * - resume from suspend state (not implemented)
 */
void hc_interrupt(hc_t *instance, uint16_t status)
{
	assert(instance);
	/* Lower 2 bits are transaction error and transaction complete */
	if (status & (UHCI_STATUS_INTERRUPT | UHCI_STATUS_ERROR_INTERRUPT)) {
		LIST_INITIALIZE(done);
		transfer_list_remove_finished(
		    &instance->transfers_interrupt, &done);
		transfer_list_remove_finished(
		    &instance->transfers_control_slow, &done);
		transfer_list_remove_finished(
		    &instance->transfers_control_full, &done);
		transfer_list_remove_finished(
		    &instance->transfers_bulk_full, &done);

		while (!list_empty(&done)) {
			link_t *item = list_first(&done);
			list_remove(item);
			uhci_transfer_batch_t *batch =
			    uhci_transfer_batch_from_link(item);
			uhci_transfer_batch_finish_dispose(batch);
		}
	}
	/* Resume interrupts are not supported */
	if (status & UHCI_STATUS_RESUME) {
		usb_log_error("Resume interrupt!\n");
	}

	/* Bits 4 and 5 indicate hc error */
	if (status & (UHCI_STATUS_PROCESS_ERROR | UHCI_STATUS_SYSTEM_ERROR)) {
		usb_log_error("UHCI hardware failure!\n");
		++instance->hw_failures;
		transfer_list_abort_all(&instance->transfers_interrupt);
		transfer_list_abort_all(&instance->transfers_control_slow);
		transfer_list_abort_all(&instance->transfers_control_full);
		transfer_list_abort_all(&instance->transfers_bulk_full);

		if (instance->hw_failures < UHCI_ALLOWED_HW_FAIL) {
			/* Reinitialize hw, this triggers virtual disconnect */
			hc_init_hw(instance);
		} else {
			usb_log_fatal("Too many UHCI hardware failures!\n");
			hc_fini(instance);
		}
	}
}
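The loop over done above is the standard intrusive-list drain: finished transfers are collected onto a stack-local list (LIST_INITIALIZE(done) in this codebase declares and initializes it in one step), and each link is then converted back to its enclosing batch structure before being disposed of. The self-contained sketch below reproduces that pattern with simplified list primitives and a hypothetical batch_t standing in for uhci_transfer_batch_t; it illustrates the technique and is not the driver's actual helpers.

#include <cstddef>
#include <cstdio>

// Simplified intrusive circular list, a stand-in for the driver's list type.
struct link_t {
	link_t *prev;
	link_t *next;
};

struct list_t {
	link_t head;
};

static inline void list_initialize(list_t *list)
{
	list->head.prev = list->head.next = &list->head;
}

static inline bool list_empty(const list_t *list)
{
	return list->head.next == &list->head;
}

static inline void list_append(link_t *link, list_t *list)
{
	link->prev = list->head.prev;
	link->next = &list->head;
	list->head.prev->next = link;
	list->head.prev = link;
}

static inline link_t *list_first(list_t *list)
{
	return list_empty(list) ? nullptr : list->head.next;
}

static inline void list_remove(link_t *link)
{
	link->prev->next = link->next;
	link->next->prev = link->prev;
	link->prev = link->next = link;
}

// Hypothetical stand-in for uhci_transfer_batch_t: the link is embedded in
// the structure, and offsetof() recovers the enclosing object from a link.
struct batch_t {
	int id;
	link_t link;
};

static inline batch_t *batch_from_link(link_t *l)
{
	return reinterpret_cast<batch_t *>(
	    reinterpret_cast<char *>(l) - offsetof(batch_t, link));
}

int main()
{
	list_t done;
	list_initialize(&done);

	batch_t batches[3] = { { 1, {} }, { 2, {} }, { 3, {} } };
	for (batch_t &b : batches)
		list_append(&b.link, &done);

	// Drain the list the way hc_interrupt() does: take the first link,
	// unlink it, convert it back to its batch, then "finish" it.
	while (!list_empty(&done)) {
		link_t *item = list_first(&done);
		list_remove(item);
		batch_t *batch = batch_from_link(item);
		std::printf("finished batch %d\n", batch->id);
	}
	return 0;
}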
Example #3
CM_Frame_Table * CM_Frame_Table::Allocate(
		CM_Mem					*p_mem, 
		CM_CONFIG				*p_config, 
		CM_PREFETCH_CALLBACK	*p_prefetch_callback,
		CM_WRITE_CALLBACK		*p_write_callback,
		CM_Stats				*p_stats,
		void					*p_callback_context,
		U32						 num_page_frames,
		void					*p_page_memory)
{
	// Allocate CM_Frame_Table object
	CM_Frame_Table *p_frame_table = 
		(CM_Frame_Table *)p_mem->Allocate(sizeof(CM_Frame_Table));

	if (p_frame_table == 0)
		return 0;
	
	// Initialize table lists.
	LIST_INITIALIZE(&p_frame_table->m_list_waiting_to_flush);
	LIST_INITIALIZE(&p_frame_table->m_list_wait_frame);

	// Initialize each of our dummy frames.  The head of each frame list
	// is a dummy frame so that, when the last frame in the list points
	// back to the head, we can treat the head as a frame without having
	// to check whether we have reached the head of the list.
	p_frame_table->m_clock_list.Initialize(0, CM_PAGE_STATE_CLEAN);
	p_frame_table->m_dirty_clock_list.Initialize(0, CM_PAGE_STATE_DIRTY);

	// Save callbacks.
	p_frame_table->m_p_prefetch_callback = p_prefetch_callback;
	p_frame_table->m_p_write_callback = p_write_callback;
	p_frame_table->m_p_callback_context = p_callback_context;

	// Make sure the page size is a multiple of 8 for alignment.
	p_frame_table->m_page_size = ((p_config->page_size + 7) / 8) * 8;

	p_frame_table->m_num_pages_replaceable = 0;
	p_frame_table->m_num_pages_clock = 0;
	p_frame_table->m_num_pages_dirty_clock = 0;
	p_frame_table->m_num_pages_being_written = 0;
	p_frame_table->m_num_pages_working_set = 0;
	p_frame_table->m_p_clock_frame = 0;
	p_frame_table->m_p_dirty_clock_frame = 0;

	// Find out how much memory is available.
	// Leave this for debugging.  Should be close to zero.
	U32 memory_available = p_mem->Memory_Available();

	// Save number of page frames.
	p_frame_table->m_num_page_frames = num_page_frames;

	if (p_config->page_table_size)
	{
		// Linear mapping
		// Don't allocate more pages than specified in the config.
		if (p_frame_table->m_num_page_frames > p_config->page_table_size)
			p_frame_table->m_num_page_frames = p_config->page_table_size;
	}

	// Allocate array of page frames
	p_frame_table->m_p_page_frame_array = (char *)p_page_memory;

	// Allocate array of frames
	p_frame_table->m_p_frame_array = 
		(CM_Frame *)p_mem->Allocate(sizeof(CM_Frame) * p_frame_table->m_num_page_frames);
	if (p_frame_table->m_p_frame_array == 0)
		return 0;

	// When m_p_clock_frame is zero, we know that the frame table is not yet initialized.
	p_frame_table->m_p_clock_frame = 0;

	// Initialize each frame
	CM_Frame *p_frame;
	for (U32 index = 0; index < p_frame_table->m_num_page_frames; index++)
	{
		// Point to the next frame.
		p_frame = p_frame_table->Get_Frame(index + 1);

		// Have the frame initialize itself.
		p_frame->Initialize(index + 1, CM_PAGE_STATE_CLEAN);
		CT_ASSERT((p_frame_table->Get_Frame(index + 1) == p_frame), CM_Frame_Table::Allocate);
		CT_ASSERT((p_frame->Get_Frame_Index() == (index + 1)), CM_Frame_Table::Allocate);

		// Make sure the list object is first in the frame.
		CT_ASSERT(((CM_Frame *)&p_frame->m_list == p_frame), CM_Frame_Table::Allocate);
		CT_ASSERT((p_frame->Is_Replaceable()), CM_Frame_Table::Allocate);

		// Initially, each frame is on the clock list
		LIST_INSERT_TAIL(&p_frame_table->m_clock_list.m_list, &p_frame->m_list);
		p_frame_table->m_num_pages_clock++;
		p_frame_table->m_num_pages_replaceable++;
	}

	// Initialize the clocks
	p_frame_table->m_p_clock_frame = p_frame;
	p_frame_table->m_p_dirty_clock_frame = &p_frame_table->m_dirty_clock_list;

	// Calculate dirty page writeback threshold from the percentage in the config file.
	p_frame_table->m_dirty_page_writeback_threshold = 
		(p_frame_table->m_num_page_frames * p_config->dirty_page_writeback_threshold)
		/ 100;

	// Calculate dirty page error threshold from the percentage in the config file.
	p_frame_table->m_dirty_page_error_threshold = (p_frame_table->m_num_page_frames 
		* p_config->dirty_page_error_threshold)
		/ 100;

	// Make sure the number of reserve pages is less than the number of pages in the cache.
	// This would only happen in a test environment where the cache size is small.
	p_frame_table->m_num_reserve_pages = p_config->num_reserve_pages;
	if (p_frame_table->m_num_reserve_pages > p_frame_table->m_num_pages_replaceable)

		// Make number of reserve pages less than number of pages in the cache.
		p_frame_table->m_num_reserve_pages = 
			p_frame_table->m_num_pages_replaceable - (p_frame_table->m_num_pages_replaceable / 2);

	// Calculate the maximum number of dirty pages.
	U32 max_number_dirty_pages = p_frame_table->m_num_pages_replaceable
		- p_frame_table->m_num_reserve_pages;

	// Make sure the dirty page error threshold is less than the maximum number of dirty pages;
	// otherwise we will have a context waiting for a page to be cleaned, and it will 
	// never happen.  This would only occur in a test situation, where cache size is small.
	if (p_frame_table->m_dirty_page_error_threshold > max_number_dirty_pages)

		// Make dirty page error threshold less than max_number_dirty_pages.
		p_frame_table->m_dirty_page_error_threshold = 
			max_number_dirty_pages - (max_number_dirty_pages / 2);

	// Make sure the writeback threshold is less than the dirty page error threshold.
	if (p_frame_table->m_dirty_page_writeback_threshold > p_frame_table->m_dirty_page_error_threshold)

		// Make dirty page writeback threshold less than dirty page error threshold.
		p_frame_table->m_dirty_page_writeback_threshold = 
			p_frame_table->m_dirty_page_error_threshold 
				- (p_frame_table->m_dirty_page_error_threshold / 2);

	// Initialize pointer to statistics object
	p_frame_table->m_p_stats = p_stats;

#ifdef _WINDOWS

	// Initialize Windows mutex
	p_frame_table->m_mutex = CreateMutex(
		NULL,	// LPSECURITY_ATTRIBUTES lpMutexAttributes: pointer to security attributes

		// If TRUE, the calling thread requests immediate ownership of the mutex object.
		// Otherwise, the mutex is not owned.
		0,	// BOOL bInitialOwner: flag for initial ownership

		// If lpName is NULL, the mutex object is created without a name.
		NULL	// LPCTSTR lpName: pointer to mutex-object name
		);

	if (p_frame_table->m_mutex == 0)
	{
		DWORD erc = GetLastError();
		CT_Log_Error(CT_ERROR_TYPE_FATAL,
			"CM_Frame_Table::Allocate", 
			"CreateMutex failed",
			erc,
			0);
		return p_frame_table;
	}
#else
#ifdef THREADX
	// Initialize Threadx semaphore
	Status	status = tx_semaphore_create(&p_frame_table->m_mutex, "CmMutex",
		1); // initial count
#else
	// Initialize Nucleus semaphore
	Status	status = NU_Create_Semaphore(&p_frame_table->m_mutex, "CmMutex",
		1, // initial count
		NU_FIFO);
#endif

	if (status != OK)
	{
		CT_Log_Error(CT_ERROR_TYPE_FATAL,
			"CM_Frame_Table::Allocate",
			"Semaphore create failed",
			status,
			0);
		return p_frame_table;
	}
#endif

#ifdef _DEBUG
	Got_Mutex = 0;
#endif

	// Return pointer to table object
	return p_frame_table;

} // CM_Frame_Table::Allocate
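The threshold block near the end of Allocate clamps three related quantities: the reserve page count must stay below the number of replaceable pages, the dirty-page error threshold must not exceed the pages that can ever be dirty, and the writeback threshold must stay below the error threshold. The standalone sketch below redoes that arithmetic with made-up config numbers; Compute_Thresholds and its parameters are hypothetical names used for illustration, not part of the cache manager's API.

#include <cassert>
#include <cstdio>

typedef unsigned U32;

// Hypothetical mirror of the fields CM_Frame_Table::Allocate derives from
// CM_CONFIG; the percentages and page counts below are illustrative only.
struct Thresholds {
	U32 num_reserve_pages;
	U32 dirty_page_writeback_threshold;
	U32 dirty_page_error_threshold;
};

static Thresholds Compute_Thresholds(U32 num_page_frames,
	U32 writeback_percent, U32 error_percent, U32 config_reserve_pages)
{
	Thresholds t;

	// Percentages from the config become absolute page counts.
	t.dirty_page_writeback_threshold = (num_page_frames * writeback_percent) / 100;
	t.dirty_page_error_threshold = (num_page_frames * error_percent) / 100;

	// Reserve pages must be less than the number of replaceable pages
	// (every frame starts out replaceable, as in the loop above).
	U32 num_pages_replaceable = num_page_frames;
	t.num_reserve_pages = config_reserve_pages;
	if (t.num_reserve_pages > num_pages_replaceable)
		t.num_reserve_pages = num_pages_replaceable - (num_pages_replaceable / 2);

	// The error threshold must not exceed the pages that may ever be dirty.
	U32 max_number_dirty_pages = num_pages_replaceable - t.num_reserve_pages;
	if (t.dirty_page_error_threshold > max_number_dirty_pages)
		t.dirty_page_error_threshold =
			max_number_dirty_pages - (max_number_dirty_pages / 2);

	// Writeback must kick in before the error threshold is reached.
	if (t.dirty_page_writeback_threshold > t.dirty_page_error_threshold)
		t.dirty_page_writeback_threshold =
			t.dirty_page_error_threshold - (t.dirty_page_error_threshold / 2);

	return t;
}

int main()
{
	// Tiny test cache: 16 frames, writeback at 50%, error at 90%, 20 reserve pages.
	Thresholds t = Compute_Thresholds(16, 50, 90, 20);
	std::printf("reserve=%u writeback=%u error=%u\n",
		t.num_reserve_pages, t.dirty_page_writeback_threshold,
		t.dirty_page_error_threshold);

	// The clamping preserves the ordering the comments above rely on.
	assert(t.dirty_page_writeback_threshold <= t.dirty_page_error_threshold);
	assert(t.dirty_page_error_threshold <= 16 - t.num_reserve_pages);
	return 0;
}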