/*
 * Evict the least recently used unlocked slot and return its index.
 * If every slot is currently locked, slot 0 is cleared and returned.
 */
u8 wmp_high_fsm_slots_handler_free_lru_slot()
{
	u8 k;
	u8 index;
	u8 lru_idx = 0;
	u64 mintimestamp = 0xFFFFFFFFFFFFFFFFL;
	struct wmp_common_sw_reg *container_cmn = wmp_common_sw_reg_get_container();
	struct wmp_fsm *wmp_fsm = wmp_common_sw_reg_get_fsm(container_cmn);

	for (k = 0; k < WMP_FSM_BUFFER_MUTEX_N; k ++) {
		index = ((wmp_high_fsm_slots_handler_last_written + k + 1) % WMP_FSM_BUFFER_MUTEX_N);
		if (!XMutex_IsLocked(&(wmp_fsm->fsm_mutex), index)) {
			if (slot_info[index].timestamp < mintimestamp) {
				mintimestamp = slot_info[index].timestamp;
				lru_idx = index;
			}
		}
	}

	/* Reclaim the LRU slot that was found, not the last index visited */
	slot_info[lru_idx].used = 0;
	slot_info[lru_idx].timestamp = 0;
	slot_info[lru_idx].id = 0xFFFF;

	return lru_idx;
}
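/*
 * A minimal sketch of the per-slot descriptor assumed by the handlers in this
 * file: only the fields actually accessed here (used, timestamp, id) are
 * shown. The real slot_info definition lives elsewhere in the WMP sources and
 * may carry more state; the name wmp_slot_info_sketch is illustrative only.
 */
struct wmp_slot_info_sketch {
	u8 used;	/* non-zero while the slot holds a pending FSM */
	u64 timestamp;	/* last write time, drives the LRU eviction above */
	u16 id;		/* FSM identifier, 0xFFFF marks a free slot */
};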
/*
 * For now we look for the first unlocked slot, starting
 * just after wmp_high_fsm_slots_handler_last_written.
 *
 * Extend this function to implement more complex logic.
 */
u8 wmp_high_fsm_slots_handler_next_slot()
{
	u8 k;
	struct wmp_common_sw_reg *container_cmn = wmp_common_sw_reg_get_container();
	struct wmp_fsm *wmp_fsm = wmp_common_sw_reg_get_fsm(container_cmn);

	for (k = 0; k < WMP_FSM_BUFFER_MUTEX_N; k ++) {
		if (!XMutex_IsLocked(&(wmp_fsm->fsm_mutex),
				((wmp_high_fsm_slots_handler_last_written + k + 1) % WMP_FSM_BUFFER_MUTEX_N)))
			break;
	}

	/* If every slot is locked (k == WMP_FSM_BUFFER_MUTEX_N), this falls
	 * back to the slot right after the last written one. */
	return (u8)((wmp_high_fsm_slots_handler_last_written + k + 1) % WMP_FSM_BUFFER_MUTEX_N);
}
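/*
 * A minimal usage sketch (not part of the handler API): pick the next free
 * slot and take its hardware mutex before writing an FSM into it. The FSM
 * copy itself is elided, and updating wmp_high_fsm_slots_handler_last_written
 * is assumed to be done by the real writer once the copy completes.
 */
static u8 wmp_high_fsm_slots_handler_acquire_slot_sketch(void)
{
	struct wmp_common_sw_reg *container_cmn = wmp_common_sw_reg_get_container();
	struct wmp_fsm *wmp_fsm = wmp_common_sw_reg_get_fsm(container_cmn);
	u8 slot = wmp_high_fsm_slots_handler_next_slot();

	/* Blocking call: spins until this slot's mutex is granted to this CPU */
	XMutex_Lock(&(wmp_fsm->fsm_mutex), slot);

	/* ... copy the FSM descriptor into the slot here ... */

	return slot;
}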
/**
*
* Selftest a particular Mutex hardware core.
*
* @param	InstancePtr is a pointer to the XMutex instance to be worked on.
*
* @return
*		- XST_SUCCESS if test was successful.
*		- XST_FAILURE if test was not successful.
*
* @note
*
 * This test is destructive. It will fail if the Mutex is currently being used.
 * This is also a blocking call: if another process holds the Mutex, the first
 * _lock will hang the test until the other process releases it.
*
******************************************************************************/
int XMutex_SelfTest(XMutex *InstancePtr)
{
	int Status;
	u32 Locked;
	u32 Owner;
	int Index;

	for (Index = 0; Index < InstancePtr->Config.NumMutex; Index++) {

		/* Lock Mutex blocking call*/
		XMutex_Lock(InstancePtr, Index);

		/* Get Status and verify if Status matches */
		XMutex_GetStatus(InstancePtr, Index, &Locked, &Owner);
		if (Owner != XPAR_CPU_ID) {
			return XST_FAILURE;
		}

		if (Locked != LOCKED_BIT) {
			return XST_FAILURE;
		}

		/* Verify that the Mutex is locked */
		if (XMutex_IsLocked(InstancePtr, Index) != TRUE) {
			return XST_FAILURE;
		}

		/* Unlock Mutex */
		Status = XMutex_Unlock(InstancePtr, Index);
		if (Status != XST_SUCCESS) {
			return Status;
		}

		/* Get Status and verify if Status matches */
		XMutex_GetStatus(InstancePtr, Index, &Locked, &Owner);
		if (Owner != 0) {
			return XST_FAILURE;
		}

		if (Locked != 0) {
			return XST_FAILURE;
		}
	}

	return XST_SUCCESS;
}
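/*
 * A minimal, hedged usage sketch of the self test: initialize an XMutex
 * instance from its driver configuration and run XMutex_SelfTest on it.
 * XPAR_MUTEX_0_DEVICE_ID is a typical xparameters.h name and is assumed here;
 * substitute the identifier generated by the actual BSP.
 */
static int mutex_selftest_example(void)
{
	static XMutex Mutex;
	XMutex_Config *Config;
	int Status;

	Config = XMutex_LookupConfig(XPAR_MUTEX_0_DEVICE_ID);
	if (Config == NULL) {
		return XST_FAILURE;
	}

	Status = XMutex_CfgInitialize(&Mutex, Config, Config->BaseAddress);
	if (Status != XST_SUCCESS) {
		return Status;
	}

	/* Destructive and blocking: run only while no other CPU holds a mutex */
	return XMutex_SelfTest(&Mutex);
}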
/*
 * Return the first slot that is neither locked nor marked as used.
 * If no such slot exists, evict and return the LRU unlocked slot.
 */
u8 wmp_high_fsm_slots_handler_first_not_used_slot()
{
	u8 k;
	u8 index;
	struct wmp_common_sw_reg *container_cmn = wmp_common_sw_reg_get_container();
	struct wmp_fsm *wmp_fsm = wmp_common_sw_reg_get_fsm(container_cmn);

	for (k = 0; k < WMP_FSM_BUFFER_MUTEX_N; k ++) {
		index = ((wmp_high_fsm_slots_handler_last_written + k + 1) % WMP_FSM_BUFFER_MUTEX_N);
		if (!XMutex_IsLocked(&(wmp_fsm->fsm_mutex), index) && !slot_info[index].used)
			break;
	}

	if (k == WMP_FSM_BUFFER_MUTEX_N) {
		return wmp_high_fsm_slots_handler_free_lru_slot();
	}

	return (u8)((wmp_high_fsm_slots_handler_last_written + k + 1) % WMP_FSM_BUFFER_MUTEX_N);
}
/*
 * Clear the bookkeeping of the unlocked slot holding the FSM with the given
 * id and return its index; return 0xFF if no such slot is found.
 */
u8 wmp_high_fsm_slots_handler_delete_slot(u16 id)
{
	u8 k;
	u8 index;
	struct wmp_common_sw_reg *container_cmn = wmp_common_sw_reg_get_container();
	struct wmp_fsm *wmp_fsm = wmp_common_sw_reg_get_fsm(container_cmn);

	for (k = 0; k < WMP_FSM_BUFFER_MUTEX_N; k ++) {
		index = ((wmp_high_fsm_slots_handler_last_written + k + 1) % WMP_FSM_BUFFER_MUTEX_N);
		if (!XMutex_IsLocked(&(wmp_fsm->fsm_mutex), index) && (slot_info[index].id == id))
			break;
	}

	if (k == WMP_FSM_BUFFER_MUTEX_N) {
		return 0xFF;
	}

	slot_info[index].used = 0;
	slot_info[index].timestamp = 0;
	slot_info[index].id = 0xFFFF;

	return index;
}
/*
 * Return 1 if the FSM with the given id occupies a slot whose mutex is
 * currently locked (i.e. it is loaded and running), 0 otherwise.
 */
u8 wmp_high_fsm_slots_handler_is_fsm_currently_running(u16 id)
{
	u8 k;
	u8 index;
	struct wmp_common_sw_reg *container_cmn = wmp_common_sw_reg_get_container();
	struct wmp_fsm *wmp_fsm = wmp_common_sw_reg_get_fsm(container_cmn);

	for (k = 0; k < WMP_FSM_BUFFER_MUTEX_N; k ++) {
		index = ((wmp_high_fsm_slots_handler_last_written + k + 1) % WMP_FSM_BUFFER_MUTEX_N);
		if (slot_info[index].id == id) {
			break;
		}
	}

	if (k == WMP_FSM_BUFFER_MUTEX_N) {
		return 0;
	}

	if (!XMutex_IsLocked(&(wmp_fsm->fsm_mutex), index)) {
		return 0;
	}

	return 1;
}