Example #1: sal_udelay(), a self-calibrating microsecond busy-wait delay
void
sal_udelay(uint32 usec)
{
    static volatile int _sal_udelay_counter;
    static int loops = 0;
    int ix, iy;

    if (loops == 0 || usec == 0) {      /* Need calibration? */
        int max_loops;
        int start = 0, stop = 0;
        int mpt = USECS_PER_JIFFY;      /* usec/tick */

        for (loops = 1; loops < 0x1000 && stop == start; loops <<= 1) {
            /* Wait for clock turn over */
            for (stop = start = jiffies; start == stop; start = jiffies) {
                /* Empty */
            }
            sal_udelay(mpt);    /* Single recursion */
            stop = jiffies;
        }

        max_loops = loops / 2;  /* Loop above overshoots */

        start = stop = 0;

        if (loops < 4) {
            loops = 4;
        }

        for (loops /= 4; loops < max_loops && stop == start; loops++) {
            /* Wait for clock turn over */
            for (stop = start = jiffies; start == stop; start = jiffies) {
                /* Empty */
            }
            sal_udelay(mpt);    /* Single recursion */
            stop = jiffies;
        }
    }
   
    for (iy = 0; iy < usec; iy++) {
        for (ix = 0; ix < loops; ix++) {
            _sal_udelay_counter++;      /* Prevent optimizations */
        }
    }
}
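The calibration above runs once (either on the first call or when triggered explicitly with sal_udelay(0)) and measures how many counter increments fit into one jiffy; afterwards sal_udelay() is a pure busy-wait that never yields the CPU. A minimal caller sketch follows, assuming calibration has already happened at init time; write_hw_reg() and EXAMPLE_RESET_REG are hypothetical placeholders for whatever register access the surrounding driver provides.

/*
 * Usage sketch (hypothetical register helper): after calibration,
 * sal_udelay() busy-waits, so it is usable where sleeping is not allowed.
 */
static void
example_reset_pulse(void)
{
    write_hw_reg(EXAMPLE_RESET_REG, 1);     /* assert reset (hypothetical helper/register) */
    sal_udelay(100);                        /* hold reset for roughly 100 usec */
    write_hw_reg(EXAMPLE_RESET_REG, 0);     /* release reset */
}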
Example #2: soc_tightdelay_timeout_check(), timeout polling with exponential backoff
int
soc_tightdelay_timeout_check(soc_timeout_t *to)
{
    if (++to->polls >= to->min_polls) {
        if (to->min_polls >= 0) {
            /*
             * Just exceeded min_polls; calculate expiration time by
             * consulting O/S real time clock.
             */

            to->min_polls = -1;
            to->expire = SAL_USECS_ADD(sal_time_usecs(), to->usec);
            to->exp_delay = 1;
        } else if (to->expire < SOC_TIGHTLOOP_DELAY_LIMIT_USECS) {
            /*
             * Exceeded min_polls in a previous call.
             * Consult O/S real time clock to check for expiration.
             */

            if (SAL_USECS_SUB(sal_time_usecs(), to->expire) >= 0) {
                return 1;
            }

            sal_udelay(to->exp_delay);

            /* Exponential backoff with 10% maximum latency */

            if ((to->exp_delay *= 2) > to->usec / 10) {
                to->exp_delay = to->usec / 10;
            }
        } else {
            /*
             * Exceeded min_polls in a previous call.
             * Consult O/S real time clock to check for expiration.
             */

            if (SAL_USECS_SUB(sal_time_usecs(), to->expire) >= 0) {
                return 1;
            }

            sal_usleep(to->exp_delay);

            /* Exponential backoff with 10% maximum latency */

            if ((to->exp_delay *= 2) > to->usec / 10) {
                to->exp_delay = to->usec / 10;
            }
        }
    }

    return 0;
}
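A caller typically initializes the soc_timeout_t first and then polls hardware until this check reports expiration. The sketch below assumes the usual SDK-style initializer soc_timeout_init(to, usec, min_polls); read_status() and STATUS_READY are hypothetical stand-ins for the actual hardware poll.

/*
 * Polling-loop sketch. soc_timeout_init() is assumed to take
 * (timeout, usec budget, min_polls); read_status() and STATUS_READY
 * are hypothetical.
 */
static int
wait_for_ready(void)
{
    soc_timeout_t to;

    soc_timeout_init(&to, 100000, 10);      /* 100 ms budget, 10 tight polls first */

    for (;;) {
        if (read_status() == STATUS_READY) {
            return 0;                       /* condition met before timeout */
        }
        if (soc_tightdelay_timeout_check(&to)) {
            return -1;                      /* budget exhausted */
        }
    }
}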
Example #3: sal_core_init(), which triggers sal_udelay() self-calibration at startup
int
sal_core_init(void)
{
    sal_udelay(0);	/* Cause sal_udelay() to self-calibrate */

    sal_thread_main_set(sal_thread_self());

    if (sal_dpc_init()) {
        return -1;
    }

    if (sal_global_lock_init()) {
        return -1;
    }

    return 0;
}
Example #4: __udelay(), a thin wrapper around sal_udelay()
static void __udelay(unsigned int usec)
{
    sal_udelay(usec);
}
Example #5: _ioctl(), the kernel BDE IOCTL handler that dispatches LUBDE_UDELAY and related commands
/*
 * Function: _ioctl
 *
 * Purpose:
 *    Handle IOCTL commands from user mode.
 * Parameters:
 *    cmd - IOCTL cmd
 *    arg - IOCTL parameters
 * Returns:
 *    0 on success, <0 on error
 */
static int 
_ioctl(unsigned int cmd, unsigned long arg)
{
    lubde_ioctl_t io;
    uint32 pbase, size;
    const ibde_dev_t *bde_dev;
    int inst_id;
    bde_inst_resource_t *res;

    if (copy_from_user(&io, (void *)arg, sizeof(io))) {
        return -EFAULT;
    }
  
    io.rc = LUBDE_SUCCESS;
  
    switch(cmd) {
    case LUBDE_VERSION:
        io.d0 = 0;
        break;
    case LUBDE_GET_NUM_DEVICES:

        io.d0 = user_bde->num_devices(io.dev);
        break;
    case LUBDE_GET_DEVICE:
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            io.d0 = bde_dev->device;
            io.d1 = bde_dev->rev;
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.d2 = lkbde_get_dev_phys(io.dev);
                io.d3 = lkbde_get_dev_phys_hi(io.dev);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DEVICE_TYPE:
        io.d0 = _devices[io.dev].dev_type;
        break;
    case LUBDE_GET_BUS_FEATURES:
        user_bde->pci_bus_features(io.dev, (int *) &io.d0, (int *) &io.d1,
                                   (int *) &io.d2);
        break;
    case LUBDE_PCI_CONFIG_PUT32:
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            user_bde->pci_conf_write(io.dev, io.d0, io.d1);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_PCI_CONFIG_GET32:
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            io.d0 = user_bde->pci_conf_read(io.dev, io.d0);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DMA_INFO:
        inst_id = io.dev;
        if (_bde_multi_inst){
            _dma_resource_get(inst_id, &pbase, &size);
        } else {
            lkbde_get_dma_info(&pbase, &size);
        }
        io.d0 = pbase;
        io.d1 = size; 
        /* Optionally enable DMA mmap via /dev/linux-kernel-bde */
        io.d2 = USE_LINUX_BDE_MMAP;
        break;
    case LUBDE_ENABLE_INTERRUPTS:
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            if (_devices[io.dev].isr && !_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            _devices[io.dev].isr,
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        } else {
            /* Process ethernet device interrupt */
            /* FIXME: for multiple chips */
            if (!_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            (void(*)(void *))_ether_interrupt, 
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        }
        break;
    case LUBDE_DISABLE_INTERRUPTS:
        if (_devices[io.dev].enabled) {
            user_bde->interrupt_disconnect(io.dev);
            _devices[io.dev].enabled = 0;
        }
        break;
    case LUBDE_WAIT_FOR_INTERRUPT:
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            res = &_bde_inst_resource[_devices[io.dev].inst];
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(res->intr_wq, 
                               atomic_read(&res->intr) != 0, 100);

#else
            wait_event_interruptible(res->intr_wq,
                                     atomic_read(&res->intr) != 0);
#endif
            /* 
             * Even if we get multiple interrupts, we 
             * only run the interrupt handler once.
             */
            atomic_set(&res->intr, 0);
        } else {
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(_ether_interrupt_wq,     
                               atomic_read(&_ether_interrupt_has_taken_place) != 0, 100);
#else
            wait_event_interruptible(_ether_interrupt_wq,     
                                     atomic_read(&_ether_interrupt_has_taken_place) != 0);
#endif
            /* 
             * Even if we get multiple interrupts, we 
             * only run the interrupt handler once.
             */
            atomic_set(&_ether_interrupt_has_taken_place, 0);
        }
        break;
    case LUBDE_USLEEP:
        sal_usleep(io.d0);
        break;
    case LUBDE_UDELAY:
        sal_udelay(io.d0);
        break;
    case LUBDE_SEM_OP:
        switch (io.d0) {
        case LUBDE_SEM_OP_CREATE:
            io.p0 = (bde_kernel_addr_t)sal_sem_create("", io.d1, io.d2);
            break;
        case LUBDE_SEM_OP_DESTROY:
            sal_sem_destroy((sal_sem_t)io.p0);
            break;
        case LUBDE_SEM_OP_TAKE:
            io.rc = sal_sem_take((sal_sem_t)io.p0, io.d2);
            break;
        case LUBDE_SEM_OP_GIVE:
            io.rc = sal_sem_give((sal_sem_t)io.p0);
            break;
        default:
            io.rc = LUBDE_FAIL;
            break;
        }
        break;
    case LUBDE_WRITE_IRQ_MASK:
        io.rc = lkbde_irq_mask_set(io.dev, io.d0, io.d1, 0);
        break;
    case LUBDE_SPI_READ_REG:
        if (user_bde->spi_read(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        } 
        break;
    case LUBDE_SPI_WRITE_REG:
        if (user_bde->spi_write(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_READ_REG_16BIT_BUS:
        io.d1 = user_bde->read(io.dev, io.d0);
        break;
    case LUBDE_WRITE_REG_16BIT_BUS:
        io.rc = user_bde->write(io.dev, io.d0, io.d1);
        break;
#if (defined(BCM_PETRA_SUPPORT) || defined(BCM_DFE_SUPPORT))
    case LUBDE_CPU_WRITE_REG:
    {
        if (lkbde_cpu_write(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_READ_REG:
    {
        if (lkbde_cpu_read(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_PCI_REGISTER:
    {
        if (lkbde_cpu_pci_register(io.dev) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
#endif
    case LUBDE_DEV_RESOURCE:
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.rc = lkbde_get_dev_resource(io.dev, io.d0,
                                               &io.d1, &io.d2, &io.d3);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_READ_REG:
        io.d1 = user_bde->iproc_read(io.dev, io.d0);
        if (io.d1 == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_WRITE_REG:
        if (user_bde->iproc_write(io.dev, io.d0, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_ATTACH_INSTANCE:
        io.rc = _instance_attach(io.d0, io.d1);
        break;
    default:
        gprintk("Error: Invalid ioctl (%08x)\n", cmd);
        io.rc = LUBDE_FAIL;
        break;
    }
  
    if (copy_to_user((void *)arg, &io, sizeof(io))) {
        return -EFAULT;
    }

    return 0;
}
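From user space these commands reach _ioctl() through the BDE character device with a populated lubde_ioctl_t. A minimal sketch follows, assuming the conventional /dev/linux-user-bde node; lubde_ioctl_t, LUBDE_GET_NUM_DEVICES, and LUBDE_SUCCESS are taken to come from the shared BDE ioctl header.

/*
 * User-space sketch: query the number of probed devices.
 * The device node path is an assumption; lubde_ioctl_t and
 * LUBDE_GET_NUM_DEVICES come from the shared BDE ioctl header.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
query_num_devices(void)
{
    lubde_ioctl_t io = {0};
    int fd = open("/dev/linux-user-bde", O_RDWR);   /* assumed device node */
    int rv = -1;

    if (fd < 0) {
        return -1;
    }
    io.dev = 0;     /* passed through to user_bde->num_devices() as the selector */
    if (ioctl(fd, LUBDE_GET_NUM_DEVICES, &io) == 0 && io.rc == LUBDE_SUCCESS) {
        rv = (int)io.d0;                            /* device count returned by the kernel module */
    }
    close(fd);
    return rv;
}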