void proceso(int i, int pid)
{
    int k;
    int lwait;
    int lsignal;

    for (k = 0; k < CICLOS; k++) {
        lwait = 1;
        //printf("omaiga %s, g=%d, l=%d\n", pais[i], *gwait, lwait); fflush(stdout);
        do {
            atomic_xchg(lwait, *gwait);
        } while (lwait != 0);

        /* semaphore check */
        waitsem(sem, pid);
        *gwait = 0;

        /* <critical section> */
        printf("Entra %s", pais[i]);
        fflush(stdout);
        //sleep(5/*rand()%3*/);
        printf("- %s Sale\n", pais[i]);
        fflush(stdout);
        /* </critical section> */

        lsignal = 1;
        do {
            atomic_xchg(lsignal, *gsig);
        } while (lsignal != 0);

        /* waitsignal call */
        signalsem(sem, pid);
        *gsig = 0;

        /* random wait outside the critical section */
        sleep(rand() % 3);
    }
    exit(0);    /* terminate the process */
}

void cw1200_clear_recent_scan_work(struct work_struct *work)
{
    struct cw1200_common *priv =
        container_of(work, struct cw1200_common,
                     clear_recent_scan_work.work);

    atomic_xchg(&priv->recent_scan, 0);
}

void *hilo1(void *arg)
{
    int *mynum = (int *) arg;
    int i = *mynum;
    int k;
    int l;

    for (k = 0; k < CICLOS; k++) {
        l = 1;
        do {
            atomic_xchg(l, g);
        } while (l != 0);

        /* critical section begins */
        printf("Entra %s", pais[i]);
        fflush(stdout);
        sleep(rand() % 3);
        printf("- %s Sale\n", pais[i]);
        /* critical section ends */
        g = 0;

        /* random wait outside the critical section */
        sleep(rand() % 3);
    }
    return NULL;
}

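/*
 * A portable sketch of the exchange-based spin lock the two snippets above
 * rely on, assuming their custom atomic_xchg(local, global) macro atomically
 * swaps the local variable with the shared flag. Written with C11
 * <stdatomic.h>; the names spin_acquire/spin_release are illustrative only.
 */
#include <stdatomic.h>

static atomic_int g_flag;   /* 0 = free, 1 = taken */

static void spin_acquire(void)
{
    /* Keep swapping 1 in until the old value reads back as 0,
     * meaning this thread is the one that flipped the flag. */
    while (atomic_exchange(&g_flag, 1) != 0)
        ;   /* busy wait */
}

static void spin_release(void)
{
    atomic_store(&g_flag, 0);   /* hand the flag back */
}
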
static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *pos)
{
    struct smo8800_device *smo8800 =
        container_of(file->private_data, struct smo8800_device, miscdev);
    u32 data = 0;
    unsigned char byte_data = 0;
    ssize_t retval = 1;

    if (count < 1)
        return -EINVAL;

    atomic_set(&smo8800->counter, 0);
    retval = wait_event_interruptible(smo8800->misc_wait,
                                      (data = atomic_xchg(&smo8800->counter, 0)));
    if (retval)
        return retval;

    byte_data = 1;
    retval = 1;

    if (data < 255)
        byte_data = data;
    else
        byte_data = 255;

    if (put_user(byte_data, buf))
        retval = -EFAULT;

    return retval;
}

/* resume dev_replace procedure that was interrupted by unmount */
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
{
    struct task_struct *task;
    struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

    btrfs_dev_replace_lock(dev_replace);
    switch (dev_replace->replace_state) {
    case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
    case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
    case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
        btrfs_dev_replace_unlock(dev_replace);
        return 0;
    case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
        break;
    case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
        dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
        break;
    }
    if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
        pr_info("btrfs: cannot continue dev_replace, tgtdev is missing\n"
                "btrfs: you may cancel the operation after 'mount -o degraded'\n");
        btrfs_dev_replace_unlock(dev_replace);
        return 0;
    }
    btrfs_dev_replace_unlock(dev_replace);

    WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));

    task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
    return PTR_ERR_OR_ZERO(task);
}

static int tegra_camera_open(struct inode *inode, struct file *file)
{
    struct miscdevice *miscdev = file->private_data;
    struct tegra_camera_dev *dev = container_of(miscdev,
                                                struct tegra_camera_dev,
                                                misc_dev);
    int ret = 0;

    dev_info(dev->dev, "%s\n", __func__);

    if (atomic_xchg(&dev->in_use, 1))
        return -EBUSY;

    file->private_data = dev;

    mutex_lock(&dev->tegra_camera_lock);
    /* turn on CSI regulator */
    ret = tegra_camera_power_on(dev);
    if (ret)
        goto open_exit;
    /* set EMC request */
    ret = tegra_camera_enable_emc(dev);
    if (ret)
        goto open_exit;
    /* enable camera HW clock */
    ret = tegra_camera_enable_clk(dev);
    if (ret)
        goto open_exit;

open_exit:
    mutex_unlock(&dev->tegra_camera_lock);
    return ret;
}

void aee_sram_fiq_log(const char *msg)
{
    int count = strlen(msg), rem, delay = 100;
    struct ram_console_buffer *buffer = ram_console_buffer;

    if (atomic_xchg(&rc_in_fiq, 1))
        return;

    while ((delay > 0) && (spin_is_locked(&ram_console_lock))) {
        udelay(1);
        delay--;
    }

    if (count > ram_console_buffer_size) {
        msg += count - ram_console_buffer_size;
        count = ram_console_buffer_size;
    }

    rem = ram_console_buffer_size - buffer->start;
    if (rem < count) {
        memcpy(buffer->data + buffer->start, msg, rem);
        msg += rem;
        count -= rem;
        buffer->start = 0;
        buffer->size = ram_console_buffer_size;
    }
    memcpy(buffer->data + buffer->start, msg, count);

    buffer->start += count;
    if (buffer->size < ram_console_buffer_size)
        buffer->size += count;
}

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);
    unsigned long flags;
    int prev;

    /* No need to trylock if the mutex is locked. */
    if (mutex_is_locked(lock))
        return 0;

    spin_lock_mutex(&lock->wait_lock, flags);

    prev = atomic_xchg(&lock->count, -1);
    if (likely(prev == 1)) {
        mutex_set_owner(lock);
        mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
    }

    /* Set it back to 0 if there are no waiters: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    spin_unlock_mutex(&lock->wait_lock, flags);

    return prev == 1;
}

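/*
 * The slowpath above relies on the old mutex counter convention: 1 means
 * unlocked, 0 locked with no waiters, negative locked with possible
 * waiters. A minimal sketch of the matching xchg-based fast path, under
 * that assumption (my_mutex_trylock_fast is a hypothetical name, not the
 * kernel's API):
 */
static inline int my_mutex_trylock_fast(atomic_t *count)
{
    /* Swap in -1 pessimistically; only a previous value of 1
     * means the mutex was free and is now ours. */
    return atomic_xchg(count, -1) == 1;
}
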
static int tegra_camera_release(struct inode *inode, struct file *file)
{
    int ret = 0;
    struct tegra_camera_dev *dev = file->private_data;

    dev_info(dev->dev, "%s\n", __func__);

    mutex_lock(&dev->tegra_camera_lock);
    /* disable HW clock */
    ret = tegra_camera_disable_clk(dev);
    if (ret)
        goto release_exit;
    /* nullify EMC request */
    ret = tegra_camera_disable_emc(dev);
    if (ret)
        goto release_exit;
    /* turn off CSI regulator */
    ret = tegra_camera_power_off(dev);
    if (ret)
        goto release_exit;

release_exit:
    mutex_unlock(&dev->tegra_camera_lock);
    WARN_ON(!atomic_xchg(&dev->in_use, 0));
    return 0;
}

static void mr_alrt_enter(void)
{
    if (atomic_xchg(&alrt_onoff, 1))
        return;
    atomic_long_set(&alrt_start, jiffies);
}

static int acc_release(struct inode *ip, struct file *fp)
{
    printk(KERN_INFO "acc_release\n");

    WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
    _acc_dev->disconnected = 0;
    return 0;
}

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
    atomic_t *l = (void *)lock;

    BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
    do {
        atomic_cond_read_relaxed(l, !VAL);
    } while (atomic_xchg(l, 1));
}

static void apply_puts_pending(int max)
{
    int delta;

    if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
        delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
        cpu_hotplug.refcount -= delta;
    }
}

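/*
 * The xchg-to-zero above is the usual "drain a pending counter" idiom: the
 * consumer claims everything queued so far in one atomic step, so no put is
 * lost or double-counted. An illustrative producer/consumer pair (names
 * assumed, not from the source):
 */
static atomic_t pending = ATOMIC_INIT(0);

static void record_put(void)
{
    atomic_inc(&pending);               /* one more put to apply later */
}

static void drain_puts(int *total)
{
    *total += atomic_xchg(&pending, 0); /* claim all pending puts at once */
}
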
void sys_event_attach(sys_event_t *event, sys_event_client_t *client)
{
    sys_event_client_t **plast;

    client->notify = NULL;
    client->next = NULL;

    plast = atomic_xchg(&event->plast_client, client, memory_order_seq_cst);
    atomic_store(plast, client, memory_order_release);
}

void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}

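/*
 * Sketch of the wait side this pairs with (reconstructed under assumptions,
 * not quoted from QEMU; futex_wait is assumed to be the file-local helper):
 * a waiter advertises itself by moving the state from EV_FREE to EV_BUSY
 * before sleeping, which is why the EV_BUSY result of the xchg above is
 * enough to decide that futex_wake() is needed.
 */
void event_wait_sketch(QemuEvent *ev)
{
    unsigned value = atomic_mb_read(&ev->value);

    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* Try FREE -> BUSY; if the event was set meanwhile,
             * the cmpxchg reports it and we can return at once. */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);    /* sleep until qemu_event_set wakes us */
    }
}
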
static int acc_open(struct inode *ip, struct file *fp)
{
    printk(KERN_INFO "acc_open\n");

    if (atomic_xchg(&_acc_dev->open_excl, 1))
        return -EBUSY;

    _acc_dev->disconnected = 0;
    fp->private_data = _acc_dev;
    return 0;
}

static inline unsigned int get_and_clear_irq_fired(void)
{
    /* This is potentially not atomic since we might migrate if
     * preemptions are not disabled. As a tradeoff between
     * accuracy and tracing overheads, this seems acceptable.
     * If it proves to be a problem, then one could add a callback
     * from the migration code to invalidate irq_fired_count. */
    return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
}

void *AQMget_nowait(QUEUE *Q)
{
    NODE *N = NULL;
    void *c = NULL;

    atomic_inc(&(Q->read_lock));
    c = AQMget_real(Q, &N);
    if (atomic_dec_and_test(&(Q->read_lock))) {
        if (atomic_xchg(0, &(Q->clean))) {
            AQMget_cleaner(Q, (N != NULL) ? N : Q->head);
            atomic_xchg(1, &(Q->clean));
        }
    }
    return c;
}

static void *AQMget_real(QUEUE *Q, NODE **n)
{
    NODE *N = NULL;
    void *c = NULL;

    N = Q->fasttrack;
    if (unlikely(N->next == NULL)) {
        return c;
    }

    c = (void *)atomic_xchg((atomic)c, (atomic *)(&(N->next->content)));
    while (c == NULL) {
        N = N->next;
        if (unlikely(N->next == NULL))
            break;
        c = (void *)atomic_xchg((atomic)c, (atomic *)(&(N->next->content)));
    }

    *n = N;
    if (likely(N != NULL)) {
        N = (void *)atomic_xchg((atomic)N, (atomic *)(&(Q->fasttrack)));
    }

#ifdef ATOMICQ_COUNTER_ON
    if (c != NULL) {
        atomic_dec(&(Q->counter));
    }
#endif
    return c;
}

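/*
 * The casts above only make sense if this queue library's atomic_xchg has
 * its arguments reversed relative to the kernel's: new value first, target
 * pointer second, old contents returned. A plausible definition under that
 * assumption (the typedef and the GCC builtin are guesses, not taken from
 * the library's source):
 */
typedef long atomic;

static inline atomic atomic_xchg(atomic newval, volatile atomic *target)
{
    /* __sync_lock_test_and_set atomically stores newval into *target
     * and returns the previous contents. */
    return __sync_lock_test_and_set(target, newval);
}
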
void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            SetEvent(ev->event);
        }
    }
}

void signalsem(mySemaphore *sem)
{
    register int l = 1;

    do {
        atomic_xchg(l, *g);
    } while (l != 0);

    if (sem->count < 0) {
        //printf("Process %i is done! The next one's turn\n", getpid());
        resumeprocess(dequeue(sem));
    }
    sem->count++;
    *g = 0;
}

/*
 * read the cache state
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
                                      size_t buflen, loff_t *pos)
{
    struct cachefiles_cache *cache = file->private_data;
    unsigned long long b_released;
    unsigned f_released;
    char buffer[256];
    int n;

    //_enter(",,%zu,", buflen);

    if (!test_bit(CACHEFILES_READY, &cache->flags))
        return 0;

    /* check how much space the cache has */
    cachefiles_has_space(cache, 0, 0);

    /* summarise */
    f_released = atomic_xchg(&cache->f_released, 0);
    b_released = atomic_long_xchg(&cache->b_released, 0);
    clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

    n = snprintf(buffer, sizeof(buffer),
                 "cull=%c"
                 " frun=%llx"
                 " fcull=%llx"
                 " fstop=%llx"
                 " brun=%llx"
                 " bcull=%llx"
                 " bstop=%llx"
                 " freleased=%x"
                 " breleased=%llx",
                 test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
                 (unsigned long long) cache->frun,
                 (unsigned long long) cache->fcull,
                 (unsigned long long) cache->fstop,
                 (unsigned long long) cache->brun,
                 (unsigned long long) cache->bcull,
                 (unsigned long long) cache->bstop,
                 f_released,
                 b_released);

    if (n > buflen)
        return -EMSGSIZE;

    if (copy_to_user(_buffer, buffer, n) != 0)
        return -EFAULT;

    return n;
}

/**
 * amdgpu_ih_process - interrupt handler
 *
 * @adev: amdgpu_device pointer
 *
 * Interrupt handler (VI), walk the IH ring.
 * Returns irq process return code.
 */
int amdgpu_ih_process(struct amdgpu_device *adev)
{
    struct amdgpu_iv_entry entry;
    u32 wptr;

    if (!adev->irq.ih.enabled || adev->shutdown)
        return IRQ_NONE;

    wptr = amdgpu_ih_get_wptr(adev);

restart_ih:
    /* is somebody else already processing irqs? */
    if (atomic_xchg(&adev->irq.ih.lock, 1))
        return IRQ_NONE;

    DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);

    /* Order reading of wptr vs. reading of IH ring data */
    rmb();

    while (adev->irq.ih.rptr != wptr) {
        u32 ring_index = adev->irq.ih.rptr >> 2;

        /* Prescreening of high-frequency interrupts */
        if (!amdgpu_ih_prescreen_iv(adev)) {
            adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
            continue;
        }

        /* Before dispatching irq to IP blocks, send it to amdkfd */
        amdgpu_amdkfd_interrupt(adev,
                                (const void *) &adev->irq.ih.ring[ring_index]);

        entry.iv_entry = (const uint32_t *) &adev->irq.ih.ring[ring_index];
        amdgpu_ih_decode_iv(adev, &entry);
        adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;

        amdgpu_irq_dispatch(adev, &entry);
    }
    amdgpu_ih_set_rptr(adev);
    atomic_set(&adev->irq.ih.lock, 0);

    /* make sure wptr hasn't changed while processing */
    wptr = amdgpu_ih_get_wptr(adev);
    if (wptr != adev->irq.ih.rptr)
        goto restart_ih;

    return IRQ_HANDLED;
}

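/*
 * The xchg on ih.lock above implements a claim-and-recheck idiom worth
 * isolating. A generic sketch (struct my_ring and the my_ring_* helpers are
 * hypothetical): exactly one caller claims the ring, and after dropping the
 * claim it re-reads the producer index so that entries posted during the
 * release window are not stranded until the next interrupt.
 */
static atomic_t ring_busy = ATOMIC_INIT(0);

static irqreturn_t process_ring(struct my_ring *ring)
{
    u32 wptr = my_ring_get_wptr(ring);

restart:
    if (atomic_xchg(&ring_busy, 1))  /* somebody else is already processing */
        return IRQ_NONE;

    while (ring->rptr != wptr)
        my_ring_handle_one(ring);    /* consume one entry, advance rptr */

    atomic_set(&ring_busy, 0);       /* drop the claim... */

    wptr = my_ring_get_wptr(ring);   /* ...then look again for new work */
    if (wptr != ring->rptr)
        goto restart;

    return IRQ_HANDLED;
}
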
/*
 * This function is being called when a pagefault occurs. It
 * tracks down vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        atomic_inc(&dc->smp_cpus_down);
    }

    atomic_xchg(&dc->last_begin, low_time_offset);
    atomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    atomic_xchg(&dc->vcpu_addr[cpu], addr);

    /* check it here, not at the beginning of the function,
     * because the check could occur earlier than bitmap_set in
     * qemu_ufd_copy_ioctl */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        atomic_xchg(&dc->vcpu_addr[cpu], 0);
        atomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        atomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc,
                                        dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*---------info------------*/
static int wil_info_debugfs_show(struct seq_file *s, void *data)
{
    struct wil6210_priv *wil = s->private;
    struct net_device *ndev = wil_to_ndev(wil);
    int is_ac = power_supply_is_system_supplied();
    int rx = atomic_xchg(&wil->isr_count_rx, 0);
    int tx = atomic_xchg(&wil->isr_count_tx, 0);
    static ulong rxf_old, txf_old;
    ulong rxf = ndev->stats.rx_packets;
    ulong txf = ndev->stats.tx_packets;
    unsigned int i;

    /* >0 : AC; 0 : battery; <0 : error */
    seq_printf(s, "AC powered : %d\n", is_ac);
    seq_printf(s, "Rx irqs:packets : %8d : %8ld\n", rx, rxf - rxf_old);
    seq_printf(s, "Tx irqs:packets : %8d : %8ld\n", tx, txf - txf_old);
    rxf_old = rxf;
    txf_old = txf;

#define CHECK_QSTATE(x) (state & BIT(__QUEUE_STATE_ ## x)) ? \
        " " __stringify(x) : ""

    for (i = 0; i < ndev->num_tx_queues; i++) {
        struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
        unsigned long state = txq->state;

        seq_printf(s, "Tx queue[%i] state : 0x%lx%s%s%s\n", i, state,
                   CHECK_QSTATE(DRV_XOFF),
                   CHECK_QSTATE(STACK_XOFF),
                   CHECK_QSTATE(FROZEN));
    }
#undef CHECK_QSTATE
    return 0;
}

void *AQM2get_nowait(QUEUE *Q)
{
    NODE *N = NULL;
    void *c = NULL;

    N = (NODE *)atomic_xchg((atomic)NULL, (atomic *)(&(Q->head)));
    while (N == NULL) {
        AtomicQ_YIELD;
        N = (NODE *)atomic_xchg((atomic)NULL, (atomic *)(&(Q->head)));
    }

    if (N->next != NULL) {
        NODE *N0;

        c = N->next->content;
        N0 = N;
        N = N->next;
        NODE_delete(N0);
    }

    N = (NODE *)atomic_xchg((atomic)N, (atomic *)(&(Q->head)));

#ifdef ATOMICQ_COUNTER_ON
    if (c != NULL) {
        atomic_dec(&(Q->counter));
    }
#endif
    return c;
}

static void mr_alrt_leave(void)
{
    unsigned long then;

    if (!atomic_xchg(&alrt_onoff, 0))
        return;

    then = atomic_long_xchg(&alrt_start, 0);
    atomic_inc(&alrt_count);

    if (jiffies == then)
        atomic_long_add(jiffies_to_msecs(1) / 2, &alrt_time);
    else
        atomic_long_add(jiffies_to_msecs(jiffies - then), &alrt_time);
}

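/*
 * Usage sketch for the mr_alrt_enter/mr_alrt_leave pair (call sites
 * assumed): the xchg in each function makes the pair idempotent, so racing
 * or nested calls start and account the alert window exactly once.
 */
mr_alrt_enter();   /* first caller flips 0 -> 1 and stamps alrt_start */
mr_alrt_enter();   /* sees 1, returns immediately */
/* ... alert condition handled ... */
mr_alrt_leave();   /* flips 1 -> 0, adds the elapsed ms to alrt_time */
mr_alrt_leave();   /* sees 0, returns immediately */
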
int disp_pwm_set_backlight_cmdq(disp_pwm_id_t id, int level_1024, void *cmdq)
{
    unsigned int reg_base;
    int old_pwm;
    int index;

    if ((DISP_PWM_ALL & id) == 0) {
        PWM_ERR("[ERROR] disp_pwm_set_backlight_cmdq: invalid PWM ID = 0x%x", id);
        return -EFAULT;
    }

    index = index_of_pwm(id);

    old_pwm = atomic_xchg(&g_pwm_backlight[index], level_1024);
    if (old_pwm != level_1024) {
        PWM_MSG("disp_pwm_set_backlight_cmdq(id = 0x%x, level_1024 = %d), old = %d",
                id, level_1024, old_pwm);

        if (level_1024 > g_pwm_max_backlight[index]) {
            level_1024 = g_pwm_max_backlight[index];
        } else if (level_1024 < 0) {
            level_1024 = 0;
        }

        level_1024 = disp_pwm_level_remap(id, level_1024);

        reg_base = pwm_get_reg_base(id);
        DISP_REG_MASK(cmdq, reg_base + DISP_PWM_CON_1_OFF,
                      level_1024 << 16, 0x1fff << 16);

        if (level_1024 > 0) {
            disp_pwm_set_enabled(cmdq, id, 1);
        } else {
            disp_pwm_set_enabled(cmdq, id, 0); /* To save power */
        }

        DISP_REG_MASK(cmdq, reg_base + DISP_PWM_COMMIT_OFF, 1, ~0);
        DISP_REG_MASK(cmdq, reg_base + DISP_PWM_COMMIT_OFF, 0, ~0);

        g_pwm_duplicate_count = 0;
    } else {
        g_pwm_duplicate_count = (g_pwm_duplicate_count + 1) & 63;
        if (g_pwm_duplicate_count == 2) {
            PWM_MSG("disp_pwm_set_backlight_cmdq(id = 0x%x, level_1024 = %d), old = %d (dup)",
                    id, level_1024, old_pwm);
        }
    }

    return 0;
}

static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
                                   size_t count, loff_t *pos)
{
    DECLARE_WAITQUEUE(wait, current);
    u32 data;
    unsigned char byte_data;
    ssize_t retval = 1;

    if (count < 1)
        return -EINVAL;

    add_wait_queue(&lis3_dev.misc_wait, &wait);
    while (true) {
        set_current_state(TASK_INTERRUPTIBLE);
        data = atomic_xchg(&lis3_dev.count, 0);
        if (data)
            break;

        if (file->f_flags & O_NONBLOCK) {
            retval = -EAGAIN;
            goto out;
        }

        if (signal_pending(current)) {
            retval = -ERESTARTSYS;
            goto out;
        }

        schedule();
    }

    if (data < 255)
        byte_data = data;
    else
        byte_data = 255;

    /* make sure we are not going into copy_to_user() with
     * TASK_INTERRUPTIBLE state */
    set_current_state(TASK_RUNNING);
    if (copy_to_user(buf, &byte_data, sizeof(byte_data)))
        retval = -EFAULT;

out:
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&lis3_dev.misc_wait, &wait);
    return retval;
}

static int ncp373_mode_disable(void)
{
    struct ncp373_internal *dev = pncp373_internal;
    int old_oc_det_state;

    if (unlikely(!dev)) {
        pr_err("%s: device %s is not probed yet.\n", __func__,
               NCP373_DRIVER_NAME);
        return -ENODEV;
    }

    mutex_lock(&dev->mutex);

    /* If VBUS is already off, do nothing. */
    if (!dev->vbus_is_on) {
        pr_debug("%s: ncp373 to disable mode, already off\n", __func__);
        goto op_done;
    }

    old_oc_det_state = atomic_xchg(&dev->oc_det_state, OC_DET_STOP);
    pr_debug("%s: ncp373 to disable mode, oc_det_state=%s\n", __func__,
             ncp373_oc_det_state_to_string(old_oc_det_state));

    dev->vbus_is_on = 0;

    /* disable FLG irq */
    if (likely(dev->flg_irq_en)) {
        dev->flg_irq_en = 0;
        free_irq(dev->flg_irq, dev);
    }

    cancel_delayed_work_sync(&dev->oc_delay_work);

    NCP373_DEBUG_CHECK_PIN_STATE(dev);

    /* set false to /EN pin(NCP373) */
    dev->pdata->en_set(0);

    /* wait for /EN to IN */
    usleep(NCP373_WAIT_EN_2_IN);

    NCP373_DEBUG_CHECK_PIN_STATE(dev);

    /* apply gnd by using mpp to IN pin(NCP373) */
    dev->pdata->in_set(0);

    NCP373_DEBUG_CHECK_PIN_STATE(dev);

op_done:
    mutex_unlock(&dev->mutex);
    return 0;
}