static int ilo_close(struct inode *ip, struct file *fp)
{
	int slot;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % MAX_CCB;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	spin_lock(&hw->open_lock);

	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {
		/* last reference to this channel, tear down the ccb */
		data = fp->private_data;

		spin_lock_irqsave(&hw->alloc_lock, flags);
		hw->ccb_alloc[slot] = NULL;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		ilo_ccb_close(hw->ilo_dev, data);

		kfree(data);
	} else {
		/* other openers remain, just drop the reference count */
		hw->ccb_alloc[slot]->ccb_cnt--;
	}

	spin_unlock(&hw->open_lock);

	return 0;
}
static int ilo_open(struct inode *ip, struct file *fp)
{
	int slot, error;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % MAX_CCB;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	/* new ccb allocation */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock(&hw->open_lock);

	/* each fd private_data holds sw/hw view of ccb */
	if (hw->ccb_alloc[slot] == NULL) {
		/* create a channel control block for this minor */
		error = ilo_ccb_setup(hw, data, slot);
		if (error) {
			kfree(data);
			goto out;
		}

		data->ccb_cnt = 1;
		data->ccb_excl = fp->f_flags & O_EXCL;
		data->ilo_hw = hw;
		init_waitqueue_head(&data->ccb_waitq);

		/* write the ccb to hw */
		spin_lock_irqsave(&hw->alloc_lock, flags);
		ilo_ccb_open(hw, data, slot);
		hw->ccb_alloc[slot] = data;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		/* make sure the channel is functional */
		error = ilo_ccb_verify(hw, data);
		if (error) {
			spin_lock_irqsave(&hw->alloc_lock, flags);
			hw->ccb_alloc[slot] = NULL;
			spin_unlock_irqrestore(&hw->alloc_lock, flags);

			ilo_ccb_close(hw->ilo_dev, data);

			kfree(data);
			goto out;
		}
	} else {
		kfree(data);
		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
			/*
			 * The channel exists, and either this open
			 * or a previous open of this channel wants
			 * exclusive access.
			 */
			error = -EBUSY;
		} else {
			hw->ccb_alloc[slot]->ccb_cnt++;
			error = 0;
		}
	}
out:
	spin_unlock(&hw->open_lock);

	if (!error)
		fp->private_data = hw->ccb_alloc[slot];

	return error;
}
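/*
 * Illustrative sketch, not taken from this source: open/close handlers
 * such as ilo_open() and ilo_close() are typically wired into the
 * character device through a file_operations table. The table name and
 * the read/write/poll handlers referenced below (ilo_read, ilo_write,
 * ilo_poll) are assumptions for illustration and are presumed to be
 * defined elsewhere in the driver.
 */
static const struct file_operations ilo_fops = {
	.owner		= THIS_MODULE,
	.read		= ilo_read,
	.write		= ilo_write,
	.poll		= ilo_poll,
	.open		= ilo_open,
	.release	= ilo_close,	/* invoked when the last reference to the file is dropped */
	.llseek		= noop_llseek,
};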