Example #1
/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	if (!valid_state(state))
		return -ENODEV;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}
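
Example #1 shows the canonical trylock-or-fail guard: the kernel's mutex_trylock() returns 1 on success, so a failed attempt simply reports -EBUSY instead of sleeping, guaranteeing only one task runs the suspend path at a time. As a rough user-space sketch of the same shape (POSIX threads rather than the kernel API; enter_state_once and do_suspend are hypothetical names):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical user-space analogue of enter_state(): refuse to run
 * concurrently instead of blocking.  Note the inverted convention:
 * pthread_mutex_trylock() returns 0 on success, while the kernel's
 * mutex_trylock() returns 1 on success. */
static int enter_state_once(void (*do_suspend)(void))
{
	if (pthread_mutex_trylock(&pm_mutex) != 0)
		return -EBUSY;		/* someone else got there first */

	do_suspend();			/* the protected suspend path */

	pthread_mutex_unlock(&pm_mutex);
	return 0;
}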
/*--------------------------------------------------------------------------
  
  \brief vos_lock_destroy() - Destroys a vOSS lock. This is probably not
  required on Linux; the caller may not strictly need to destroy a lock
  after use.

  The \a vos_lock_destroy() function shall destroy the lock object 
  referenced by lock.  After a successful return from \a vos_lock_destroy()
  the lock object becomes, in effect, uninitialized.
   
  A destroyed lock object can be reinitialized using vos_lock_init(); 
  the results of otherwise referencing the object after it has been destroyed 
  are undefined.  Calls to vOSS lock functions to manipulate the lock such
  as vos_lock_acquire() will fail if the lock is destroyed.  Therefore, 
  don't use the lock after it has been destroyed until it has 
  been re-initialized.
  
  \param lock - the lock object to be destroyed.
  
  \return VOS_STATUS_SUCCESS - lock was successfully destroyed.
  
          VOS_STATUS_E_BUSY - The implementation has detected an attempt 
          to destroy the object referenced by lock while it is locked 
          or still referenced. 

          VOS_STATUS_E_INVAL - The value specified by lock is invalid.
          
          VOS_STATUS_E_FAULT  - lock is an invalid pointer. 

          VOS_STATUS_E_FAILURE - default return value if it fails due to 
          unknown reasons
  \sa
  ------------------------------------------------------------------------*/
VOS_STATUS vos_lock_destroy( vos_lock_t *lock )
{
      //Check for invalid pointer
      if ( NULL == lock )
      {
         VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "%s: NULL pointer passed in", __func__);
         return VOS_STATUS_E_FAULT; 
      }

      if ( LINUX_LOCK_COOKIE != lock->cookie )
      {
         VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "%s: uninitialized lock", __func__);
         return VOS_STATUS_E_INVAL;
      }

      if (in_interrupt())
      {
         VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "%s cannot be called from interrupt context!!!", __func__);
         return VOS_STATUS_E_FAULT; 
      }

      // check if lock is released
      if (!mutex_trylock(&lock->m_lock))
      {
         VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "%s: lock is not released", __func__);
         return VOS_STATUS_E_BUSY;
      }
      lock->cookie = 0;
      lock->state = LOCK_DESTROYED;
      lock->processID = 0;
      lock->refcount = 0;

      mutex_unlock(&lock->m_lock);

         
      return VOS_STATUS_SUCCESS;
}
Example #3
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  if (mutex_trylock (&list_lock))
    {
      void *my_arena;
      tsd_getspecific (arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void) mutex_lock (&list_lock);
    }
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific (arena_key, save_arena);
  tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
out:
  ++atfork_recursive_cntr;
}
Example #4
/**
 *	enter_state - Do common work of entering low-power state.
 *	@state:		pm_state structure for state we're entering.
 *
 *	Make sure we're the only ones trying to enter a sleep state. Fail
 *	if someone has beat us to it, since we don't want anything weird to
 *	happen when we wake up.
 *	Then, do the setup for suspend, enter the state, and clean up (after
 *	we've woken up).
 */
int enter_state(suspend_state_t state)
{
	int error;

	if (!valid_state(state))
		return -ENODEV;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	suspend_footprint = 3;
	suspend_sys_sync_queue();
	suspend_footprint = 4;
	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	error = suspend_prepare();
	if (error)
		goto Unlock;
	suspend_footprint = 10;
	if (suspend_test(TEST_FREEZER))
		goto Finish;
	suspend_footprint = 11;
	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	pm_restrict_gfp_mask();
	suspend_footprint = 12;
	error = suspend_devices_and_enter(state);
	suspend_footprint = 13;
	pm_restore_gfp_mask();
	suspend_footprint = 14;

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
	suspend_footprint = 18;
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}
Example #5
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* block amdgpu_gpu_recover till msg FLR COMPLETE received,
	 * otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 *
	 * we can unlock the lock_reset to allow "amdgpu_job_timedout"
	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = 1;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = 0;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}
Example #6
/*
 * Start transmitting.
 */
static void tilegx_start_tx(struct uart_port *port)
{
	unsigned char ch;
	struct circ_buf *xmit;
	struct tile_uart_port *tile_uart;
	gxio_uart_context_t *context;

	tile_uart = container_of(port, struct tile_uart_port, uart);
	if (!mutex_trylock(&tile_uart->mutex))
		return;
	context = &tile_uart->context;
	xmit = &port->state->xmit;
	if (port->x_char) {
		if (tilegx_putchar(context, port->x_char))
			return;
		port->x_char = 0;
		port->icount.tx++;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		mutex_unlock(&tile_uart->mutex);
		return;
	}

	while (!uart_circ_empty(xmit)) {
		ch = xmit->buf[xmit->tail];
		if (tilegx_putchar(context, ch))
			break;
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	mutex_unlock(&tile_uart->mutex);
}
Example #7
void
public_fREe(void* mem)
{
  struct malloc_arena* ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                      /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  mspace_free(arena_to_mspace(ar_ptr), mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}
Example #8
/**
 *	enter_state - Do common work of entering low-power state.
 *	@state:		pm_state structure for state we're entering.
 *
 *	Make sure we're the only ones trying to enter a sleep state. Fail
 *	if someone has beat us to it, since we don't want anything weird to
 *	happen when we wake up.
 *	Then, do the setup for suspend, enter the state, and clean up (after
 *	we've woken up).
 */
int enter_state(suspend_state_t state)
{
	int error;

	if (!valid_state(state))
		return -ENODEV;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

//20110727 [email protected]	Patch applied from P990 froyo MR-03
	printk("[LOG] star_emergency_restart() called at enter_state() \n");
	star_emergency_restart("sys", 63);

	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	error = suspend_prepare();
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	error = suspend_devices_and_enter(state);

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}
Example #9
/* tasklock endpoint, returning when it's connected.
 * still need dev->lock to use epdata->ep.
 */
static int
get_ready_ep (unsigned f_flags, struct ep_data *epdata)
{
	int	val;

	if (f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&epdata->lock))
			goto nonblock;
		if (epdata->state != STATE_EP_ENABLED) {
			mutex_unlock(&epdata->lock);
nonblock:
			val = -EAGAIN;
		} else
			val = 0;
		return val;
	}

	val = mutex_lock_interruptible(&epdata->lock);
	if (val < 0)
		return val;

	switch (epdata->state) {
	case STATE_EP_ENABLED:
		break;
	// case STATE_EP_DISABLED:		/* "can't happen" */
	// case STATE_EP_READY:			/* "can't happen" */
	default:				/* error! */
		pr_debug ("%s: ep %p not available, state %d\n",
				shortname, epdata, epdata->state);
		// FALLTHROUGH
	case STATE_EP_UNBOUND:			/* clean disconnect */
		val = -ENODEV;
		mutex_unlock(&epdata->lock);
	}
	return val;
}
Example #10
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex))
			return -EBUSY;
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	return f2fs_gc(sbi, sync);
}
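
This ioctl shows the common split between a non-blocking caller, who gets an immediate -EBUSY via mutex_trylock(), and a synchronous caller, who is willing to sleep in mutex_lock(). A minimal user-space sketch of that decision, assuming POSIX threads and a hypothetical run_gc() helper:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t gc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical analogue of the sync/!sync split above: a non-blocking
 * caller gets -EBUSY at once, a synchronous caller waits for the lock. */
static int run_gc(bool sync)
{
	if (!sync) {
		if (pthread_mutex_trylock(&gc_mutex) != 0)
			return -EBUSY;
	} else {
		pthread_mutex_lock(&gc_mutex);
	}

	/* ... garbage collection would run here ... */

	pthread_mutex_unlock(&gc_mutex);
	return 0;
}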
Example #11
static int __init __self_module_loader(void) {
	struct mutex *mutex;
	int lock_status;
	int is_lock_acquired;

	mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
	if (unlikely(mutex == NULL)) {
		printk(KERN_ERR "Not enough memory to allocate.\n");
		return -ENOMEM;
	}

	/* kmalloc() returns uninitialized memory, so the mutex must be
	 * initialized before any other mutex API is called on it; probing
	 * it with mutex_is_locked() first is undefined behaviour. */
	mutex_init(mutex);

	lock_status = mutex_is_locked(mutex);
	printk(KERN_INFO "Mutex status: %s.\n", (lock_status == 1) ? "locked" : "unlocked");
	is_lock_acquired = mutex_trylock(mutex);

	if (likely(is_lock_acquired == 1)) {
		printk(KERN_INFO "Mutex has been acquired.\n");
		lock_status = mutex_is_locked(mutex);
		printk(KERN_INFO "Mutex status: %s.\n", (lock_status == 1) ? "locked" : "unlocked");
		mutex_unlock(mutex);
	}

	lock_status = mutex_is_locked(mutex);
	printk(KERN_INFO "Mutex status: %s.\n", (lock_status == 1) ? "locked" : "unlocked");

	kfree(mutex);

	return 0;
}
Example #12
static gpointer
wt_ipc_mainloop (gpointer __unused)
{
  struct timespec timestruc;
  timestruc.tv_sec = ((unsigned long long) (delay * NSEC_COUNT)) / NSEC_COUNT;
  timestruc.tv_nsec = ((unsigned long long) (delay * NSEC_COUNT)) % NSEC_COUNT;

  for (;;)
    {
      if (mutex_trylock (mutex))
        {
          mutex_unlock (mutex);
          break;
        }

      ipc_listen ();
      ipc_interact ();

      nanosleep (&timestruc, 0);
    }

  g_thread_exit (0);
  return 0;
}
Example #13
void touchkey_pressed(void)
{
    if (!device_suspended && bld_enabled)
	{
	    if (backlight_dimmed)
		{
		    if (mutex_trylock(&lock))
			{
			    cancel_delayed_work(&dimmer_work);
			    schedule_delayed_work(&dimmer_work, 0);
			}
		}
	    else
		{
		    if (!mutex_is_locked(&lock))
			{
			    cancel_delayed_work(&dimmer_work);
			    schedule_delayed_work(&dimmer_work, msecs_to_jiffies(dimmer_delay));
			}
		}
	}

    return;
}
Example #14
int enter_state(suspend_state_t state)
{
	int error;
	int dock=0;
	int retries=3;
	
	if (!valid_state(state))
		return -ENODEV;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;
	
	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");
#ifdef CONFIG_ASUSEC
	if (gpio_get_value(TEGRA_GPIO_PX5)==0){
		dock=1;
		hub_suspended=0;
////		nousb=1;
//		printk("mutex+\n");
//		mutex_lock(&usb_mutex);
		if (nousb==1) {
			printk("usb wait1\n");
			msleep(500);
		}
		asusec_close_keyboard();
////		asusec_suspend_hub_callback2();
/*		while (!hub_suspended) {
			asusec_suspend_hub_callback();
			if (retries-- == 0)
				break;
			if (!hub_suspended) {
				stop_dock();
				msleep(500);
				asusec_resume(0);
				msleep(500);
//				printk("try to restart asusec\n");
//				reload_asusec();
			}
		}
*/		
//		printk("mutex-\n");
//		mutex_unlock(&usb_mutex);

/*
		if (!hub_suspended) {
			stop_dock();
			printk("Dock problem\n");
			if (failed_dock < 3) {
				printk("aborted suspend\n");
				failed_dock++;
				asusec_resume(0);
				error=999;
//				nousb=0;
				goto Unlock;
			}
		} 
*/		
//		nousb=0;
		failed_dock=0;
		msleep(2000);	
//		cpu_down(1);
	}
#endif
	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	error = suspend_prepare();
	if (error) {
		if (dock==1) {
			asusec_resume(0);
		}
		goto Unlock;
	}

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
//	asusec_resume(0);
	mutex_unlock(&pm_mutex);
	return error;
}
Example #15
static bool bitforce_get_temp(struct cgpu_info *bitforce)
{
	int fdDev = bitforce->device_fd;
	char pdevbuf[0x100];
	char *s;

	if (!fdDev)
		return false;

	/* Do not try to get the temperature if we're polling for a result to
	 * minimise the chance of interleaved results */
	if (bitforce->polling)
		return true;

	// Flash instead of Temp - doing both can be too slow
	if (bitforce->flash_led) {
		bitforce_flash_led(bitforce);
 		return true;
	}

	/* It is not critical getting temperature so don't get stuck if we
	 * can't grab the mutex here */
	if (mutex_trylock(&bitforce->device_mutex))
		return false;

	BFwrite(fdDev, "ZLX", 3);
	pdevbuf[0] = '\0';
	BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
	mutex_unlock(&bitforce->device_mutex);
	
	if (unlikely(!pdevbuf[0])) {
		applog(LOG_ERR, "BFL%i: Error: Get temp returned empty string/timed out", bitforce->device_id);
		bitforce->hw_errors++;
		return false;
	}

	if ((!strncasecmp(pdevbuf, "TEMP", 4)) && (s = strchr(pdevbuf + 4, ':'))) {
		float temp = strtof(s + 1, NULL);

		/* Cope with older software  that breaks and reads nonsense
		 * values */
		if (temp > 100)
			temp = strtod(s + 1, NULL);

		if (temp > 0) {
			bitforce->temp = temp;
			if (unlikely(bitforce->cutofftemp > 0 && temp > bitforce->cutofftemp)) {
				applog(LOG_WARNING, "BFL%i: Hit thermal cutoff limit, disabling!", bitforce->device_id);
				bitforce->deven = DEV_RECOVER;
				dev_error(bitforce, REASON_DEV_THERMAL_CUTOFF);
			}
		}
	} else {
		/* Use the temperature monitor as a kind of watchdog for when
		 * our responses are out of sync and flush the buffer to
		 * hopefully recover */
		applog(LOG_WARNING, "BFL%i: Garbled response probably throttling, clearing buffer", bitforce->device_id);
		dev_error(bitforce, REASON_DEV_THROTTLE);
		/* Count throttling episodes as hardware errors */
		bitforce->hw_errors++;
		bitforce_clear_buffer(bitforce);
		return false;
	}

	return true;
}
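
Note the inverted sense of the test above: cgminer's mutex_trylock() wraps pthread_mutex_trylock(), which returns 0 on success and EBUSY on contention, the opposite of the Linux kernel's mutex_trylock(), which returns 1 when the lock was taken. That is why a non-zero return here means "busy, skip the non-critical temperature read". A small sketch of the POSIX convention (try_get_temp and dev_mutex are hypothetical names):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* POSIX convention: pthread_mutex_trylock() returns 0 on success and
 * EBUSY on contention -- hence "if (mutex_trylock(...)) return false;"
 * in the cgminer code above. */
static int try_get_temp(void)
{
	if (pthread_mutex_trylock(&dev_mutex) != 0)
		return -EBUSY;		/* busy: skip the optional read */

	/* ... query the device's temperature here ... */

	pthread_mutex_unlock(&dev_mutex);
	return 0;
}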
Example #16
/**
 * sr_class1p5_calib_work() - work which actually does the calibration
 * @work: pointer to the work
 *
 * calibration routine uses the following logic:
 * on the first trigger, we start the isr to collect sr voltages
 * wait for stabilization delay (reschedule self instead of sleeping)
 * after the delay, see if we collected any isr events
 * if none, we have calibrated voltage.
 * if there are any, we retry until we give up.
 * on retry timeout, select a voltage to use as safe voltage.
 */
static void sr_class1p5_calib_work(struct work_struct *work)
{
	struct sr_class1p5_work_data *work_data =
	    container_of(work, struct sr_class1p5_work_data, work.work);
	unsigned long u_volt_safe = 0, u_volt_current = 0, u_volt_margin = 0;
	struct omap_volt_data *volt_data;
	struct voltagedomain *voltdm;
	int idx = 0;

	if (!work) {
		pr_err("%s: ooops.. null work_data?\n", __func__);
		return;
	}

	/*
	 * Handle the case where we might have just been scheduled AND
	 * 1.5 disable was called.
	 */
	if (!mutex_trylock(&omap_dvfs_lock)) {
		schedule_delayed_work(&work_data->work,
				      msecs_to_jiffies(SR1P5_SAMPLING_DELAY_MS *
						       SR1P5_STABLE_SAMPLES));
		return;
	}

	voltdm = work_data->voltdm;
	/*
	 * In the unlikely case that we did get through when unplanned,
	 * flag and return.
	 */
	if (unlikely(!work_data->work_active)) {
		pr_err("%s:%s unplanned work invocation!\n", __func__,
		       voltdm->name);
		mutex_unlock(&omap_dvfs_lock);
		return;
	}

	volt_data = work_data->vdata;

	work_data->num_calib_triggers++;
	/* if we are triggered first time, we need to start isr to sample */
	if (work_data->num_calib_triggers == 1) {
		/* We could be interrupted many times, so, only for debug */
		pr_debug("%s: %s: Calibration start: Voltage Nominal=%d\n",
			 __func__, voltdm->name, volt_data->volt_nominal);
		goto start_sampling;
	}

	/* Stop isr from interrupting our measurements :) */
	sr_notifier_control(voltdm, false);

	/*
	 * Quit sampling
	 * a) if we have oscillations
	 * b) if we have nominal voltage as the voltage
	 */
	if (work_data->num_calib_triggers == SR1P5_MAX_TRIGGERS)
		goto stop_sampling;

	/* if there are no samples captured.. SR is silent, aka stability! */
	if (!work_data->num_osc_samples) {
		/* Did we interrupt too early? */
		u_volt_current = omap_vp_get_curr_volt(voltdm);
		if (u_volt_current >= volt_data->volt_nominal)
			goto start_sampling;
		u_volt_safe = u_volt_current;
		goto done_calib;
	}

	/* we have potential oscillations/first sample */
start_sampling:
	work_data->num_osc_samples = 0;

	/* Clear transdone events so that we can go on. */
	do {
		if (!omap_vp_is_transdone(voltdm))
			break;
		idx++;
		/* get some constant delay */
		udelay(1);
		omap_vp_clear_transdone(voltdm);
	} while (idx < MAX_CHECK_VPTRANS_US);
	if (idx >= MAX_CHECK_VPTRANS_US)
		pr_warning("%s: timed out waiting for transdone clear!!\n",
			   __func__);

	/* Clear pending events */
	sr_notifier_control(voltdm, false);
	/* trigger sampling */
	sr_notifier_control(voltdm, true);
	schedule_delayed_work(&work_data->work,
			      msecs_to_jiffies(SR1P5_SAMPLING_DELAY_MS *
					       SR1P5_STABLE_SAMPLES));
	mutex_unlock(&omap_dvfs_lock);
	return;

stop_sampling:
	/*
	 * We are here for Oscillations due to two scenarios:
	 * a) SR is attempting to adjust voltage lower than VLIMITO
	 *    which VP will ignore, but SR will re-attempt
	 * b) actual oscillations
	 * NOTE: For debugging, enable debug to see the samples.
	 */
	pr_warning("%s: %s Stop sampling: Voltage Nominal=%d samples=%d\n",
		   __func__, work_data->voltdm->name,
		   volt_data->volt_nominal, work_data->num_osc_samples);

	/* pick up current voltage */
	u_volt_current = omap_vp_get_curr_volt(voltdm);

	/* Just in case we got more interrupts than our tiny buffer */
	if (work_data->num_osc_samples > SR1P5_STABLE_SAMPLES)
		idx = SR1P5_STABLE_SAMPLES;
	else
		idx = work_data->num_osc_samples;
	/* Index at 0 */
	idx -= 1;
	u_volt_safe = u_volt_current;
	/* Grab the max of the samples as the stable voltage */
	for (; idx >= 0; idx--) {
		pr_debug("%s: osc_v[%d]=%ld, safe_v=%ld\n", __func__, idx,
			work_data->u_volt_samples[idx], u_volt_safe);
		if (work_data->u_volt_samples[idx] > u_volt_safe)
			u_volt_safe = work_data->u_volt_samples[idx];
	}

	/* Fall through to close up common stuff */
done_calib:
	sr_disable_errgen(voltdm);
	omap_vp_disable(voltdm);
	sr_disable(voltdm);

	/* Add margin if needed */
	if (volt_data->volt_margin) {
		struct omap_voltdm_pmic *pmic = voltdm->pmic;
		/* Convert to rounded to PMIC step level if available */
		if (pmic && pmic->vsel_to_uv && pmic->uv_to_vsel) {
			/*
			 * To ensure conversion works:
			 * use a proper base voltage - we use the current volt
			 * then convert it with pmic routine to vsel and back
			 * to voltage, and finally remove the base voltage
			 */
			u_volt_margin = u_volt_current + volt_data->volt_margin;
			u_volt_margin = pmic->uv_to_vsel(u_volt_margin);
			u_volt_margin = pmic->vsel_to_uv(u_volt_margin);
			u_volt_margin -= u_volt_current;
		} else {
			u_volt_margin = volt_data->volt_margin;
		}

		u_volt_safe += u_volt_margin;
	}

	if (u_volt_safe > volt_data->volt_nominal) {
		pr_warning("%s: %s Vsafe %ld > Vnom %d. %ld[%d] margin on "
			"vnom %d curr_v=%ld\n", __func__, voltdm->name,
			u_volt_safe, volt_data->volt_nominal, u_volt_margin,
			volt_data->volt_margin, volt_data->volt_nominal,
			u_volt_current);
	}

	volt_data->volt_calibrated = u_volt_safe;
	/* Setup my dynamic voltage for the next calibration for this opp */
	volt_data->volt_dynamic_nominal = omap_get_dyn_nominal(volt_data);

	/*
	 * if the voltage we decided as safe is not the current voltage,
	 * switch
	 */
	if (volt_data->volt_calibrated != u_volt_current) {
		pr_debug("%s: %s reconfiguring to voltage %d\n",
			 __func__, voltdm->name, volt_data->volt_calibrated);
		voltdm_scale(voltdm, volt_data);
	}

	pr_info("%s: %s: Calibration complete: Voltage:Nominal=%d,"
		"Calib=%d,margin=%d\n",
		 __func__, voltdm->name, volt_data->volt_nominal,
		 volt_data->volt_calibrated, volt_data->volt_margin);
	/*
	 * TODO: Setup my wakeup voltage to allow immediate going to OFF and
	 * on - Pending twl and voltage layer cleanups.
	 * This is necessary, as this is not done as part of regular
	 * Dvfs flow.
	 * vc_setup_on_voltage(voltdm, volt_data->volt_calibrated);
	 */
	work_data->work_active = false;
	mutex_unlock(&omap_dvfs_lock);
}
Example #17
/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
Example #18
s32 LinuxTryLockMutex(struct mutex *psPVRSRVMutex)
{
    return mutex_trylock(psPVRSRVMutex);
}
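Example #19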
int
xfs_qm_dqlock_nowait(
	xfs_dquot_t *dqp)
{
	return (mutex_trylock(&((dqp)->q_qlock)));
}
Example #20
static void tpd_suspend(struct early_suspend *h)
{
	int ret = 0;
	int iRetry = 5;
	const char data = 0x3;
	
	//release all touch points
    input_report_key(tpd->dev, BTN_TOUCH, 0);
	input_mt_sync(tpd->dev);
	input_sync(tpd->dev);

//BEGIN <touch panel> <DATE20130831> <tp proximity> zhangxiaofei
#ifdef TPD_PROXIMITY
	if (tpd_proximity_flag == 1)
	{
		tpd_proximity_flag_one = 1;	
		return;
	}
#endif	
//END <touch panel> <DATE20130831> <tp proximity> zhangxiaofei
	if ( g_pts ){
		 CTP_DBG("TPD enter sleep\n");
		if (atomic_read(&g_pts->isp_opened)){
			CTP_DBG("isp is already opened.");
			return;
		}

		mt_eint_mask(CUST_EINT_TOUCH_PANEL_NUM);
		//mutex_lock(&g_pts->mutex);//Unlock on resume
		mutex_trylock(&g_pts->mutex);//Unlock on resume
		 
#ifdef CONFIG_TOUCHSCREEN_FT5X05_DISABLE_KEY_WHEN_SLIDE
		fts_6x06_key_cancel();
#endif

#ifdef CONFIG_TOUCHSCREEN_POWER_DOWN_WHEN_SLEEP
	mt_set_gpio_mode(GPIO_CTP_RST_PIN, GPIO_CTP_RST_PIN_M_GPIO);
	mt_set_gpio_dir(GPIO_CTP_RST_PIN, GPIO_DIR_OUT);
	mt_set_gpio_out(GPIO_CTP_RST_PIN, GPIO_OUT_ZERO);
	msleep(2);
#ifdef MT6577
	hwPowerDown(MT65XX_POWER_LDO_VGP2, "touch"); 
#else
    hwPowerDown(MT65XX_POWER_LDO_VGP2, "touch"); 
#endif	
#else //!CONFIG_TOUCHSCREEN_POWER_DOWN_WHEN_SLEEP
		//make sure the WakeUp is high before it enters sleep mode,
		//otherwise the touch can't be resumed.
		//mt_set_gpio_mode(GPIO_CTP_EN_PIN, GPIO_CTP_EN_PIN_M_GPIO);
		//mt_set_gpio_dir(GPIO_CTP_EN_PIN, GPIO_DIR_OUT);
		//mt_set_gpio_out(GPIO_CTP_EN_PIN, GPIO_OUT_ONE);
		//msleep(1);  

		while (iRetry) {
			ret = i2c_smbus_write_i2c_block_data(g_pts->client, 0xA5, 1, &data);  //TP enter sleep mode
			if ( ret < 0 ){
				TPD_DMESG("Enter sleep mode is %d\n", ret);
#ifdef MT6577
				hwPowerDown(MT65XX_POWER_LDO_VGP2, "touch"); 
#else
                //hwPowerDown(MT65XX_POWER_LDO_VGP2, "touch"); 
				//mt_set_gpio_mode(GPIO_CTP_EN_PIN, GPIO_CTP_EN_PIN_M_GPIO);
				//mt_set_gpio_dir(GPIO_CTP_EN_PIN, GPIO_DIR_OUT);
				//mt_set_gpio_out(GPIO_CTP_EN_PIN, GPIO_OUT_ZERO);
#endif	
				msleep(2);  
				fts_6x06_hw_init();
			}else{
				break;
			}
			iRetry--;
			msleep(100);  
		} 
#endif//CONFIG_TOUCHSCREEN_POWER_DOWN_WHEN_SLEEP
#if 0//Android 4.0 don't need to report these up events.
		ft6x06_complete_unfinished_event();
#endif
		atomic_set( &g_pts->ts_sleepState, 1 );
	}
}
Example #21
static int snapshot_ioctl(struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg)
{
	int error = 0;
	struct snapshot_data *data;
	loff_t avail;
	sector_t offset;

	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
		return -ENOTTY;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	data = filp->private_data;

	switch (cmd) {

	case SNAPSHOT_FREEZE:
		if (data->frozen)
			break;
		mutex_lock(&pm_mutex);
		if (freeze_processes()) {
			thaw_processes();
			error = -EBUSY;
		}
		mutex_unlock(&pm_mutex);
		if (!error)
			data->frozen = 1;
		break;

	case SNAPSHOT_UNFREEZE:
		if (!data->frozen || data->ready)
			break;
		mutex_lock(&pm_mutex);
		thaw_processes();
		mutex_unlock(&pm_mutex);
		data->frozen = 0;
		break;

	case SNAPSHOT_ATOMIC_SNAPSHOT:
		if (data->mode != O_RDONLY || !data->frozen  || data->ready) {
			error = -EPERM;
			break;
		}
		error = snapshot_suspend(data->platform_suspend);
		if (!error)
			error = put_user(in_suspend, (unsigned int __user *)arg);
		if (!error)
			data->ready = 1;
		break;

	case SNAPSHOT_ATOMIC_RESTORE:
		snapshot_write_finalize(&data->handle);
		if (data->mode != O_WRONLY || !data->frozen ||
		    !snapshot_image_loaded(&data->handle)) {
			error = -EPERM;
			break;
		}
		error = snapshot_restore(data->platform_suspend);
		break;

	case SNAPSHOT_FREE:
		swsusp_free();
		memset(&data->handle, 0, sizeof(struct snapshot_handle));
		data->ready = 0;
		break;

	case SNAPSHOT_SET_IMAGE_SIZE:
		image_size = arg;
		break;

	case SNAPSHOT_AVAIL_SWAP:
		avail = count_swap_pages(data->swap, 1);
		avail <<= PAGE_SHIFT;
		error = put_user(avail, (loff_t __user *)arg);
		break;

	case SNAPSHOT_GET_SWAP_PAGE:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		offset = alloc_swapdev_block(data->swap);
		if (offset) {
			offset <<= PAGE_SHIFT;
			error = put_user(offset, (sector_t __user *)arg);
		} else {
			error = -ENOSPC;
		}
		break;

	case SNAPSHOT_FREE_SWAP_PAGES:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		free_all_swap_pages(data->swap);
		break;

	case SNAPSHOT_SET_SWAP_FILE:
		if (!swsusp_swap_in_use()) {
			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			if (old_decode_dev(arg)) {
				data->swap = swap_type_of(old_decode_dev(arg),
							0, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		} else {
			error = -EPERM;
		}
		break;

	case SNAPSHOT_S2RAM:
		if (!pm_ops) {
			error = -ENOSYS;
			break;
		}

		if (!data->frozen) {
			error = -EPERM;
			break;
		}

		if (!mutex_trylock(&pm_mutex)) {
			error = -EBUSY;
			break;
		}

		if (pm_ops->prepare) {
			error = pm_ops->prepare(PM_SUSPEND_MEM);
			if (error)
				goto OutS3;
		}

		/* Put devices to sleep */
		suspend_console();
		error = device_suspend(PMSG_SUSPEND);
		if (error) {
			printk(KERN_ERR "Failed to suspend some devices.\n");
		} else {
			error = disable_nonboot_cpus();
			if (!error) {
				/* Enter S3, system is already frozen */
				suspend_enter(PM_SUSPEND_MEM);
				enable_nonboot_cpus();
			}
			/* Wake up devices */
			device_resume();
		}
		resume_console();
		if (pm_ops->finish)
			pm_ops->finish(PM_SUSPEND_MEM);

 OutS3:
		mutex_unlock(&pm_mutex);
		break;

	case SNAPSHOT_PMOPS:
		error = -EINVAL;

		switch (arg) {

		case PMOPS_PREPARE:
			if (hibernation_ops) {
				data->platform_suspend = 1;
				error = 0;
			} else {
				error = -ENOSYS;
			}
			break;

		case PMOPS_ENTER:
			if (data->platform_suspend) {
				kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
				error = hibernation_ops->enter();
			}
			break;

		case PMOPS_FINISH:
			if (data->platform_suspend)
				error = 0;

			break;

		default:
			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);

		}
		break;

	case SNAPSHOT_SET_SWAP_AREA:
		if (swsusp_swap_in_use()) {
			error = -EPERM;
		} else {
			struct resume_swap_area swap_area;
			dev_t swdev;

			error = copy_from_user(&swap_area, (void __user *)arg,
					sizeof(struct resume_swap_area));
			if (error) {
				error = -EFAULT;
				break;
			}

			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			swdev = old_decode_dev(swap_area.dev);
			if (swdev) {
				offset = swap_area.offset;
				data->swap = swap_type_of(swdev, offset, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		}
		break;

	default:
		error = -ENOTTY;

	}

	return error;
}
Example #22
static long snapshot_ioctl(struct file *filp, unsigned int cmd,
							unsigned long arg)
{
	int error = 0;
	struct snapshot_data *data;
	loff_t size;
	sector_t offset;

	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
		return -ENOTTY;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	data = filp->private_data;

	switch (cmd) {

	case SNAPSHOT_FREEZE:
		if (data->frozen)
			break;

		printk("Syncing filesystems ... ");
		sys_sync();
		printk("done.\n");

		error = usermodehelper_disable();
		if (error)
			break;

		error = freeze_processes();
		if (error) {
			thaw_processes();
			usermodehelper_enable();
		}
		if (!error)
			data->frozen = 1;
		break;

	case SNAPSHOT_UNFREEZE:
		if (!data->frozen || data->ready)
			break;
		thaw_processes();
		usermodehelper_enable();
		data->frozen = 0;
		break;

	case SNAPSHOT_CREATE_IMAGE:
	case SNAPSHOT_ATOMIC_SNAPSHOT:
		if (data->mode != O_RDONLY || !data->frozen  || data->ready) {
			error = -EPERM;
			break;
		}
		error = hibernation_snapshot(data->platform_support);
		if (!error)
			error = put_user(in_suspend, (int __user *)arg);
		if (!error)
			data->ready = 1;
		break;

	case SNAPSHOT_ATOMIC_RESTORE:
		snapshot_write_finalize(&data->handle);
		if (data->mode != O_WRONLY || !data->frozen ||
		    !snapshot_image_loaded(&data->handle)) {
			error = -EPERM;
			break;
		}
		error = hibernation_restore(data->platform_support);
		break;

	case SNAPSHOT_FREE:
		swsusp_free();
		memset(&data->handle, 0, sizeof(struct snapshot_handle));
		data->ready = 0;
		break;

	case SNAPSHOT_PREF_IMAGE_SIZE:
	case SNAPSHOT_SET_IMAGE_SIZE:
		image_size = arg;
		break;

	case SNAPSHOT_GET_IMAGE_SIZE:
		if (!data->ready) {
			error = -ENODATA;
			break;
		}
		size = snapshot_get_image_size();
		size <<= PAGE_SHIFT;
		error = put_user(size, (loff_t __user *)arg);
		break;

	case SNAPSHOT_AVAIL_SWAP_SIZE:
	case SNAPSHOT_AVAIL_SWAP:
		size = count_swap_pages(data->swap, 1);
		size <<= PAGE_SHIFT;
		error = put_user(size, (loff_t __user *)arg);
		break;

	case SNAPSHOT_ALLOC_SWAP_PAGE:
	case SNAPSHOT_GET_SWAP_PAGE:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		offset = alloc_swapdev_block(data->swap);
		if (offset) {
			offset <<= PAGE_SHIFT;
			error = put_user(offset, (loff_t __user *)arg);
		} else {
			error = -ENOSPC;
		}
		break;

	case SNAPSHOT_FREE_SWAP_PAGES:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		free_all_swap_pages(data->swap);
		break;

	case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
		if (!swsusp_swap_in_use()) {
			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			if (old_decode_dev(arg)) {
				data->swap = swap_type_of(old_decode_dev(arg),
							0, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		} else {
			error = -EPERM;
		}
		break;

	case SNAPSHOT_S2RAM:
		if (!data->frozen) {
			error = -EPERM;
			break;
		}
		/*
		 * Tasks are frozen and the notifiers have been called with
		 * PM_HIBERNATION_PREPARE
		 */
		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
		break;

	case SNAPSHOT_PLATFORM_SUPPORT:
		data->platform_support = !!arg;
		break;

	case SNAPSHOT_POWER_OFF:
		if (data->platform_support)
			error = hibernation_platform_enter();
		break;

	case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
		error = -EINVAL;

		switch (arg) {

		case PMOPS_PREPARE:
			data->platform_support = 1;
			error = 0;
			break;

		case PMOPS_ENTER:
			if (data->platform_support)
				error = hibernation_platform_enter();
			break;

		case PMOPS_FINISH:
			if (data->platform_support)
				error = 0;
			break;

		default:
			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);

		}
		break;

	case SNAPSHOT_SET_SWAP_AREA:
		if (swsusp_swap_in_use()) {
			error = -EPERM;
		} else {
			struct resume_swap_area swap_area;
			dev_t swdev;

			error = copy_from_user(&swap_area, (void __user *)arg,
					sizeof(struct resume_swap_area));
			if (error) {
				error = -EFAULT;
				break;
			}

			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			swdev = old_decode_dev(swap_area.dev);
			if (swdev) {
				offset = swap_area.offset;
				data->swap = swap_type_of(swdev, offset, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		}
		break;

	default:
		error = -ENOTTY;

	}

	mutex_unlock(&pm_mutex);

	return error;
}
Example #23
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}
Example #24
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_forcewake_reset(dev, false);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
Example #25
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;
	int packet;

do_it_again:

	BUG_ON(!tty->read_buf);

	c = job_control(tty, file);
	if (c < 0)
		return c;

	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&tty->atomic_read_lock))
			return -EAGAIN;
	} else {
		if (mutex_lock_interruptible(&tty->atomic_read_lock))
			return -ERESTARTSYS;
	}
	packet = tty->packet;

	add_wait_queue(&tty->read_wait, &wait);
	while (nr) {
		
		if (packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			spin_lock_irqsave(&tty->link->ctrl_lock, flags);
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
			if (tty_put_user(tty, cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));

		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			n_tty_set_room(tty);
			timeout = schedule_timeout(timeout);
			BUG_ON(!tty->read_buf);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		
		if (packet && b == buf) {
			if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon && !L_EXTPROC(tty)) {
			
			while (nr && tty->read_cnt) {
				int eol;

				eol = test_and_clear_bit(tty->read_tail,
						tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (tty_put_user(tty, c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol) {
					tty_audit_push(tty);
					break;
				}
			}
			if (retval)
				break;
		} else {
			int uncopied;
			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
			n_tty_set_room(tty);
			check_unthrottle(tty);
		}

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	mutex_unlock(&tty->atomic_read_lock);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
			clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		goto do_it_again;

	n_tty_set_room(tty);
	return retval;
}
Example #26
static bool bitforce_get_temp(struct cgpu_info *bitforce)
{
	char buf[BITFORCE_BUFSIZ+1];
	int err, amount;
	char *s;

	/* Do not try to get the temperature if we're polling for a result to
	 * minimise the chance of interleaved results */
	if (bitforce->polling)
		return true;

	// Flash instead of Temp - doing both can be too slow
	if (bitforce->flash_led) {
		bitforce_flash_led(bitforce);
 		return true;
	}

	/* It is not critical getting temperature so don't get stuck if we
	 * can't grab the mutex here */
	if (mutex_trylock(&bitforce->device_mutex))
		return false;

	if ((err = usb_write(bitforce, BITFORCE_TEMPERATURE, BITFORCE_TEMPERATURE_LEN, &amount, C_REQUESTTEMPERATURE)) < 0 || amount != BITFORCE_TEMPERATURE_LEN) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%i: Error: Request temp invalid/timed out (%d:%d)",
				bitforce->drv->name, bitforce->device_id, amount, err);
		bitforce->hw_errors++;
		return false;
	}

	if ((err = usb_ftdi_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETTEMPERATURE)) < 0 || amount < 1) {
		mutex_unlock(&bitforce->device_mutex);
		if (err < 0) {
			applog(LOG_ERR, "%s%i: Error: Get temp return invalid/timed out (%d:%d)",
					bitforce->drv->name, bitforce->device_id, amount, err);
		} else {
			applog(LOG_ERR, "%s%i: Error: Get temp returned nothing (%d:%d)",
					bitforce->drv->name, bitforce->device_id, amount, err);
		}
		bitforce->hw_errors++;
		return false;
	}

	mutex_unlock(&bitforce->device_mutex);
	
	if ((!strncasecmp(buf, "TEMP", 4)) && (s = strchr(buf + 4, ':'))) {
		float temp = strtof(s + 1, NULL);

		/* Cope with older software  that breaks and reads nonsense
		 * values */
		if (temp > 100)
			temp = strtod(s + 1, NULL);

		if (temp > 0) {
			bitforce->temp = temp;
			if (unlikely(bitforce->cutofftemp > 0 && temp > bitforce->cutofftemp)) {
				applog(LOG_WARNING, "%s%i: Hit thermal cutoff limit, disabling!",
							bitforce->drv->name, bitforce->device_id);
				bitforce->deven = DEV_RECOVER;
				dev_error(bitforce, REASON_DEV_THERMAL_CUTOFF);
			}
		}
	} else {
		/* Use the temperature monitor as a kind of watchdog for when
		 * our responses are out of sync and flush the buffer to
		 * hopefully recover */
		applog(LOG_WARNING, "%s%i: Garbled response probably throttling, clearing buffer",
					bitforce->drv->name, bitforce->device_id);
		dev_error(bitforce, REASON_DEV_THROTTLE);
		/* Count throttling episodes as hardware errors */
		bitforce->hw_errors++;
		bitforce_initialise(bitforce, true);
		return false;
	}

	return true;
}
Example #27
static int
poweroff(char *msg, char **cmd_argv)
{
	struct stat	statbuf;
	pid_t		pid, child;
	struct passwd	*pwd;
	char		*home, *user;
	char		ehome[] = "HOME=";
	char		euser[] = "LOGNAME=";
	int		status;
	char		**ca;

	if (mutex_trylock(&poweroff_mutex) != 0)
		return (0);

	if (stat("/dev/console", &statbuf) == -1 ||
	    (pwd = getpwuid(statbuf.st_uid)) == NULL) {
		mutex_unlock(&poweroff_mutex);
		return (1);
	}

	if (msg)
		syslog(LOG_NOTICE, msg);

	if (*cmd_argv == NULL) {
		logerror("No command to run.");
		mutex_unlock(&poweroff_mutex);
		return (1);
	}

	home = malloc(strlen(pwd->pw_dir) + sizeof (ehome));
	user = malloc(strlen(pwd->pw_name) + sizeof (euser));
	if (home == NULL || user == NULL) {
		free(home);
		free(user);
		logerror("No memory.");
		mutex_unlock(&poweroff_mutex);
		return (1);
	}
	(void) strcpy(home, ehome);
	(void) strcat(home, pwd->pw_dir);
	(void) strcpy(user, euser);
	(void) strcat(user, pwd->pw_name);

	/*
	 * Need to simulate the user environment; minimally set HOME and LOGNAME.
	 */
	if ((child = fork1()) == 0) {
		(void) putenv(home);
		(void) putenv(user);
		(void) setgid(pwd->pw_gid);
		(void) setuid(pwd->pw_uid);

		/*
		 * check for shutdown flag and set environment
		 */
		for (ca = cmd_argv; *ca; ca++) {
			if (strcmp("-h", *ca) == 0) {
				(void) putenv("SYSSUSPENDDODEFAULT=");
				break;
			}
		}

		(void) execv(cmd_argv[0], cmd_argv);
		exit(EXIT_FAILURE);
	} else {
		free(home);
		free(user);
		if (child == -1) {
			mutex_unlock(&poweroff_mutex);
			return (1);
		}
	}
	pid = 0;
	while (pid != child)
		pid = wait(&status);
	if (WEXITSTATUS(status)) {
		(void) syslog(LOG_ERR, "Failed to exec \"%s\".", cmd_argv[0]);
		mutex_unlock(&poweroff_mutex);
		return (1);
	}

	mutex_unlock(&poweroff_mutex);
	return (0);
}
Example #28
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans)
		path->search_commit_root = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		WARN_ON(ref->count < 0);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							info_level);
				eb = read_tree_block(fs_info->extent_root,
							   ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				ref->inode_list = eie;
				free_extent_buffer(eb);
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
		}
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}
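
The trylock above avoids a lock-order inversion: the code already holds delayed_refs->lock (a spinlock) and must not sleep on head->mutex under it, so on contention it takes a reference, drops the spinlock, blocks until the holder releases the mutex, and retries from the top. A hedged POSIX-threads condensation of that dance (process_head, list_lock, and head_mutex are hypothetical names; the kernel original also reference-counts the head so it cannot vanish while unlocked):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If the per-head mutex is contended while we hold the list lock,
 * drop the list lock, block until the holder releases the head, and
 * restart the whole lookup rather than risk a deadlock. */
static void process_head(void (*do_work)(void))
{
again:
	pthread_mutex_lock(&list_lock);
	if (pthread_mutex_trylock(&head_mutex) != 0) {
		pthread_mutex_unlock(&list_lock);
		/* wait for the current holder, then retry from scratch */
		pthread_mutex_lock(&head_mutex);
		pthread_mutex_unlock(&head_mutex);
		goto again;
	}
	do_work();
	pthread_mutex_unlock(&head_mutex);
	pthread_mutex_unlock(&list_lock);
}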
Example #29
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return 0;

	if (!mutex_trylock(&dev->struct_mutex))
		return -EBUSY;

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev, flags);
		break;
	case 5:
		ret = ironlake_do_reset(dev, flags);
		break;
	case 4:
		ret = i965_do_reset(dev, flags);
		break;
	case 2:
		ret = i8xx_do_reset(dev, flags);
		break;
	}
	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
		if (HAS_BSD(dev))
		    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
		if (HAS_BLT(dev))
		    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
	 * need to retrain the display link and cannot just restore the register
	 * values.
	 */
	if (need_display) {
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);
	}

	return 0;
}
Example #30
static void ocfs2_clear_inode(struct inode *inode)
{
	int status;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	clear_inode(inode);
	trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
				inode->i_nlink);

	mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
			"Inode=%lu\n", inode->i_ino);

	dquot_drop(inode);

	/* To prevent remote deletes we hold the open lock before; now it
	 * is time to unlock PR and EX open locks. */
	ocfs2_open_unlock(inode);

	/* Do these before all the other work so that we don't bounce
	 * the downconvert thread while waiting to destroy the locks. */
	ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);

	ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
			   &oi->ip_la_data_resv);
	ocfs2_resv_init_once(&oi->ip_la_data_resv);

	/* We very well may get a clear_inode before all of an inode's
	 * metadata has hit disk. Of course, we can't drop any cluster
	 * locks until the journal has finished with it. The only
	 * exception here are successfully wiped inodes - their
	 * metadata can now be considered to be part of the system
	 * inodes from which it came. */
	if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED))
		ocfs2_checkpoint_inode(inode);

	mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
			"Clear inode of %llu, inode has io markers\n",
			(unsigned long long)oi->ip_blkno);

	ocfs2_extent_map_trunc(inode, 0);

	status = ocfs2_drop_inode_locks(inode);
	if (status < 0)
		mlog_errno(status);

	ocfs2_lock_res_free(&oi->ip_rw_lockres);
	ocfs2_lock_res_free(&oi->ip_inode_lockres);
	ocfs2_lock_res_free(&oi->ip_open_lockres);

	ocfs2_metadata_cache_exit(INODE_CACHE(inode));

	mlog_bug_on_msg(INODE_CACHE(inode)->ci_num_cached,
			"Clear inode of %llu, inode has %u cache items\n",
			(unsigned long long)oi->ip_blkno,
			INODE_CACHE(inode)->ci_num_cached);

	mlog_bug_on_msg(!(INODE_CACHE(inode)->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Clear inode of %llu, inode has a bad flag\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),
			"Clear inode of %llu, inode is locked\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex),
			"Clear inode of %llu, io_mutex is locked\n",
			(unsigned long long)oi->ip_blkno);
	mutex_unlock(&oi->ip_io_mutex);

	/*
	 * down_trylock() returns 0, down_write_trylock() returns 1
	 * kernel 1, world 0
	 */
	mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
			"Clear inode of %llu, alloc_sem is locked\n",
			(unsigned long long)oi->ip_blkno);
	up_write(&oi->ip_alloc_sem);

	mlog_bug_on_msg(oi->ip_open_count,
			"Clear inode of %llu has open count %d\n",
			(unsigned long long)oi->ip_blkno, oi->ip_open_count);

	/* Clear all other flags. */
	oi->ip_flags = 0;
	oi->ip_dir_start_lookup = 0;
	oi->ip_blkno = 0ULL;

	/*
	 * ip_jinode is used to track txns against this inode. We ensure that
	 * the journal is flushed before journal shutdown. Thus it is safe to
	 * have inodes get cleaned up after journal shutdown.
	 */
	jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
				       &oi->ip_jinode);
}
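
The mlog_bug_on_msg(!mutex_trylock(...)) checks above use trylock as a teardown assertion: at clear-inode time every lock must be free, so a failed trylock proves some other path still holds it. A user-space sketch of the same idiom, assuming POSIX threads (object_teardown and io_mutex are hypothetical names):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Teardown-time assertion mirroring the ocfs2 pattern: the lock must
 * be free by now, so a failed trylock exposes a lifetime bug. */
static void object_teardown(void)
{
	int busy = pthread_mutex_trylock(&io_mutex);

	assert(busy == 0);	/* still held elsewhere: bug */
	pthread_mutex_unlock(&io_mutex);

	/* ... release the object's resources here ... */
}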