Example #1
/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}
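Most of the examples below share the same idiom: compute a deadline relative to jiffies, round it up to the next whole second with round_jiffies_up() so the wakeup can be batched with other timers and never fires early, and arm (or re-arm) the timer with mod_timer(). A minimal self-contained sketch of that pattern, assuming a recent kernel with timer_setup() and an invented example_dev structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Hypothetical device state; only the timer matters for the pattern. */
struct example_dev {
	struct timer_list cleanup_timer;
};

static void example_timer_fn(struct timer_list *t)
{
	struct example_dev *edev = from_timer(edev, t, cleanup_timer);

	/* ... periodic work for edev goes here ... */

	/* Re-arm roughly one second out, rounded up to a second boundary. */
	mod_timer(&edev->cleanup_timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(1000)));
}

static void example_timer_start(struct example_dev *edev)
{
	timer_setup(&edev->cleanup_timer, example_timer_fn, 0);
	mod_timer(&edev->cleanup_timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(1000)));
}

Unlike round_jiffies(), which rounds to the nearest second and may move a deadline earlier, round_jiffies_up() never returns a value before the one passed in, which is why it is preferred for timeouts such as the cache expiry above.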
Example #2
static transaction_t *
get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires =
				round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;

	return transaction;
}
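Here round_jiffies_up() is applied to the commit deadline before add_timer(), so the journal thread is woken on a whole-second boundary at or after t_expires rather than exactly on it. The callback side is not part of this excerpt; in jbd2 it is essentially a one-liner that wakes the journal thread, roughly as sketched below (field names taken from journal_t; the exact callback signature depends on the kernel version):

static void commit_timeout(struct timer_list *t)
{
	journal_t *journal = from_timer(journal, t, j_commit_timer);

	/* Wake the journal thread; it re-checks t_expires itself. */
	wake_up_process(journal->j_task);
}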
Example #3
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure the done flag is clear before starting the DMA */

	/* Initiate DMA transfer of statistics */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
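The half-second timer armed on the last line exists to poll for completion of the statistics DMA that was just started: the hardware writes a completion marker into the buffer pointed to by stats_dma_done. A hedged sketch of the consumer side (FALCON_STATS_DONE is assumed to be the counterpart of FALCON_STATS_NOT_DONE; the function name is invented):

static void example_falcon_stats_poll(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		nic_data->stats_pending = false;
		rmb();	/* read the done flag before the stats buffer */
		/* ... copy efx->stats_buffer into the driver's counters ... */
	}
}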
Example #4
void enable_debug_timer(struct ssp_data *data)
{
	mod_timer(&data->debug_timer,
		round_jiffies_up(jiffies + SSP_DEBUG_TIMER_SEC));
}
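SSP_DEBUG_TIMER_SEC is already a jiffies delta here, so the helper only adds the round-up to a whole second. The matching callback is not part of this excerpt; a hedged sketch of what it would typically do follows (the callback name, workqueue, and work item are assumptions, not quoted from the ssp driver):

static void example_debug_timer_func(struct timer_list *t)
{
	struct ssp_data *data = from_timer(data, t, debug_timer);

	/* Defer the actual debug dump to process context... */
	queue_work(data->debug_wq, &data->work_debug);
	/* ...and re-arm for the next period. */
	enable_debug_timer(data);
}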
Example #5
/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds
 *	for the device to exit the transitioning state
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
{
	struct scsi_sense_hdr sense_hdr;
	int len, k, off, valid_states = 0;
	unsigned char *ucp;
	unsigned err, retval;
	unsigned long expiry, interval = 0;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;

	if (!h->transition_tmo)
		expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
	else
		expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);

 retry:
	retval = submit_rtpg(sdev, h);
	if (retval) {
		if (!scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
					  &sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			if (driver_byte(retval) == DRIVER_BUSY)
				return SCSI_DH_DEV_TEMP_BUSY;
			return SCSI_DH_IO;
		}

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 */
		if (!(h->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
			h->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
			goto retry;
		}
		/*
		 * Retry on ALUA state transition or if any
		 * UNIT ATTENTION occurred; default to SCSI_DH_IO so
		 * err is never read uninitialized below.
		 */
		err = SCSI_DH_IO;
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
			err = SCSI_DH_RETRY;
		else if (sense_hdr.sense_key == UNIT_ATTENTION)
			err = SCSI_DH_RETRY;
		if (err == SCSI_DH_RETRY && time_before(jiffies, expiry)) {
			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
			goto retry;
		}
		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		return SCSI_DH_IO;
	}

	len = get_unaligned_be32(&h->buff[0]) + 4;

	if (len > h->bufflen) {
		/* Resubmit with the correct length */
		if (realloc_buffer(h, len)) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n",__func__);
			/* Temporary failure, bypass */
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	orig_transition_tmo = h->transition_tmo;
	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
		h->transition_tmo = h->buff[5];
	else
		h->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, h->transition_tmo);
		expiry = jiffies + h->transition_tmo * HZ;
	}

	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

	for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, ucp += off) {

		if (h->group_id == get_unaligned_be16(&ucp[2])) {
			h->state = ucp[0] & 0x0f;
			h->pref = ucp[0] >> 7;
			valid_states = ucp[1];
		}
		off = 8 + (ucp[7] * 4);
	}
Example #6
/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
{
	struct scsi_sense_hdr sense_hdr;
	int len, k, off, valid_states = 0;
	unsigned char *ucp;
	unsigned err;
	unsigned long expiry, interval = 10;

	expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
 retry:
	err = submit_rtpg(sdev, h);

	if (err == SCSI_DH_IO && h->senselen > 0) {
		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
					   &sense_hdr);
		if (!err)
			return SCSI_DH_IO;

		err = alua_check_sense(sdev, &sense_hdr);
		if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
			goto retry;
		sdev_printk(KERN_INFO, sdev,
			    "%s: rtpg sense code %02x/%02x/%02x\n",
			    ALUA_DH_NAME, sense_hdr.sense_key,
			    sense_hdr.asc, sense_hdr.ascq);
		err = SCSI_DH_IO;
	}
	if (err != SCSI_DH_OK)
		return err;

	len = (h->buff[0] << 24) + (h->buff[1] << 16) +
		(h->buff[2] << 8) + h->buff[3] + 4;

	if (len > h->bufflen) {
		/* Resubmit with the correct length */
		if (realloc_buffer(h, len)) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n",__func__);
			/* Temporary failure, bypass */
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
		if (h->group_id == (ucp[2] << 8) + ucp[3]) {
			h->state = ucp[0] & 0x0f;
			valid_states = ucp[1];
		}
		off = 8 + (ucp[7] * 4);
	}

	sdev_printk(KERN_INFO, sdev,
		    "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n",
		    ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
		    valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
		    valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
		    valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
		    valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
		    valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
		    valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
		    valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');

	switch (h->state) {
	case TPGS_STATE_TRANSITIONING:
		if (time_before(jiffies, expiry)) {
			/* State transition, retry */
			interval *= 10;
			msleep(interval);
			goto retry;
		}
		/* Transitioning time exceeded, set port to standby */
		err = SCSI_DH_RETRY;
		h->state = TPGS_STATE_STANDBY;
		break;
	case TPGS_STATE_OFFLINE:
	case TPGS_STATE_UNAVAILABLE:
		/* Path unusable for unavailable/offline */
		err = SCSI_DH_DEV_OFFLINED;
		break;
	default:
		/* Usable path if active */
		err = SCSI_DH_OK;
		break;
	}
	return err;
}
Example #7
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
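This helper (and its etnaviv twin in example #10) re-arms the hang-check timer every time the GPU makes progress; when the timer actually expires, the handler compares the last retired fence against the last submitted one. A hedged sketch of that handler (the fence fields and recovery work item are assumptions modeled on the msm driver, not copied from it; a modern timer callback signature is assumed):

static void example_hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	uint32_t fence = gpu->funcs->last_fence(gpu);	/* assumed accessor */

	if (fence != gpu->hangcheck_fence) {
		/* Some progress was made since the last check. */
		gpu->hangcheck_fence = fence;
	} else if (fence < gpu->submitted_fence) {
		/* No progress while work is outstanding: treat as a hang. */
		queue_work(system_wq, &gpu->recover_work);
	}

	/* Keep polling while there is still outstanding work. */
	if (gpu->submitted_fence > gpu->hangcheck_fence)
		hangcheck_timer_reset(gpu);
}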
Example #8
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, start the
	 * gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi, true);
	}

	sbi->cp_expires = round_jiffies_up(jiffies);

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
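Unlike the other examples, the round_jiffies_up() call near the end of f2fs_fill_super() does not arm a timer at all: cp_expires just records a checkpoint deadline in jiffies, which background code later compares with time_after(). A hedged sketch of that consumer (the cp_interval field, trigger condition, and helper name are assumptions, not quoted f2fs code):

static void example_maybe_checkpoint(struct f2fs_sb_info *sbi)
{
	if (time_after(jiffies, sbi->cp_expires)) {
		struct cp_control cpc = { .reason = CP_SYNC };

		write_checkpoint(sbi, &cpc);
		/* Push the deadline out by the configured interval. */
		sbi->cp_expires = round_jiffies_up(jiffies +
						   HZ * sbi->cp_interval);
	}
}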
Example #9
/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
{
	struct scsi_sense_hdr sense_hdr;
	int len, k, off, valid_states = 0;
	unsigned char *ucp;
	unsigned err;
	bool rtpg_ext_hdr_req = 1;
	unsigned long expiry, interval = 0;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;

	if (!h->transition_tmo)
		expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
	else
		expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);

 retry:
	err = submit_rtpg(sdev, h, rtpg_ext_hdr_req);

	if (err == SCSI_DH_IO && h->senselen > 0) {
		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
					   &sense_hdr);
		if (!err)
			return SCSI_DH_IO;

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 */
		if (rtpg_ext_hdr_req == 1 &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
			rtpg_ext_hdr_req = 0;
			goto retry;
		}

		err = alua_check_sense(sdev, &sense_hdr);
		if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
			goto retry;
		sdev_printk(KERN_INFO, sdev,
			    "%s: rtpg sense code %02x/%02x/%02x\n",
			    ALUA_DH_NAME, sense_hdr.sense_key,
			    sense_hdr.asc, sense_hdr.ascq);
		err = SCSI_DH_IO;
	}
	if (err != SCSI_DH_OK)
		return err;

	len = (h->buff[0] << 24) + (h->buff[1] << 16) +
		(h->buff[2] << 8) + h->buff[3] + 4;

	if (len > h->bufflen) {
		/* Resubmit with the correct length */
		if (realloc_buffer(h, len)) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n",__func__);
			/* Temporary failure, bypass */
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	orig_transition_tmo = h->transition_tmo;
	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
		h->transition_tmo = h->buff[5];
	else
		h->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (orig_transition_tmo != h->transition_tmo) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, h->transition_tmo);
		expiry = jiffies + h->transition_tmo * HZ;
	}

	if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

	for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, ucp += off) {

		if (h->group_id == (ucp[2] << 8) + ucp[3]) {
			h->state = ucp[0] & 0x0f;
			h->pref = ucp[0] >> 7;
			valid_states = ucp[1];
		}
		off = 8 + (ucp[7] * 4);
	}
Example #10
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}