Example #1
0
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct ccw_device
 *
 * Returns:	struct zfcp_adapter*
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Proc-fs entries are also created.
 */
struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	if (!get_device(&ccw_device->dev))
		return ERR_PTR(-ENODEV);

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter) {
		put_device(&ccw_device->dev);
		return ERR_PTR(-ENOMEM);
	}

	kref_init(&adapter->ref);

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
	INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);

	if (zfcp_qdio_setup(adapter))
		goto failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed;

	adapter->req_list = zfcp_reqlist_alloc();
	if (!adapter->req_list)
		goto failed;

	if (zfcp_dbf_adapter_register(adapter))
		goto failed;

	if (zfcp_setup_adapter_work_queue(adapter))
		goto failed;

	if (zfcp_fc_gs_setup(adapter))
		goto failed;

	rwlock_init(&adapter->port_list_lock);
	INIT_LIST_HEAD(&adapter->port_list);

	INIT_LIST_HEAD(&adapter->events.list);
	INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
	spin_lock_init(&adapter->events.list_lock);

	init_waitqueue_head(&adapter->erp_ready_wq);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	if (zfcp_erp_thread_setup(adapter))
		goto failed;

	adapter->service_level.seq_print = zfcp_print_sl;

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto failed;

	/* report size limit per scatter-gather segment */
	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;

	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;

	if (!zfcp_scsi_adapter_register(adapter))
		return adapter;

failed:
	zfcp_adapter_unregister(adapter);
	return ERR_PTR(-ENOMEM);
}
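The failed path funnels every setup error into a single unwind via zfcp_adapter_unregister() and reports failure through the kernel's ERR_PTR convention. A minimal, hypothetical caller sketch (example_ccw_probe is an invented name for illustration; IS_ERR()/PTR_ERR() are the standard decoders from <linux/err.h>):

#include <linux/err.h>

static int example_ccw_probe(struct ccw_device *cdev)
{
	struct zfcp_adapter *adapter;

	adapter = zfcp_adapter_enqueue(cdev);
	if (IS_ERR(adapter))
		return PTR_ERR(adapter);	/* -ENODEV or -ENOMEM */
	/* adapter is fully set up and registered at this point */
	return 0;
}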
Example #2
0
static inline void __gnix_ht_init_lk_list_head(gnix_ht_lk_lh_t *lh)
{
	dlist_init(&lh->head);
	rwlock_init(&lh->lh_lock);
}
Example #3
0
/*
 * Build a server list from a VLDB record.
 */
struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
					      struct key *key,
					      struct afs_vldb_entry *vldb,
					      u8 type_mask)
{
	struct afs_server_list *slist;
	struct afs_server *server;
	int ret = -ENOMEM, nr_servers = 0, i, j;

	for (i = 0; i < vldb->nr_servers; i++)
		if (vldb->fs_mask[i] & type_mask)
			nr_servers++;

	slist = kzalloc(sizeof(struct afs_server_list) +
			sizeof(struct afs_server_entry) * nr_servers,
			GFP_KERNEL);
	if (!slist)
		goto error;

	refcount_set(&slist->usage, 1);
	rwlock_init(&slist->lock);

	/* Make sure a record exists for each server in the list. */
	for (i = 0; i < vldb->nr_servers; i++) {
		if (!(vldb->fs_mask[i] & type_mask))
			continue;

		server = afs_lookup_server(cell, key, &vldb->fs_server[i]);
		if (IS_ERR(server)) {
			ret = PTR_ERR(server);
			if (ret == -ENOENT ||
			    ret == -ENOMEDIUM)
				continue;
			goto error_2;
		}

		/* Insertion-sort by UUID */
		for (j = 0; j < slist->nr_servers; j++)
			if (memcmp(&slist->servers[j].server->uuid,
				   &server->uuid,
				   sizeof(server->uuid)) >= 0)
				break;
		if (j < slist->nr_servers) {
			if (slist->servers[j].server == server) {
				afs_put_server(cell->net, server);
				continue;
			}

			memmove(slist->servers + j + 1,
				slist->servers + j,
				(slist->nr_servers - j) * sizeof(struct afs_server_entry));
		}

		slist->servers[j].server = server;
		slist->nr_servers++;
	}

	if (slist->nr_servers == 0) {
		ret = -EDESTADDRREQ;
		goto error_2;
	}

	return slist;

error_2:
	afs_put_serverlist(cell->net, slist);
error:
	return ERR_PTR(ret);
}
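The insertion sort above keeps slist->servers ordered by UUID so the array stays sorted as entries arrive. A standalone, userspace illustration of the same find-shift-insert pattern (plain C, not kernel code; the memcmp on fixed-size UUIDs is replaced by integer comparison, and duplicate handling is omitted):

#include <stdio.h>
#include <string.h>

static void insert_sorted(int *arr, int *n, int key)
{
	int j;

	/* find the first slot whose key compares >= the new key */
	for (j = 0; j < *n; j++)
		if (arr[j] >= key)
			break;
	/* shift the tail right by one, then drop the key in */
	memmove(&arr[j + 1], &arr[j], (*n - j) * sizeof(arr[0]));
	arr[j] = key;
	(*n)++;
}

int main(void)
{
	int a[8] = { 0 }, n = 0, i;
	int keys[] = { 3, 1, 2 };

	for (i = 0; i < 3; i++)
		insert_sorted(a, &n, keys[i]);
	for (i = 0; i < n; i++)
		printf("%d\n", a[i]);	/* prints 1 2 3 */
	return 0;
}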
Example #4
0
/*
 * Main function of picl daemon
 */
int
main(int argc, char **argv)
{
	struct	sigaction	act;
	int			c;
	sigset_t		ublk;


	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	if (getuid() != 0) {
		syslog(LOG_CRIT, MUST_BE_ROOT);
		return (0);
	}

	(void) rwlock_init(&init_lk, USYNC_THREAD, NULL);
	doreinit = 0;
	logflag = 1;
	dos_req_limit = DOS_PICL_REQUESTS_LIMIT;
	sliding_interval_ms = SLIDING_INTERVAL_MILLISECONDS;
	dos_ms = DOS_SLEEPTIME_MS;
	verbose_level = 0;

	/*
	 * parse arguments
	 */
	while ((c = getopt(argc, argv, "is:t:l:r:v:d:")) != EOF) {
		switch (c) {
		case 'd':
			dos_ms = strtol(optarg, (char **)NULL, 0);
			break;
		case 'i':
			logflag = 0;
			break;
		case 's':
			sliding_interval_ms = strtoll(optarg, (char **)NULL, 0);
			break;
		case 't':
			dos_req_limit = strtol(optarg, (char **)NULL, 0);
			break;
		case 'v':
			verbose_level = strtol(optarg, (char **)NULL, 0);
			logflag = 0;
			break;
		default:
			break;
		}
	}

	orig_time = gethrtime();

	/*
	 * is there a daemon already running?
	 */

	if (daemon_exists()) {
		syslog(LOG_CRIT, DAEMON_RUNNING);
		exit(1);
	}

	/*
	 * Mask off/block SIGALRM signal so that the environmental plug-in
	 * (piclenvd) can use it to simulate sleep() without being affected
	 * by time being set back. No other PICL plug-in should use SIGALRM
	 * or alarm() for now.
	 */
	(void) sigemptyset(&ublk);
	(void) sigaddset(&ublk, SIGALRM);
	(void) sigprocmask(SIG_BLOCK, &ublk, NULL);

	/*
	 * Ignore SIGHUP until all the initialization is done.
	 */
	act.sa_handler = SIG_IGN;
	(void) sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	if (sigaction(SIGHUP, &act, NULL) == -1)
		syslog(LOG_ERR, SIGACT_FAILED, strsignal(SIGHUP),
		    strerror(errno));

	if (logflag != 0) {	/* daemonize */
		pid_t pid;

		pid = fork();
		if (pid < 0)
			exit(1);
		if (pid > 0)
			/* parent */
			exit(0);

		/* child */
		if (chdir("/") == -1) {
			syslog(LOG_CRIT, CD_ROOT_FAILED);
			exit(1);
		}

		(void) setsid();
		closefrom(0);
		(void) open("/dev/null", O_RDWR, 0);
		(void) dup2(STDIN_FILENO, STDOUT_FILENO);
		(void) dup2(STDIN_FILENO, STDERR_FILENO);
		openlog(PICLD, LOG_PID, LOG_DAEMON);
	}

	/*
	 * Initialize the PICL Tree
	 */
	if (xptree_initialize(NULL) != PICL_SUCCESS) {
		syslog(LOG_CRIT, INIT_FAILED);
		exit(1);
	}

	if (setup_door()) {
		syslog(LOG_CRIT, DOOR_FAILED);
		exit(1);
	}

	/*
	 * setup signal handlers for post-init
	 */
	act.sa_sigaction = hup_handler;
	(void) sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
	if (sigaction(SIGHUP, &act, NULL) == -1)
		syslog(LOG_ERR, SIGACT_FAILED, strsignal(SIGHUP),
		    strerror(errno));

	/*
	 * wait for requests
	 */
	for (;;) {
		(void) pause();
		if (doreinit) {
			/*
			 * Block SIGHUP during reinitialization.
			 * Also mask off/block SIGALRM signal so that the
			 * environmental plug-in (piclenvd) can use it to
			 * simulate sleep() without being affected by time
			 * being set back. No other PICL plug-in should use
			 * SIGALRM or alarm() for now.
			 */
			(void) sigemptyset(&ublk);
			(void) sigaddset(&ublk, SIGHUP);
			(void) sigaddset(&ublk, SIGALRM);
			(void) sigprocmask(SIG_BLOCK, &ublk, NULL);
			(void) sigdelset(&ublk, SIGALRM);
			doreinit = 0;
			(void) rw_wrlock(&init_lk);
			xptree_destroy();
			(void) xptree_reinitialize();
			(void) rw_unlock(&init_lk);
			(void) sigprocmask(SIG_UNBLOCK, &ublk, NULL);
		}
	}
}
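The daemonize branch above (fork, parent exits, setsid, stdio redirected to /dev/null) is the classic SysV daemon sequence. A condensed sketch of just that sequence, assuming a Solaris/illumos libc for closefrom():

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static void daemonize(void)
{
	pid_t pid = fork();

	if (pid < 0)
		exit(1);	/* fork failed */
	if (pid > 0)
		exit(0);	/* parent exits; child is reparented to init */

	if (chdir("/") == -1)
		exit(1);
	(void) setsid();	/* new session: no controlling terminal */
	closefrom(0);		/* Solaris/illumos; glibc >= 2.34 also provides it */
	(void) open("/dev/null", O_RDWR, 0);	/* becomes fd 0 */
	(void) dup2(STDIN_FILENO, STDOUT_FILENO);
	(void) dup2(STDIN_FILENO, STDERR_FILENO);
}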
Example #5
0
/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}
	if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
		return ERR_PTR(-EINVAL);

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
		       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_servers_lock);

	/* Fill in the VL server list if we were given a list of addresses to
	 * use.
	 */
	if (addresses) {
		struct afs_vlserver_list *vllist;

		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_expiry = TIME64_MAX;
	} else {
		cell->dns_expiry = ktime_get_real_seconds();
	}

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
Example #6
0
static int __init zfcp_module_init(void)
{
	int retval = -ENOMEM;

	zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
					sizeof(struct ct_iu_gpn_ft_req));
	if (!zfcp_data.gpn_ft_cache)
		goto out;

	zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
					sizeof(struct fsf_qtcb));
	if (!zfcp_data.qtcb_cache)
		goto out_qtcb_cache;

	zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
					sizeof(struct fsf_status_read_buffer));
	if (!zfcp_data.sr_buffer_cache)
		goto out_sr_cache;

	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
					sizeof(struct zfcp_gid_pn_data));
	if (!zfcp_data.gid_pn_cache)
		goto out_gid_cache;

	mutex_init(&zfcp_data.config_mutex);
	rwlock_init(&zfcp_data.config_lock);

	zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
					sizeof(struct zfcp_els_adisc));
	if (!zfcp_data.adisc_cache)
		goto out_adisc_cache;

	zfcp_data.scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_data.scsi_transport_template)
		goto out_transport;

	retval = misc_register(&zfcp_cfdc_misc);
	if (retval) {
		pr_err("Registering the misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	retval = zfcp_ccw_register();
	if (retval) {
		pr_err("The zfcp device driver could not register with "
		       "the common I/O layer\n");
		goto out_ccw_register;
	}

	if (init_device)
		zfcp_init_device_setup(init_device);
	return 0;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_data.scsi_transport_template);
out_transport:
	kmem_cache_destroy(zfcp_data.adisc_cache);
out_adisc_cache:
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache:
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
	kmem_cache_destroy(zfcp_data.qtcb_cache);
out_qtcb_cache:
	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
out:
	return retval;
}
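zfcp_module_init() is a textbook instance of the kernel's reverse-order goto unwind: every successfully created resource gets a label, and a failure at step N jumps into the cleanup of step N-1, so teardown always mirrors setup. A stripped-down userspace skeleton of the idiom, assuming nothing beyond malloc/free:

#include <stdlib.h>

static int module_init_pattern(void)
{
	void *a, *b;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto out_a;

	/* success: both resources stay live for the module's lifetime */
	return 0;

out_a:
	free(a);	/* only the resources acquired before the failure */
out:
	return -1;
}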
Example #7
0
int tgt_init(const struct lu_env *env, struct lu_target *lut,
	     struct obd_device *obd, struct dt_device *dt,
	     struct tgt_opc_slice *slice, int request_fail_id,
	     int reply_fail_id)
{
	struct dt_object_format	 dof;
	struct lu_attr		 attr;
	struct lu_fid		 fid;
	struct dt_object	*o;
	int			 rc = 0;

	ENTRY;

	LASSERT(lut);
	LASSERT(obd);
	lut->lut_obd = obd;
	lut->lut_bottom = dt;
	lut->lut_last_rcvd = NULL;
	lut->lut_client_bitmap = NULL;
	obd->u.obt.obt_lut = lut;
	obd->u.obt.obt_magic = OBT_MAGIC;

	/* set request handler slice and parameters */
	lut->lut_slice = slice;
	lut->lut_reply_fail_id = reply_fail_id;
	lut->lut_request_fail_id = request_fail_id;

	/* sptlrpc variables init */
	rwlock_init(&lut->lut_sptlrpc_lock);
	sptlrpc_rule_set_init(&lut->lut_sptlrpc_rset);
	lut->lut_mds_capa = 1;
	lut->lut_oss_capa = 1;

	spin_lock_init(&lut->lut_flags_lock);
	lut->lut_sync_lock_cancel = NEVER_SYNC_ON_CANCEL;

	/* last_rcvd initialization is needed by replayable targets only */
	if (!obd->obd_replayable)
		RETURN(0);

	spin_lock_init(&lut->lut_translock);

	OBD_ALLOC(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
	if (lut->lut_client_bitmap == NULL)
		RETURN(-ENOMEM);

	memset(&attr, 0, sizeof(attr));
	attr.la_valid = LA_MODE;
	attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
	dof.dof_type = dt_mode_to_dft(S_IFREG);

	lu_local_obj_fid(&fid, LAST_RECV_OID);

	o = dt_find_or_create(env, lut->lut_bottom, &fid, &dof, &attr);
	if (IS_ERR(o)) {
		rc = PTR_ERR(o);
		CERROR("%s: cannot open LAST_RCVD: rc = %d\n", tgt_name(lut),
		       rc);
		GOTO(out_bitmap, rc);
	}

	lut->lut_last_rcvd = o;
	rc = tgt_server_data_init(env, lut);
	if (rc < 0)
		GOTO(out_obj, rc);

	/* prepare transactions callbacks */
	lut->lut_txn_cb.dtc_txn_start = tgt_txn_start_cb;
	lut->lut_txn_cb.dtc_txn_stop = tgt_txn_stop_cb;
	lut->lut_txn_cb.dtc_txn_commit = NULL;
	lut->lut_txn_cb.dtc_cookie = lut;
	lut->lut_txn_cb.dtc_tag = LCT_DT_THREAD | LCT_MD_THREAD;
	CFS_INIT_LIST_HEAD(&lut->lut_txn_cb.dtc_linkage);

	dt_txn_callback_add(lut->lut_bottom, &lut->lut_txn_cb);

	RETURN(0);
out_obj:
	lu_object_put(env, &lut->lut_last_rcvd->do_lu);
	lut->lut_last_rcvd = NULL;
out_bitmap:
	OBD_FREE(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
	lut->lut_client_bitmap = NULL;
	return rc;
}
Example #8
0
/*
 * Initialise an AFS network namespace record.
 */
static int __net_init afs_net_init(struct afs_net *net)
{
	struct afs_sysnames *sysnames;
	int ret;

	net->live = true;
	generate_random_uuid((unsigned char *)&net->uuid);

	INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation);
	mutex_init(&net->socket_mutex);

	net->cells = RB_ROOT;
	seqlock_init(&net->cells_lock);
	INIT_WORK(&net->cells_manager, afs_manage_cells);
	timer_setup(&net->cells_timer, afs_cells_timer, 0);

	spin_lock_init(&net->proc_cells_lock);
	INIT_LIST_HEAD(&net->proc_cells);

	seqlock_init(&net->fs_lock);
	net->fs_servers = RB_ROOT;
	INIT_LIST_HEAD(&net->fs_updates);
	INIT_HLIST_HEAD(&net->fs_proc);

	INIT_HLIST_HEAD(&net->fs_addresses4);
	INIT_HLIST_HEAD(&net->fs_addresses6);
	seqlock_init(&net->fs_addr_lock);

	INIT_WORK(&net->fs_manager, afs_manage_servers);
	timer_setup(&net->fs_timer, afs_servers_timer, 0);

	ret = -ENOMEM;
	sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
	if (!sysnames)
		goto error_sysnames;
	sysnames->subs[0] = (char *)&afs_init_sysname;
	sysnames->nr = 1;
	refcount_set(&sysnames->usage, 1);
	net->sysnames = sysnames;
	rwlock_init(&net->sysnames_lock);

	/* Register the /proc stuff */
	ret = afs_proc_init(net);
	if (ret < 0)
		goto error_proc;

	/* Initialise the cell DB */
	ret = afs_cell_init(net, rootcell);
	if (ret < 0)
		goto error_cell_init;

	/* Create the RxRPC transport */
	ret = afs_open_socket(net);
	if (ret < 0)
		goto error_open_socket;

	return 0;

error_open_socket:
	net->live = false;
	afs_cell_purge(net);
	afs_purge_servers(net);
error_cell_init:
	net->live = false;
	afs_proc_cleanup(net);
error_proc:
	afs_put_sysnames(net->sysnames);
error_sysnames:
	net->live = false;
	return ret;
}
Example #9
0
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory or the operation is not permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will get an imbalance in the mount's writer count
 * and a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free(f);
		return ERR_PTR(error);
	}

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
#ifdef FILE_OVER_MAX
		static int fd_dump_all_files;

		if (!fd_dump_all_files) {
			struct task_struct *p;

			xlog_printk(ANDROID_LOG_INFO, FS_TAG,
				    "(PID:%d)files %ld over old_max:%ld",
				    current->pid, (long)get_nr_files(), old_max);
			for_each_process(p) {
				pid_t pid = p->pid;
				struct files_struct *files = p->files;
				struct fdtable *fdt = files_fdtable(files);

				if (files && fdt)
					fd_show_open_files(pid, files, fdt);
			}
			fd_dump_all_files = 1;
		}
#endif
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
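get_empty_filp() demonstrates a cheap-then-exact limit check: per-CPU counters are fast to read but inaccurate, so the expensive percpu_counter_sum_positive() runs only when the approximate value already looks over the limit. A self-contained userspace sketch of the same two-step pattern (approx_count/exact_count are invented stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static long approx;	/* stand-in for a fast, possibly stale per-CPU sum */

static long approx_count(void) { return approx; }
static long exact_count(void)  { return approx; }	/* stand-in for the slow exact sum */

static bool over_limit(long limit)
{
	if (approx_count() < limit)
		return false;			/* fast path: clearly under the limit */
	return exact_count() >= limit;		/* slow path: confirm before failing */
}

int main(void)
{
	approx = 10;
	printf("%d %d\n", over_limit(100), over_limit(5));	/* 0 1 */
	return 0;
}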
Example #10
0
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
Example #11
0
/*ARGSUSED*/
void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
	(void) rwlock_init(&rwlp->rw_lock, USYNC_THREAD, NULL);
	rwlp->rw_owner = _KTHREAD_INVALID;
}
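Worth noting: this corpus overloads the name rwlock_init(). In the Linux kernel examples it is the one-argument initializer macro; here it is the Solaris/illumos libc function with signature int rwlock_init(rwlock_t *, int type, void *arg). A minimal userspace sketch, assuming <synch.h> on Solaris/illumos:

#include <synch.h>

static rwlock_t lk;

static int init_lock(void)
{
	/* USYNC_THREAD: the lock is visible to threads of this process only */
	return rwlock_init(&lk, USYNC_THREAD, NULL);	/* 0 on success */
}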
Example #12
0
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct ccw_device
 *
 * Returns:	0             if a new adapter was successfully enqueued
 *		-ENOMEM       if alloc failed
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Proc-fs entries are also created.
 * locks:	config_sema must be held to serialise changes to the adapter list
 */
int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	/*
	 * Note: It is safe to release the list_lock, as any list changes
	 * are protected by the config_sema, which must be held to get here
	 */

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;
	atomic_set(&adapter->refcount, 0);

	if (zfcp_qdio_allocate(adapter))
		goto qdio_allocate_failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_reqlist_alloc(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_adapter_debug_register(adapter))
		goto debug_register_failed;

	init_waitqueue_head(&adapter->remove_wq);
	init_waitqueue_head(&adapter->erp_thread_wqh);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->port_list_head);
	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	spin_lock_init(&adapter->req_list_lock);

	spin_lock_init(&adapter->hba_dbf_lock);
	spin_lock_init(&adapter->san_dbf_lock);
	spin_lock_init(&adapter->scsi_dbf_lock);
	spin_lock_init(&adapter->rec_dbf_lock);
	spin_lock_init(&adapter->req_q_lock);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	sema_init(&adapter->erp_ready_sem, 0);

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);

	/* mark adapter unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto sysfs_failed;

	write_lock_irq(&zfcp_data.config_lock);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
	list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_fc_nameserver_init(adapter);

	return 0;

sysfs_failed:
	zfcp_adapter_debug_unregister(adapter);
debug_register_failed:
	dev_set_drvdata(&ccw_device->dev, NULL);
	kfree(adapter->req_list);
failed_low_mem_buffers:
	zfcp_free_low_mem_buffers(adapter);
qdio_allocate_failed:
	zfcp_qdio_free(adapter);
	kfree(adapter);
	return -ENOMEM;
}
Example #13
0
static int __init zfcp_module_init(void)
{
	int retval = -ENOMEM;

	zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
			sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
	if (!zfcp_data.fsf_req_qtcb_cache)
		goto out;

	zfcp_data.sr_buffer_cache = zfcp_cache_create(
			sizeof(struct fsf_status_read_buffer), "zfcp_sr");
	if (!zfcp_data.sr_buffer_cache)
		goto out_sr_cache;

	zfcp_data.gid_pn_cache = zfcp_cache_create(
			sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
	if (!zfcp_data.gid_pn_cache)
		goto out_gid_cache;

	zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");

	INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
	sema_init(&zfcp_data.config_sema, 1);
	rwlock_init(&zfcp_data.config_lock);

	zfcp_data.scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_data.scsi_transport_template)
		goto out_transport;

	retval = misc_register(&zfcp_cfdc_misc);
	if (retval) {
		pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	retval = zfcp_ccw_register();
	if (retval) {
		pr_err("zfcp: The zfcp device driver could not register with "
		       "the common I/O layer\n");
		goto out_ccw_register;
	}

	if (zfcp_device_setup(device))
		zfcp_init_device_configure();

	goto out;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_data.scsi_transport_template);
out_transport:
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache:
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
	kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
out:
	return retval;
}
Example #14
0
static int lab4fs_fill_super(struct super_block * sb, void * data, int silent)
{
    struct buffer_head * bh;
    int blocksize = BLOCK_SIZE;
    unsigned long logic_sb_block;
    unsigned offset = 0;
    unsigned long sb_block = 1;
    struct lab4fs_super_block *es;
    struct lab4fs_sb_info *sbi;
    struct inode *root;
    int hblock;
    int err = 0;

    sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;

    sb->s_fs_info = sbi;
    memset(sbi, 0, sizeof(*sbi));
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    if (!blocksize) {
        LAB4ERROR("unable to set blocksize\n");
        err = -EIO;
        goto out_fail;
    }

    /*
     * If the superblock doesn't start on a hardware sector boundary,
     * calculate the offset.  
     */
    if (blocksize != BLOCK_SIZE) {
        logic_sb_block = (sb_block * BLOCK_SIZE) / blocksize;
        offset = (sb_block * BLOCK_SIZE) % blocksize;
    } else {
        logic_sb_block = sb_block;
    }

    if (!(bh = sb_bread(sb, logic_sb_block))) {
        LAB4ERROR("unable to read super block\n");
        err = -EIO;
        goto out_fail;
    }

    es = (struct lab4fs_super_block *) (((char *)bh->b_data) + offset);
    sb->s_magic = le32_to_cpu(es->s_magic);
    if (sb->s_magic != LAB4FS_SUPER_MAGIC) {
        if (!silent)
            LAB4ERROR("VFS: Can't find lab4fs filesystem on dev %s.\n",
                    sb->s_id);
        err = -EINVAL;
        goto failed_mount;
    }

    sbi->s_sb = es;
    blocksize = le32_to_cpu(es->s_block_size);
    hblock = bdev_hardsect_size(sb->s_bdev);
    if (sb->s_blocksize != blocksize) {
        /*
         * Make sure the blocksize for the filesystem is larger
         * than the hardware sectorsize for the machine.
         */
        if (blocksize < hblock) {
            LAB4ERROR("blocksize %d too small for "
                    "device blocksize %d.\n", blocksize, hblock);
            err = -EINVAL;
            goto failed_mount;
        }

        brelse (bh);
        sb_set_blocksize(sb, blocksize);
        logic_sb_block = (sb_block * BLOCK_SIZE) / blocksize;
        offset = (sb_block * BLOCK_SIZE) % blocksize;
        bh = sb_bread(sb, logic_sb_block);
        if (!bh) {
            LAB4ERROR("Can't read superblock on 2nd try.\n");
            err = -EIO;
            goto failed_mount;
        }
        es = (struct lab4fs_super_block *)(((char *)bh->b_data) + offset);
        sbi->s_sb = es;
        if (es->s_magic != cpu_to_le32(LAB4FS_SUPER_MAGIC)) {
            LAB4ERROR("Magic mismatch, very weird !\n");
            err = -EINVAL;
            goto failed_mount;
        }
    }
    sb->s_maxbytes = lab4fs_max_size(es);
    sbi->s_sbh = bh;
    sbi->s_log_block_size = log2(sb->s_blocksize);
    sbi->s_first_ino = le32_to_cpu(es->s_first_inode);
    sbi->s_inode_size = le32_to_cpu(es->s_inode_size);
    sbi->s_log_inode_size = log2(sbi->s_inode_size);
    sbi->s_inode_table = le32_to_cpu(es->s_inode_table);
    sbi->s_data_blocks = le32_to_cpu(es->s_data_blocks);
    sbi->s_next_generation = 0;
    sbi->s_free_inodes_count = le32_to_cpu(es->s_free_inodes_count);
    sbi->s_free_data_blocks_count = le32_to_cpu(es->s_free_data_blocks_count);
    sbi->s_inodes_count = le32_to_cpu(es->s_inodes_count);
    sbi->s_blocks_count = le32_to_cpu(es->s_blocks_count);

    sbi->s_inode_bitmap.nr_valid_bits = le32_to_cpu(es->s_inodes_count);
    sbi->s_data_bitmap.nr_valid_bits = le32_to_cpu(es->s_blocks_count) 
        - le32_to_cpu(es->s_data_blocks);

    rwlock_init(&sbi->rwlock);
    sb->s_op = &lab4fs_super_ops;

    err = bitmap_setup(&sbi->s_inode_bitmap, sb, le32_to_cpu(es->s_inode_bitmap));
    if (err)
        goto out_fail;
    err = bitmap_setup(&sbi->s_data_bitmap, sb, le32_to_cpu(es->s_data_bitmap));
    if (err)
        goto out_fail;

    sbi->s_root_inode = le32_to_cpu(es->s_root_inode);
    root = iget(sb, sbi->s_root_inode);
    LAB4DEBUG("I can get the root inode\n");
    print_inode(root);
    LAB4DEBUG("END\n");
    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
        iput(root);
        kfree(sbi);
        return -ENOMEM;
    }
    return 0;

failed_mount:
    brelse(bh);
out_fail:
    kfree(sbi);
    sb->s_fs_info = NULL;
    return err;
}
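The logic_sb_block/offset arithmetic above relocates the fixed 1 KiB on-disk superblock position into whatever logical block size the device imposes. A standalone worked example, assuming a 4 KiB device block size:

#include <stdio.h>

int main(void)
{
	unsigned long sb_block = 1;	/* superblock lives in 1 KiB block 1 */
	unsigned long blocksize = 4096;	/* device/logical block size */
	unsigned long logic_sb_block = (sb_block * 1024) / blocksize;
	unsigned long offset = (sb_block * 1024) % blocksize;

	printf("logical block %lu, byte offset %lu\n",
	       logic_sb_block, offset);	/* logical block 0, byte offset 1024 */
	return 0;
}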
Example #15
0
static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	const struct firmware *firmware;
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_host_endpoint *bulk_out_ep;
	struct usb_host_endpoint *bulk_in_ep;
	struct hci_dev *hdev;
	struct bfusb_data *data;

	BT_DBG("intf %p id %p", intf, id);

	if (ignore)
		return -ENODEV;

	/* Check number of endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 2)
		return -EIO;

	bulk_out_ep = &intf->cur_altsetting->endpoint[0];
	bulk_in_ep  = &intf->cur_altsetting->endpoint[1];

	if (!bulk_out_ep || !bulk_in_ep) {
		BT_ERR("Bulk endpoints not found");
		goto done;
	}

	/* Initialize control structure and load firmware */
	data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL);
	if (!data) {
		BT_ERR("Can't allocate memory for control structure");
		goto done;
	}

	data->udev = udev;
	data->bulk_in_ep    = bulk_in_ep->desc.bEndpointAddress;
	data->bulk_out_ep   = bulk_out_ep->desc.bEndpointAddress;
	data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize);

	rwlock_init(&data->lock);

	data->reassembly = NULL;

	skb_queue_head_init(&data->transmit_q);
	skb_queue_head_init(&data->pending_q);
	skb_queue_head_init(&data->completed_q);

	if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
		BT_ERR("Firmware request failed");
		goto error;
	}

	BT_DBG("firmware data %p size %d", firmware->data, firmware->size);

	if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) {
		BT_ERR("Firmware loading failed");
		goto release;
	}

	release_firmware(firmware);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		goto error;
	}

	data->hdev = hdev;

	hdev->type = HCI_USB;
	hdev->driver_data = data;
	SET_HCIDEV_DEV(hdev, &intf->dev);

	hdev->open     = bfusb_open;
	hdev->close    = bfusb_close;
	hdev->flush    = bfusb_flush;
	hdev->send     = bfusb_send_frame;
	hdev->destruct = bfusb_destruct;
	hdev->ioctl    = bfusb_ioctl;

	hdev->owner = THIS_MODULE;

	if (hci_register_dev(hdev) < 0) {
		BT_ERR("Can't register HCI device");
		hci_free_dev(hdev);
		goto error;
	}

	usb_set_intfdata(intf, data);

	return 0;

release:
	release_firmware(firmware);

error:
	kfree(data);

done:
	return -EIO;
}
Example #16
0
int
main(void)
{
        uint64_t s_b, e_b, i;
        ck_bytelock_t bytelock = CK_BYTELOCK_INITIALIZER;
        rwlock_t naive;

        for (i = 0; i < STEPS; i++) {
                ck_bytelock_write_lock(&bytelock, 1);
                ck_bytelock_write_unlock(&bytelock);
        }

        s_b = rdtsc();
        for (i = 0; i < STEPS; i++) {
                ck_bytelock_write_lock(&bytelock, 1);
                ck_bytelock_write_unlock(&bytelock);
        }
        e_b = rdtsc();
        printf("WRITE: bytelock %15" PRIu64 "\n", (e_b - s_b) / STEPS);

        rwlock_init(&naive);
        for (i = 0; i < STEPS; i++) {
                rwlock_write_lock(&naive);
                rwlock_write_unlock(&naive);
        }

        s_b = rdtsc();
        for (i = 0; i < STEPS; i++) {
                rwlock_write_lock(&naive);
                rwlock_write_unlock(&naive);
        }
        e_b = rdtsc();
        printf("WRITE: naive    %15" PRIu64 "\n", (e_b - s_b) / STEPS);

        for (i = 0; i < STEPS; i++) {
                ck_bytelock_read_lock(&bytelock, 1);
                ck_bytelock_read_unlock(&bytelock, 1);
        }
        s_b = rdtsc();
        for (i = 0; i < STEPS; i++) {
                ck_bytelock_read_lock(&bytelock, 1);
                ck_bytelock_read_unlock(&bytelock, 1);
        }
        e_b = rdtsc();
        printf("READ:  bytelock %15" PRIu64 "\n", (e_b - s_b) / STEPS);

        for (i = 0; i < STEPS; i++) {
                rwlock_read_lock(&naive);
                rwlock_read_unlock(&naive);
        }

        s_b = rdtsc();
        for (i = 0; i < STEPS; i++) {
                rwlock_read_lock(&naive);
                rwlock_read_unlock(&naive);
        }
        e_b = rdtsc();
        printf("READ:  naive    %15" PRIu64 "\n", (e_b - s_b) / STEPS);

        return (0);
}
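Each measured loop is bracketed by rdtsc() reads, and the cycle delta divided by STEPS gives a rough per-iteration cost (no serializing instruction is used, so the numbers are indicative only). One common definition of the helper, assuming x86-64 with GCC/Clang inline assembly; the actual harness may define it elsewhere:

#include <stdint.h>

static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;

	/* EDX:EAX <- 64-bit time-stamp counter */
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}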
Example #17
0
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
Example #18
0
int tgt_init(const struct lu_env *env, struct lu_target *lut,
	     struct obd_device *obd, struct dt_device *dt,
	     struct tgt_opc_slice *slice, int request_fail_id,
	     int reply_fail_id)
{
	struct dt_object_format	 dof;
	struct lu_attr		 attr;
	struct lu_fid		 fid;
	struct dt_object	*o;
	int			 rc = 0;

	ENTRY;

	LASSERT(lut);
	LASSERT(obd);
	lut->lut_obd = obd;
	lut->lut_bottom = dt;
	lut->lut_last_rcvd = NULL;
	lut->lut_client_bitmap = NULL;
	obd->u.obt.obt_lut = lut;
	obd->u.obt.obt_magic = OBT_MAGIC;

	/* set request handler slice and parameters */
	lut->lut_slice = slice;
	lut->lut_reply_fail_id = reply_fail_id;
	lut->lut_request_fail_id = request_fail_id;

	/* sptlrpc variables init */
	rwlock_init(&lut->lut_sptlrpc_lock);
	sptlrpc_rule_set_init(&lut->lut_sptlrpc_rset);
	lut->lut_mds_capa = 1;
	lut->lut_oss_capa = 1;

	/* last_rcvd initialization is needed by replayable targets only */
	if (!obd->obd_replayable)
		RETURN(0);

	spin_lock_init(&lut->lut_translock);

	OBD_ALLOC(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
	if (lut->lut_client_bitmap == NULL)
		RETURN(-ENOMEM);

	memset(&attr, 0, sizeof(attr));
	attr.la_valid = LA_MODE;
	attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
	dof.dof_type = dt_mode_to_dft(S_IFREG);

	lu_local_obj_fid(&fid, LAST_RECV_OID);

	o = dt_find_or_create(env, lut->lut_bottom, &fid, &dof, &attr);
	if (!IS_ERR(o)) {
		lut->lut_last_rcvd = o;
	} else {
		OBD_FREE(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
		lut->lut_client_bitmap = NULL;
		rc = PTR_ERR(o);
		CERROR("cannot open %s: rc = %d\n", LAST_RCVD, rc);
	}

	RETURN(rc);
}
Example #19
0
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct ccw_device
 *
 * Returns:	0             if a new adapter was successfully enqueued
 *		-ENOMEM       if alloc failed
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Proc-fs entries are also created.
 * locks: config_mutex must be held to serialize changes to the adapter list
 */
int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	/*
	 * Note: It is safe to release the list_lock, as any list changes
	 * are protected by the config_mutex, which must be held to get here
	 */

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;
	atomic_set(&adapter->refcount, 0);

	if (zfcp_qdio_setup(adapter))
		goto qdio_failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto low_mem_buffers_failed;

	if (zfcp_reqlist_alloc(adapter))
		goto low_mem_buffers_failed;

	if (zfcp_dbf_adapter_register(adapter))
		goto debug_register_failed;

	if (zfcp_setup_adapter_work_queue(adapter))
		goto work_queue_failed;

	if (zfcp_fc_gs_setup(adapter))
		goto generic_services_failed;

	init_waitqueue_head(&adapter->remove_wq);
	init_waitqueue_head(&adapter->erp_ready_wq);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->port_list_head);
	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);
	INIT_LIST_HEAD(&adapter->events.list);

	INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
	spin_lock_init(&adapter->events.list_lock);

	spin_lock_init(&adapter->req_list_lock);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	if (zfcp_erp_thread_setup(adapter))
		goto erp_thread_failed;

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);

	adapter->service_level.seq_print = zfcp_print_sl;

	/* mark adapter unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto sysfs_failed;

	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);

	if (!zfcp_adapter_scsi_register(adapter))
		return 0;

sysfs_failed:
	zfcp_erp_thread_kill(adapter);
erp_thread_failed:
	zfcp_fc_gs_destroy(adapter);
generic_services_failed:
	zfcp_destroy_adapter_work_queue(adapter);
work_queue_failed:
	zfcp_dbf_adapter_unregister(adapter->dbf);
debug_register_failed:
	dev_set_drvdata(&ccw_device->dev, NULL);
	kfree(adapter->req_list);
low_mem_buffers_failed:
	zfcp_free_low_mem_buffers(adapter);
qdio_failed:
	zfcp_qdio_destroy(adapter->qdio);
	kfree(adapter);
	return -ENOMEM;
}
Example #20
0
/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int rc;

	ia->ri_dma_mr = NULL;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_device, devattr);
	if (rc) {
		dprintk("RPC:       %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	if (memreg == RPCRDMA_FRMR) {
		if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
		    (devattr->max_fast_reg_page_list_len == 0)) {
			dprintk("RPC:       %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_device->alloc_fmr) {
			dprintk("RPC:       %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			rc = -EINVAL;
			goto out3;
		}
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		break;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC:       %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}
Example #21
0
/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
			  struct smc_ib_device *smcibdev, u8 ibport,
			  char *peer_systemid, unsigned short vlan_id,
			  struct smcd_dev *smcismdev, u64 peer_gid)
{
	struct smc_link_group *lgr;
	struct smc_link *lnk;
	u8 rndvec[3];
	int rc = 0;
	int i;

	if (is_smcd && vlan_id) {
		rc = smc_ism_get_vlan(smcismdev, vlan_id);
		if (rc)
			goto out;
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = -ENOMEM;
		goto out;
	}
	lgr->is_smcd = is_smcd;
	lgr->sync_err = 0;
	lgr->vlan_id = vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	lgr->conns_all = RB_ROOT;
	if (is_smcd) {
		/* SMC-D specific settings */
		lgr->peer_gid = peer_gid;
		lgr->smcd = smcismdev;
	} else {
		/* SMC-R specific settings */
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);

		lnk = &lgr->lnk[SMC_SINGLE_LINK];
		/* initialize link */
		lnk->state = SMC_LNK_ACTIVATING;
		lnk->link_id = SMC_SINGLE_LINK;
		lnk->smcibdev = smcibdev;
		lnk->ibport = ibport;
		lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
		if (!smcibdev->initialized)
			smc_ib_setup_per_ibdev(smcibdev);
		get_random_bytes(rndvec, sizeof(rndvec));
		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
			(rndvec[2] << 16);
		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
					  vlan_id, lnk->gid, &lnk->sgid_index);
		if (rc)
			goto free_lgr;
		rc = smc_llc_link_init(lnk);
		if (rc)
			goto free_lgr;
		rc = smc_wr_alloc_link_mem(lnk);
		if (rc)
			goto clear_llc_lnk;
		rc = smc_ib_create_protection_domain(lnk);
		if (rc)
			goto free_link_mem;
		rc = smc_ib_create_queue_pair(lnk);
		if (rc)
			goto dealloc_pd;
		rc = smc_wr_create_link(lnk);
		if (rc)
			goto destroy_qp;
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(&smc_lgr_list.lock);
	list_add(&lgr->list, &smc_lgr_list.list);
	spin_unlock_bh(&smc_lgr_list.lock);
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk);
free_lgr:
	kfree(lgr);
out:
	return rc;
}
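The initial packet sequence number above is assembled from three random bytes into a 24-bit value, least significant byte first: rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16). A standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rndvec[3] = { 0x12, 0x34, 0x56 };
	uint32_t psn = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);

	printf("0x%06x\n", psn);	/* prints 0x563412 */
	return 0;
}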
Example #22
0
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 * @mask:		flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map = RB_ROOT;
	rwlock_init(&tree->lock);
}
Example #23
0
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	rwlock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, "cifs_max_pending set to min of 2");
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, "cifs_max_pending set to max of 256");
	}

	rc = cifs_fscache_register();
	if (rc)
		goto out;

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_unregister_filesystem;
#endif

	return 0;

#ifdef CONFIG_CIFS_UPCALL
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
#endif
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
	cifs_proc_clean();
	cifs_fscache_unregister();
 out:
	return rc;
}
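The cifs_max_pending bounds check above is a plain clamp to the [2, 256] range; a standalone equivalent:

#include <stdio.h>

static int clamp_pending(int v)
{
	if (v < 2)
		return 2;
	if (v > 256)
		return 256;
	return v;
}

int main(void)
{
	printf("%d %d %d\n", clamp_pending(1), clamp_pending(50),
	       clamp_pending(1000));	/* 2 50 256 */
	return 0;
}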
Example #24
0
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct ccw_device
 *
 * Returns:	struct zfcp_adapter*
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Proc-fs entries are also created.
 */
struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	if (!get_device(&ccw_device->dev))
		return ERR_PTR(-ENODEV);

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter) {
		put_device(&ccw_device->dev);
		return ERR_PTR(-ENOMEM);
	}

	kref_init(&adapter->ref);

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);

	if (zfcp_qdio_setup(adapter))
		goto failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed;

	if (zfcp_reqlist_alloc(adapter))
		goto failed;

	if (zfcp_dbf_adapter_register(adapter))
		goto failed;

	if (zfcp_setup_adapter_work_queue(adapter))
		goto failed;

	if (zfcp_fc_gs_setup(adapter))
		goto failed;

	rwlock_init(&adapter->port_list_lock);
	INIT_LIST_HEAD(&adapter->port_list);

	init_waitqueue_head(&adapter->erp_ready_wq);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	spin_lock_init(&adapter->req_list_lock);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	if (zfcp_erp_thread_setup(adapter))
		goto failed;

	adapter->service_level.seq_print = zfcp_print_sl;

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto failed;

	if (!zfcp_adapter_scsi_register(adapter))
		return adapter;

failed:
	zfcp_adapter_unregister(adapter);
	return ERR_PTR(-ENOMEM);
}
Example #25
0
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
	INIT_LIST_HEAD(&GlobalServerList);	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount,0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if(cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1,("cifs_max_pending set to min of 2"));
	} else if(cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1,("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (!rc) {
		rc = cifs_init_mids();
		if (!rc) {
			rc = cifs_init_request_bufs();
			if (!rc) {
				rc = register_filesystem(&cifs_fs_type);
				if (!rc) {                
					rc = (int)kernel_thread(cifs_oplock_thread, NULL, 
						CLONE_FS | CLONE_FILES | CLONE_VM);
					if(rc > 0)
						return 0;
					else 
						cERROR(1,("error %d create oplock thread",rc));
				}
				cifs_destroy_request_bufs();
			}
			cifs_destroy_mids();
		}
		cifs_destroy_inodecache();
	}
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
Example #26
0
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval = -ENOMEM;

	kref_get(&adapter->ref);

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port) {
		put_device(&port->sysfs_device);
		retval = -EEXIST;
		goto err_out;
	}

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		goto err_out;

	rwlock_init(&port->unit_list_lock);
	INIT_LIST_HEAD(&port->unit_list);

	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;
	port->sysfs_device.parent = &adapter->ccw_device->dev;
	port->sysfs_device.release = zfcp_port_release;

	if (dev_set_name(&port->sysfs_device, "0x%016llx",
			 (unsigned long long)wwpn)) {
		kfree(port);
		goto err_out;
	}
	retval = -EINVAL;

	if (device_register(&port->sysfs_device)) {
		put_device(&port->sysfs_device);
		goto err_out;
	}

	if (sysfs_create_group(&port->sysfs_device.kobj,
			       &zfcp_sysfs_port_attrs))
		goto err_out_put;

	write_lock_irq(&adapter->port_list_lock);
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;

err_out_put:
	device_unregister(&port->sysfs_device);
err_out:
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
}
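The insertion takes adapter->port_list_lock as a writer (write_lock_irq); lookups such as zfcp_get_port_by_wwpn take it as a reader, so many lookups can run concurrently. A hedged read-side sketch with a hypothetical my_port struct, not the real zfcp types:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_port {
	struct list_head list;
	u64 wwpn;
};

/* walk the list under the read lock; writers (list_add_tail above)
 * hold the write lock, so the walk sees a consistent list */
static struct my_port *my_port_lookup(rwlock_t *lock,
				      struct list_head *head, u64 wwpn)
{
	struct my_port *port, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(lock, flags);
	list_for_each_entry(port, head, list) {
		if (port->wwpn == wwpn) {
			found = port;
			break;
		}
	}
	read_unlock_irqrestore(lock, flags);
	return found;
}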
Example #27
struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct sk_filter *filter;

		memcpy(newsk, sk, sk->sk_prot->obj_size);

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep	 = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}
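sk_clone copies the whole parent socket with memcpy and then re-runs rwlock_init on sk_dst_lock and sk_callback_lock: a byte copy duplicates the parent's lock words, which may be held at that moment, so the clone needs fresh, unlocked state. The same point distilled into a hedged sketch with a hypothetical struct obj:

#include <linux/spinlock.h>
#include <linux/string.h>

struct obj {
	int data;
	rwlock_t lock;
};

/* the memcpy copies the lock state too, so the clone must
 * re-initialise its lock to a fresh, unlocked state */
static void obj_clone(struct obj *dst, const struct obj *src)
{
	memcpy(dst, src, sizeof(*dst));
	rwlock_init(&dst->lock);
}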
Example #28
static struct fblock *fb_crr_rx_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu, *tmp_rx_bitstream;
	unsigned char *tmp_expected_seq_nr, *tmp_rx_win_nr;
	struct sk_buff_head *tmp_list;
	struct fblock *fb;
	struct fb_crr_rx_priv __percpu *fb_priv;
	rwlock_t *tmp_rx_lock;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_crr_rx_priv);
	if (!fb_priv)
		goto err;

	if (unlikely((tmp_rx_bitstream = kzalloc(sizeof(unsigned int), GFP_ATOMIC)) == NULL))
		goto err_;

	if (unlikely((tmp_rx_win_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err__;

	if (unlikely((tmp_rx_lock = kzalloc(sizeof(rwlock_t), GFP_ATOMIC)) == NULL))
		goto err0;

	if (unlikely((tmp_list = kzalloc(sizeof(struct sk_buff_head), GFP_ATOMIC)) == NULL))
		goto err1;

	if (unlikely((tmp_expected_seq_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err1a;

	rwlock_init(tmp_rx_lock);

	*tmp_rx_bitstream = 0;
	*tmp_rx_win_nr = 0;
	*tmp_expected_seq_nr = 1;

	skb_queue_head_init(tmp_list);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_crr_rx_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		/* all CPUs share the one rx_lock allocated above,
		 * rather than a per-CPU lock */
		fb_priv_cpu->rx_lock = tmp_rx_lock;
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->rx_seq_nr = tmp_expected_seq_nr;
		fb_priv_cpu->list = tmp_list;
		fb_priv_cpu->rx_bitstream = tmp_rx_bitstream;
		fb_priv_cpu->rx_win_nr = tmp_rx_win_nr;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_crr_rx_netrx;
	fb->event_rx = fb_crr_rx_event;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	__module_get(THIS_MODULE);
	printk(KERN_ERR "[CRR_RX] Initialization passed!\n");
	return fb;
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(tmp_expected_seq_nr);
err1a:
	kfree(tmp_list);
err1:
	kfree(tmp_rx_lock);
err0:
	kfree(tmp_rx_win_nr);
err__:
	kfree(tmp_rx_bitstream);
err_:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	printk(KERN_ERR "[CRR_RX] Initialization failed!\n");
	return NULL;
}
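This constructor allocates a single rwlock_t on the heap and hands the same pointer to every per-CPU private struct, so all CPUs serialise on one lock. The core of that pattern as a hedged sketch (shared_rwlock_alloc is a hypothetical helper, not part of the fblock API):

#include <linux/slab.h>
#include <linux/spinlock.h>

/* one heap-allocated rwlock intended to be shared by several
 * per-CPU private structs */
static rwlock_t *shared_rwlock_alloc(gfp_t gfp)
{
	rwlock_t *lock = kmalloc(sizeof(*lock), gfp);

	if (!lock)
		return NULL;
	rwlock_init(lock);	/* init before publishing the pointer */
	return lock;
}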
Example #29
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	bioset_free(md->bs);
 bad_no_bioset:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	module_put(THIS_MODULE);
 bad0:
	kfree(md);
	return NULL;
}
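The md->map_lock initialised above is a classic read-mostly rwlock: the I/O path reads the current table pointer under the read lock while table swaps take the write lock. A hedged sketch of that pointer-guard pattern (dev_state and dev_get_map are hypothetical names, not the dm API):

#include <linux/spinlock.h>

struct table;			/* opaque for this sketch */

struct dev_state {		/* hypothetical stand-in for mapped_device */
	rwlock_t map_lock;
	struct table *map;
};

static struct table *dev_get_map(struct dev_state *ds)
{
	struct table *t;

	read_lock(&ds->map_lock);	/* many concurrent readers */
	t = ds->map;
	read_unlock(&ds->map_lock);
	return t;
}

static void dev_set_map(struct dev_state *ds, struct table *t)
{
	write_lock(&ds->map_lock);	/* exclusive against readers */
	ds->map = t;
	write_unlock(&ds->map_lock);
}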
Example #30
int
ssh_interceptor_init_kernel_services(void)
{
  /* Interceptor object is always preallocated. */
  SSH_ASSERT(ssh_interceptor_context == NULL);
  memset(&interceptor_struct, 0, sizeof(interceptor_struct));
  ssh_interceptor_context = &interceptor_struct;

#ifdef DEBUG_LIGHT
  spin_lock_init(&ssh_interceptor_context->statistics_lock);
#endif /* DEBUG_LIGHT */

  /* General init */
  ssh_interceptor_context->interceptor_lock = ssh_kernel_mutex_alloc();
  ssh_interceptor_context->packet_lock = ssh_kernel_mutex_alloc();

  if (ssh_interceptor_context->interceptor_lock == NULL
      || ssh_interceptor_context->packet_lock == NULL)
    goto error;

  rwlock_init(&ssh_interceptor_context->if_table_lock);

  /* Init packet data structure */
  if (!ssh_interceptor_packet_freelist_init(ssh_interceptor_context))
    {
      printk(KERN_ERR
             "VPNClient packet processing engine failed to start "
             "(out of memory).\n");
      goto error;
    }

  if (ssh_interceptor_dst_entry_cache_init(ssh_interceptor_context) == FALSE)
    {
      printk(KERN_ERR "VPNClient packet processing engine "
	     "failed to start, dst cache initialization failed.");
      goto error;
    }

  /* Initialize ipm channel */
  if (!ssh_interceptor_ipm_init(ssh_interceptor_context))
    {
      printk(KERN_ERR
             "VPNClient packet processing engine failed to start "
             "(proc filesystem initialization error)\n");
      goto error1;
    }

  return 0;

 error1:
  local_bh_disable();
  ssh_interceptor_packet_freelist_uninit(ssh_interceptor_context);
  local_bh_enable();

 error:
  ssh_interceptor_dst_entry_cache_uninit(ssh_interceptor_context);
  ssh_kernel_mutex_free(ssh_interceptor_context->interceptor_lock);
  ssh_interceptor_context->interceptor_lock = NULL;

  ssh_kernel_mutex_free(ssh_interceptor_context->packet_lock);
  ssh_interceptor_context->packet_lock = NULL;

  ssh_interceptor_context = NULL;

  return -ENOMEM;
}
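Here if_table_lock is initialised at runtime with rwlock_init because it lives inside a zeroed struct; a statically allocated lock could use DEFINE_RWLOCK instead. When readers can run in softirq (packet-path) context, process-context writers must take the _bh variant so a softirq reader cannot deadlock against a writer on the same CPU; plain read_lock stays safe for readers because read locks are shared. A hedged sketch under those assumptions, not the actual interceptor code:

#include <linux/spinlock.h>

/* DEFINE_RWLOCK is the static counterpart of rwlock_init();
 * table_generation is a hypothetical stand-in for the if_table */
static DEFINE_RWLOCK(table_lock);
static int table_generation;

static void table_update(void)
{
	write_lock_bh(&table_lock);	/* keep softirq readers out */
	table_generation++;
	write_unlock_bh(&table_lock);
}

static int table_read(void)
{
	int gen;

	read_lock(&table_lock);		/* readers may share the lock */
	gen = table_generation;
	read_unlock(&table_lock);
	return gen;
}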