Example no. 1
0
static int bcmsdh_sdmmc_probe(struct sdio_func *func,
                              const struct sdio_device_id *id)
{
	int ret = 0;

	if (func == NULL)
		return -EINVAL;

	sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
	sd_info(("sdio_device: 0x%04x\n", func->device));
	sd_info(("Function#: 0x%04x\n", func->num));

	/* 4318 doesn't have function 2 */
	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
		ret = sdioh_probe(func);
#ifdef CONFIG_MACH_NOTLE
	if (func->num == 2) {
		sdmmc_pm_workqueue = create_freezable_workqueue("bcmsdh_sdmmc");
		if (sdmmc_pm_workqueue == NULL) {
			sd_err(("bcmsdh fail to create workqueue\n"));
			return -EINVAL;
		}
		INIT_DELAYED_WORK(&bcmshd_resume_work, bcmshd_resume_delayed_work_fn);
	}
#endif
	return ret;
}
Example no. 2
0
/* dump the information of B-tree */
static void dump_btree(read_node_fn reader, struct sd_inode *inode)
{
#ifdef DEBUG
	sd_info("btree> BEGIN");
	traverse_btree(reader, inode, dump_cb, NULL);
	sd_info("btree> END");
#endif
}
Example no. 3
0
static int create_work_queues(void)
{
    struct work_queue *util_wq;

    if (init_work_queue(get_nr_nodes))
        return -1;

    if (wq_net_threads) {
        sd_info("# of threads in net workqueue: %d", wq_net_threads);
        sys->net_wqueue = create_fixed_work_queue("net", wq_net_threads);
    } else {
        sd_info("net workqueue is created as unlimited, it is not recommended!");
        sys->net_wqueue = create_work_queue("net", WQ_UNLIMITED);
    }
    if (wq_gway_threads) {
        sd_info("# of threads in gway workqueue: %d", wq_gway_threads);
        sys->gateway_wqueue = create_fixed_work_queue("gway", wq_gway_threads);
    } else {
        sd_info("gway workqueue is created as unlimited, it is not recommended!");
        sys->gateway_wqueue = create_work_queue("gway", WQ_UNLIMITED);
    }
    if (wq_io_threads) {
        sd_info("# of threads in io workqueue: %d", wq_io_threads);
        sys->io_wqueue = create_fixed_work_queue("io", wq_io_threads);
    } else {
        sd_info("io workqueue is created as unlimited, it is not recommended!");
        sys->io_wqueue = create_work_queue("io", WQ_UNLIMITED);
    }
    if (wq_recovery_threads) {
        sd_info("# of threads in rw workqueue: %d", wq_recovery_threads);
        sys->recovery_wqueue = create_fixed_work_queue("rw", wq_recovery_threads);
    } else {
        sd_info("recovery workqueue is created as unlimited, it is not recommended!");
        sys->recovery_wqueue = create_work_queue("rw", WQ_UNLIMITED);
    }
    sys->deletion_wqueue = create_ordered_work_queue("deletion");
    sys->block_wqueue = create_ordered_work_queue("block");
    sys->md_wqueue = create_ordered_work_queue("md");
    if (wq_async_threads) {
        sd_info("# of threads in async_req workqueue: %d", wq_async_threads);
        sys->areq_wqueue = create_fixed_work_queue("async_req", wq_async_threads);
    } else {
        sd_info("async_req workqueue is created as unlimited, it is not recommended!");
        sys->areq_wqueue = create_work_queue("async_req", WQ_UNLIMITED);
    }
    if (!sys->gateway_wqueue || !sys->io_wqueue || !sys->recovery_wqueue ||
            !sys->deletion_wqueue || !sys->block_wqueue || !sys->md_wqueue ||
            !sys->areq_wqueue)
        return -1;

    util_wq = create_ordered_work_queue("util");
    if (!util_wq)
        return -1;
    register_util_wq(util_wq);

    return 0;
}
static void bcmsdh_sdmmc_remove(struct sdio_func *func)
{
	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
	sd_info(("sdio_device: 0x%04x\n", func->device));
	sd_info(("Function#: 0x%04x\n", func->num));

	if (func->num == 2) {
		sd_trace(("F2 found, calling bcmsdh_remove...\n"));
		bcmsdh_remove(&sdmmc_dev);
	}
}
Example no. 5
0
int xio_create_listen_ports(const char *bindaddr, int port,
			    int (*callback)(int fd, void *), bool rdma)
{
	char url[256];
	struct xio_server *server;
	struct server_data *server_data;
	int xio_fd;

	server_data = xzalloc(sizeof(*server_data));
	server_data->ctx = xio_get_main_ctx();

	snprintf(url, 256, rdma ? "rdma://%s:%d" : "tcp://%s:%d",
		bindaddr ? bindaddr : "0.0.0.0", port);
	sd_info("accelio binding url: %s", url);

	/* bind a listener server to a portal/url */
	server = xio_bind(server_data->ctx, &portal_server_ops, url, NULL, 0,
			  server_data);
	if (server == NULL) {
		sd_err("xio_bind() failed");
		free(server_data);
		return -1;
	}

	xio_fd = xio_context_get_poll_fd(server_data->ctx);
	register_event(xio_fd, xio_server_handler, server_data);

	return 0;
}
Example no. 6
0
static int node_log_level_get(int argc, char **argv)
{
	int ret = 0, loglevel = -1;
	struct node_id nid;

	memset(&nid, 0, sizeof(nid));
	memcpy(nid.addr, sdhost, sizeof(sdhost));
	nid.port = sdport;

	ret = do_loglevel_get(&nid, &loglevel);
	switch (ret) {
	case EXIT_FAILURE:
	case EXIT_SYSFAIL:
		sd_err("Failed to execute request");
		ret = -1;
		break;
	case EXIT_SUCCESS:
		sd_info("%s (%d)", loglevel_to_str(loglevel), loglevel);
		break;
	default:
		sd_err("unknown return code of do_loglevel_get(): %d", ret);
		ret = -1;
		break;
	}

	return ret;
}
static void bcmsdh_sdmmc_remove(struct sdio_func *func)
{
	if (func == NULL) {
		sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
		return;
	}

	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
	sd_info(("sdio_device: 0x%04x\n", func->device));
	sd_info(("Function#: 0x%04x\n", func->num));

	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
		sdioh_remove(func);
}
Example no. 8
0
void dump_loglevels(bool err)
{
	for (int i = 0; i < ARRAY_SIZE(loglevel_table); i++) {
		if (err)
			sd_err("%s\t(%d)", loglevel_table[i], i);
		else
			sd_info("%s\t(%d)", loglevel_table[i], i);
	}
}
static void bcmsdh_sdmmc_remove(struct sdio_func *func)
{
	sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
	sd_info(("sdio_device: 0x%04x\n", func->device));
	sd_info(("Function#: 0x%04x\n", func->num));

	if (func->num == 2) {
		sd_err(("F2 found, calling bcmsdh_remove...\n"));
		bcmsdh_remove(&func->dev);
	} else if (func->num == 1) {
		sdio_claim_host(func);
		sdio_disable_func(func);
		sdio_release_host(func);
		gInstance->func[1] = NULL;
	}
}
static int bcmsdh_sdmmc_probe(struct sdio_func *func,
                              const struct sdio_device_id *id)
{
	int ret = 0;

	if (func == NULL)
		return -EINVAL;

	sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
	sd_info(("sdio_device: 0x%04x\n", func->device));
	sd_info(("Function#: 0x%04x\n", func->num));

	/* 4318 doesn't have function 2 */
	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
		ret = sdioh_probe(func);

	return ret;
}
Example no. 11
0
static void check_tmp_config(void)
{
	int ret;
	char tmp_config_path[PATH_MAX];

	snprintf(tmp_config_path, PATH_MAX, "%s.tmp", config_path);

	ret = unlink(tmp_config_path);
	if (ret < 0) {
		if (errno != ENOENT)
			sd_err("failed to remove temporal config file: %m");
		return;
	}

	sd_info("removed temporal config file");
}
Example no. 12
0
static void dump_cb(void *data, enum btree_node_type type, void *arg)
{
	struct sd_extent_header *header;
	struct sd_extent *ext;
	struct sd_extent_idx *idx;

	switch (type) {
	case BTREE_HEAD:
		header = (struct sd_extent_header *)data;
		sd_info("btree> HEAD: magic %u entries %u depth %u",
			header->magic, header->entries, header->depth);
		break;
	case BTREE_EXT:
		ext = (struct sd_extent *)data;
		sd_info("btree> EXT: idx %u vdi_id %u", ext->idx, ext->vdi_id);
		break;
	case BTREE_IDX:
		idx = (struct sd_extent_idx *)data;
		sd_info("btree> IDX: idx %u oid %lu", idx->idx, idx->oid);
		break;
	}
}
Example no. 13
0
static void bcmsdh_sdmmc_remove(struct sdio_func *func)
{
	if (func == NULL) {
		sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
		return;
	}

	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
	sd_info(("sdio_device: 0x%04x\n", func->device));
	sd_info(("Function#: 0x%04x\n", func->num));

	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
		sdioh_remove(func);

#ifdef CONFIG_MACH_NOTLE
	if (func->num == 2) {
		cancel_delayed_work_sync(&bcmshd_resume_work);
		destroy_workqueue(sdmmc_pm_workqueue);
	}
#endif
}
Example no. 14
0
static int discard(int fd, uint64_t start, uint32_t end)
{
	int ret = xfallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
			     start, end - start);
	if (ret < 0) {
		if (errno == ENOSYS || errno == EOPNOTSUPP)
			sd_info("FALLOC_FL_PUNCH_HOLE is not supported "
				"on this filesystem");
		else
			sd_err("failed to discard object, %m");
	}

	return ret;
}
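xfallocate() is not defined in this excerpt; judging from the call above, it is a thin wrapper around Linux fallocate(2). A minimal sketch under that assumption (the EINTR retry is a guess at the usual "x"-prefix convention, not taken from the source):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>	/* fallocate(), FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE */

/* Hypothetical helper: fallocate(2) with a retry on signal interruption. */
static int xfallocate(int fd, int mode, off_t offset, off_t len)
{
	int ret;

	do {
		ret = fallocate(fd, mode, offset, len);
	} while (ret < 0 && errno == EINTR);

	return ret;
}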
Example no. 15
0
static int convert_ecidx_xattr2path(uint64_t oid, const char *wd,
				    uint32_t epoch, uint8_t ec_index,
				    struct vnode_info *info,
				    void *arg)
{
	int ret = 0;
	uint8_t idx;
	char path[PATH_MAX + 1], new_path[PATH_MAX + 1];
	bool is_stale = *(bool *)arg;

	if (is_stale)
		snprintf(path, PATH_MAX, "%s/%016"PRIx64".%u", wd, oid, epoch);
	else
		snprintf(path, PATH_MAX, "%s/%016"PRIx64, wd, oid);

	if (getxattr(path, OLD_ECNAME, &idx, sizeof(uint8_t)) < 0) {
		sd_info("object: %s doesn't have its ec index in xattr: %m",
			path);
		goto out;
	}

	if (is_stale)
		snprintf(new_path, PATH_MAX, "%s/%016"PRIx64"_%u.%u",
			 wd, oid, idx, epoch);
	else
		snprintf(new_path, PATH_MAX, "%s/%016"PRIx64"_%u",
			 wd, oid, idx);

	if (rename(path, new_path) < 0) {
		sd_emerg("rename from %s to %s failed: %m", path, new_path);
		ret = -1;

		goto out;
	}

	if (removexattr(new_path, OLD_ECNAME) < 0) {
		sd_emerg("remove xattr %s from path %s failed: %m",
			 OLD_ECNAME, new_path);
		ret = -1;
	}

out:
	return ret;
}
static int __init
bcmsdh_module_init(void)
{
	int error = 0;

#ifdef CONFIG_HWCONNECTIVITY
	/*
	 * For OneTrack we need to check whether this is the right chip type;
	 * if it is not, do not initialize the driver.
	 */
	if (!isMyConnectivityChip(CHIP_TYPE_BCM)) {
		sd_err(("wifi-sdh chip type does not match, skip driver init"));
		return -EINVAL;
	} else {
		sd_info(("wifi-sdh chip type matches Broadcom, continue"));
	}
#endif

	error = sdio_function_init();
	return error;
}
Example no. 17
0
static void check_host_env(void)
{
	struct rlimit r;

	if (getrlimit(RLIMIT_NOFILE, &r) < 0)
		sd_err("failed to get nofile %m");
	else if (r.rlim_cur < SD_RLIM_NOFILE) {
		r.rlim_cur = SD_RLIM_NOFILE;
		r.rlim_max = SD_RLIM_NOFILE;
		if (setrlimit(RLIMIT_NOFILE, &r) != 0) {
			sd_err("failed to set nofile to suggested %lu, %m",
			       r.rlim_cur);
			sd_err("please increase nofile via sysctl fs.nr_open");
		} else {
			sd_info("allowed open files set to suggested %lu",
				r.rlim_cur);
		}
	}

	if (getrlimit(RLIMIT_CORE, &r) < 0)
		sd_debug("failed to get core %m");
	else if (r.rlim_cur < RLIM_INFINITY)
		sd_debug("allowed core file size %lu, suggested unlimited",
			 r.rlim_cur);

	/*
	 * Disable glibc's dynamic mmap threshold and set it to 512k.
	 *
	 * The dynamic threshold has to be disabled because it is inefficient
	 * at releasing freed memory back to the OS.  Setting it to 512k means
	 * that any allocation of 512k or more uses mmap() for malloc() and
	 * munmap() for free(), guaranteeing that such memory is never cached
	 * in glibc's ptmalloc internal pool.
	 *
	 * 512k is not a carefully tuned optimal value for the IO request size;
	 * it was chosen because it is the default amount a disk drive can
	 * transfer at a time, so a default guest installation will issue
	 * requests of at most 512k.
	 */
	mallopt(M_MMAP_THRESHOLD, 512 * 1024);
}
Example no. 18
0
static void check_host_env(void)
{
	struct rlimit r;

	if (getrlimit(RLIMIT_NOFILE, &r) < 0)
		sd_err("failed to get nofile %m");
	/*
	 * 1024 is default for NOFILE on most distributions, which is very
	 * dangerous to run Sheepdog cluster.
	 */
	else if (r.rlim_cur == 1024)
		sd_warn("Allowed open files 1024 too small, suggested %u",
			SD_RLIM_NOFILE);
	else if (r.rlim_cur < SD_RLIM_NOFILE)
		sd_info("Allowed open files %lu, suggested %u", r.rlim_cur,
			SD_RLIM_NOFILE);

	if (getrlimit(RLIMIT_CORE, &r) < 0)
		sd_debug("failed to get core %m");
	else if (r.rlim_cur < RLIM_INFINITY)
		sd_debug("Allowed core file size %lu, suggested unlimited",
			 r.rlim_cur);

	/*
	 * Disable glibc's dynamic mmap threshold and set it to 512k.
	 *
	 * The dynamic threshold has to be disabled because it is inefficient
	 * at releasing freed memory back to the OS.  Setting it to 512k means
	 * that any allocation of 512k or more uses mmap() for malloc() and
	 * munmap() for free(), guaranteeing that such memory is never cached
	 * in glibc's ptmalloc internal pool.
	 *
	 * 512k is not a carefully tuned optimal value for the IO request size;
	 * it was chosen because it is the default amount a disk drive can
	 * transfer at a time, so a default guest installation will issue
	 * requests of at most 512k.
	 */
	mallopt(M_MMAP_THRESHOLD, 512 * 1024);
}
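The effect of pinning M_MMAP_THRESHOLD can be seen in a standalone program. A minimal, glibc-specific sketch (not part of the daemon; the exact malloc_stats() output varies by glibc version) showing that a 512k allocation is then served by mmap() and returned to the OS on free():

#include <malloc.h>	/* mallopt(), M_MMAP_THRESHOLD, malloc_stats() -- glibc */
#include <stdlib.h>

int main(void)
{
	/* Pin the mmap threshold at 512k, as check_host_env() does. */
	mallopt(M_MMAP_THRESHOLD, 512 * 1024);

	void *big = malloc(512 * 1024);	/* >= threshold: backed by mmap() */
	void *small = malloc(16 * 1024);	/* below threshold: from the heap arena */

	malloc_stats();			/* prints arena vs. mmap usage to stderr */

	free(big);			/* munmap()ed immediately, not cached by ptmalloc */
	free(small);
	return 0;
}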
Example no. 19
0
static void swift_handle_request(struct http_request *req,
				 void (*a_handler)(struct http_request *req,
						   const char *account),
				 void (*c_handler)(struct http_request *req,
						   const char *account,
						   const char *container),
				 void (*o_handler)(struct http_request *req,
						   const char *account,
						   const char *container,
						   const char *object))
{
	char *args[4] = {};
	char *version, *account, *container, *object;

	split_path(req->uri, ARRAY_SIZE(args), args);

	version = args[0];
	account = args[1];
	container = args[2];
	object = args[3];

	sd_info("%s", str_http_req(req));

	if (account == NULL) {
		sd_info("invalid uri: %s", req->uri);
		http_response_header(req, NOT_FOUND);
	} else if (container == NULL) {
		sd_info("account operation, %s", account);
		a_handler(req, account);
	} else if (object == NULL) {
		sd_info("container operation, %s, %s", account, container);
		c_handler(req, account, container);
	} else {
		sd_info("object operation, %s, %s, %s", account, container,
			object);
		o_handler(req, account, container, object);
	}

	sd_info("%s", str_http_req(req));

	free(version);
	free(account);
	free(container);
	free(object);
}
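split_path() is only called here, not defined; from the way the caller free()s every element, it presumably strdup()s up to nr '/'-separated components of the URI into args[], leaving the remaining slots NULL. A minimal sketch under that assumption (not the original implementation):

#include <stdlib.h>
#include <string.h>

/* Hypothetical sketch: copy up to nr path components of uri into args[];
 * the caller is expected to have zeroed args[] and to free() each entry. */
static void split_path(const char *uri, size_t nr, char **args)
{
	char *copy = strdup(uri), *cur = copy, *tok, *save = NULL;
	size_t i = 0;

	while (i < nr && (tok = strtok_r(cur, "/", &save)) != NULL) {
		args[i++] = strdup(tok);
		cur = NULL;	/* subsequent strtok_r() calls continue in copy */
	}

	free(copy);
}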
Example no. 20
0
static int migrate_from_v3_to_v4(void)
{
	bool is_stale = true;
	int ret;

	ret = for_each_object_in_stale(convert_ecidx_xattr2path,
				       (void *)&is_stale);
	if (ret < 0) {
		sd_emerg("converting store format of stale object directory"
			 "failed");
		return ret;
	}

	is_stale = false;
	ret = for_each_object_in_wd(convert_ecidx_xattr2path, false,
				    (void *)&is_stale);
	if (ret < 0) {
		sd_emerg("converting store format of object directory failed");
		return ret;
	}

	sd_info("converting store format v3 to v4 is ended successfully");
	return 0;
}
Example no. 21
0
/* Configure PCI-SPI Host Controller's SPI Clock rate as a divisor into the
 * base clock rate.  The base clock is either the PCI Clock (33MHz) or the
 * external clock oscillator at U17 on the PciSpiHost.
 */
bool
spi_start_clock(sdioh_info_t *sd, uint16 div)
{
	spih_info_t *si = (spih_info_t *)sd->controller;
	osl_t *osh = si->osh;
	spih_regs_t *regs = si->regs;
	uint32 t, espr, disp;
	uint32 disp_xtal_freq;
	bool	ext_clock = FALSE;
	char disp_string[5];

	if (div > 2048) {
		sd_err(("%s: divisor %d too large; using max of 2048\n", __FUNCTION__, div));
		div = 2048;
	} else if (div & (div - 1)) {	/* Not a power of 2? */
		/* Round up to a power of 2 */
		while ((div + 1) & div)
			div |= div >> 1;
		div++;
	}

	/* For FPGA Rev >= 5, the use of an external clock oscillator is supported.
	 * If the oscillator is populated, use it to provide the SPI base clock,
	 * otherwise, default to the PCI clock as the SPI base clock.
	 */
	if (si->rev >= 5) {
		uint32 clk_tick;
		/* Enable the External Clock Oscillator as PLL clock source. */
		if (!sdspi_switch_clock(sd, TRUE)) {
			sd_err(("%s: error switching to external clock\n", __FUNCTION__));
		}

		/* Check to make sure the external clock is running.  If not, then it
		 * is not populated on the card, so we will default to the PCI clock.
		 */
		clk_tick = SPIPCI_RREG(osh, &regs->spih_clk_count);
		if (clk_tick == SPIPCI_RREG(osh, &regs->spih_clk_count)) {

			/* Switch back to the PCI clock as the clock source. */
			if (!sdspi_switch_clock(sd, FALSE)) {
				sd_err(("%s: error switching to external clock\n", __FUNCTION__));
			}
		} else {
			ext_clock = TRUE;
		}
	}

	/* Hack to allow hot-swapping oscillators:
	 * 1. Force PCI clock as clock source, using sd_divisor of 0.
	 * 2. Swap oscillator
	 * 3. Set desired sd_divisor (will switch to external oscillator as clock source.
	 */
	if (div == 0) {
		ext_clock = FALSE;
		div = 2;

		/* Select PCI clock as the clock source. */
		if (!sdspi_switch_clock(sd, FALSE)) {
			sd_err(("%s: error switching to external clock\n", __FUNCTION__));
		}

		sd_err(("%s: Ok to hot-swap oscillators.\n", __FUNCTION__));
	}

	/* If using the external oscillator, read the clock frequency from the controller
	 * The value read is in units of 10000Hz, and it's not a nice round number because
	 * it is calculated by the FPGA.  So to make up for that, we round it off.
	 */
	if (ext_clock == TRUE) {
		uint32 xtal_freq;

		OSL_DELAY(1000);
		xtal_freq = SPIPCI_RREG(osh, &regs->spih_xtal_freq) * 10000;

		sd_info(("%s: Oscillator is %dHz\n", __FUNCTION__, xtal_freq));

		disp_xtal_freq = xtal_freq / 10000;

		/* Round it off to a nice number. */
		if ((disp_xtal_freq % 100) > 50) {
			disp_xtal_freq += 100;
		}

		disp_xtal_freq = (disp_xtal_freq / 100) * 100;
	} else {
		sd_err(("%s: no external oscillator installed, using PCI clock.\n", __FUNCTION__));
		disp_xtal_freq = 3333;
	}

	/* Convert the SPI Clock frequency to BCD format. */
	sprintf(disp_string, "%04d", disp_xtal_freq / div);

	disp  = (disp_string[0] - '0') << 12;
	disp |= (disp_string[1] - '0') << 8;
	disp |= (disp_string[2] - '0') << 4;
	disp |= (disp_string[3] - '0');

	/* Select the correct ESPR register value based on the divisor. */
	switch (div) {
		case 1:		espr = 0x0; break;
		case 2:		espr = 0x1; break;
		case 4:		espr = 0x2; break;
		case 8:		espr = 0x5; break;
		case 16:	espr = 0x3; break;
		case 32:	espr = 0x4; break;
		case 64:	espr = 0x6; break;
		case 128:	espr = 0x7; break;
		case 256:	espr = 0x8; break;
		case 512:	espr = 0x9; break;
		case 1024:	espr = 0xa; break;
		case 2048:	espr = 0xb; break;
		default:	espr = 0x0; ASSERT(0); break;
	}

	t = SPIPCI_RREG(osh, &regs->spih_ctrl);
	t &= ~3;
	t |= espr & 3;
	SPIPCI_WREG(osh, &regs->spih_ctrl, t);

	t = SPIPCI_RREG(osh, &regs->spih_ext);
	t &= ~3;
	t |= (espr >> 2) & 3;
	SPIPCI_WREG(osh, &regs->spih_ext, t);

	SPIPCI_WREG(osh, &regs->spih_hex_disp, disp);

	/* For Rev 8, writing to the PLL_CTRL register resets
	 * the PLL, and it can re-acquire in 200uS.  For
	 * Rev 7 and older, we use a software delay to allow
	 * the PLL to re-acquire, which takes more than 2mS.
	 */
	if (si->rev < 8) {
		/* Wait for clock to settle. */
		OSL_DELAY(5000);
	}

	sd_info(("%s: SPI_CTRL=0x%08x SPI_EXT=0x%08x\n",
	         __FUNCTION__,
	         SPIPCI_RREG(osh, &regs->spih_ctrl),
	         SPIPCI_RREG(osh, &regs->spih_ext)));

	return TRUE;
}
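The divisor handling at the top of spi_start_clock() rounds a non-power-of-two value up to the next power of two with a bit-smearing loop; isolated into a helper with standard C types (this helper is illustrative, not part of the driver), the same trick looks like this:

#include <stdint.h>

/* Round v up to the next power of two: OR in shifted copies until all bits
 * below the highest set bit are ones, then add one.  Powers of two (and 0)
 * are returned unchanged, mirroring the driver's `div & (div - 1)` guard. */
static uint32_t round_up_pow2(uint32_t v)
{
	if (v == 0 || (v & (v - 1)) == 0)
		return v;

	while ((v + 1) & v)
		v |= v >> 1;

	return v + 1;
}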
Example no. 22
0
bool
spi_start_clock(sdioh_info_t *sd, uint16 div)
{
	spih_info_t *si = (spih_info_t *)sd->controller;
	osl_t *osh = si->osh;
	spih_regs_t *regs = si->regs;
	uint32 t, espr, disp;
	uint32 disp_xtal_freq;
	bool	ext_clock = FALSE;
	char disp_string[5];

	if (div > 2048) {
		sd_err(("%s: divisor %d too large; using max of 2048\n", __FUNCTION__, div));
		div = 2048;
	} else if (div & (div - 1)) {	
		
		while ((div + 1) & div)
			div |= div >> 1;
		div++;
	}

	
	if (si->rev >= 5) {
		uint32 clk_tick;
		
		if (!sdspi_switch_clock(sd, TRUE)) {
			sd_err(("%s: error switching to external clock\n", __FUNCTION__));
		}

		
		clk_tick = SPIPCI_RREG(osh, &regs->spih_clk_count);
		if (clk_tick == SPIPCI_RREG(osh, &regs->spih_clk_count)) {

			
			if (!sdspi_switch_clock(sd, FALSE)) {
				sd_err(("%s: error switching to external clock\n", __FUNCTION__));
			}
		} else {
			ext_clock = TRUE;
		}
	}

	
	if (div == 0) {
		ext_clock = FALSE;
		div = 2;

		
		if (!sdspi_switch_clock(sd, FALSE)) {
			sd_err(("%s: error switching to external clock\n", __FUNCTION__));
		}

		sd_err(("%s: Ok to hot-swap oscillators.\n", __FUNCTION__));
	}

	
	if (ext_clock == TRUE) {
		uint32 xtal_freq;

		OSL_DELAY(1000);
		xtal_freq = SPIPCI_RREG(osh, &regs->spih_xtal_freq) * 10000;

		sd_info(("%s: Oscillator is %dHz\n", __FUNCTION__, xtal_freq));


		disp_xtal_freq = xtal_freq / 10000;

		
		if ((disp_xtal_freq % 100) > 50) {
			disp_xtal_freq += 100;
		}

		disp_xtal_freq = (disp_xtal_freq / 100) * 100;
	} else {
		sd_err(("%s: no external oscillator installed, using PCI clock.\n", __FUNCTION__));
		disp_xtal_freq = 3333;
	}

	
	sprintf(disp_string, "%04d", disp_xtal_freq / div);

	disp  = (disp_string[0] - '0') << 12;
	disp |= (disp_string[1] - '0') << 8;
	disp |= (disp_string[2] - '0') << 4;
	disp |= (disp_string[3] - '0');

	
	switch (div) {
		case 1:		espr = 0x0; break;
		case 2:		espr = 0x1; break;
		case 4:		espr = 0x2; break;
		case 8:		espr = 0x5; break;
		case 16:	espr = 0x3; break;
		case 32:	espr = 0x4; break;
		case 64:	espr = 0x6; break;
		case 128:	espr = 0x7; break;
		case 256:	espr = 0x8; break;
		case 512:	espr = 0x9; break;
		case 1024:	espr = 0xa; break;
		case 2048:	espr = 0xb; break;
		default:	espr = 0x0; ASSERT(0); break;
	}

	t = SPIPCI_RREG(osh, &regs->spih_ctrl);
	t &= ~3;
	t |= espr & 3;
	SPIPCI_WREG(osh, &regs->spih_ctrl, t);

	t = SPIPCI_RREG(osh, &regs->spih_ext);
	t &= ~3;
	t |= (espr >> 2) & 3;
	SPIPCI_WREG(osh, &regs->spih_ext, t);

	SPIPCI_WREG(osh, &regs->spih_hex_disp, disp);

	
	if (si->rev < 8) {
		
		OSL_DELAY(5000);
	}

	sd_info(("%s: SPI_CTRL=0x%08x SPI_EXT=0x%08x\n",
	         __FUNCTION__,
	         SPIPCI_RREG(osh, &regs->spih_ctrl),
	         SPIPCI_RREG(osh, &regs->spih_ext)));

	return TRUE;
}
Example no. 23
0
/**
 * Init SD card
 * 
 * Init SD card
 * \return  Error code
 * 
 */
int8_t sd_init(void)
{
    // Card not initialized
    
    // setup card detect on PA18
    /*
    pPIO->PIO_PER   = PIN_CARD_DETECT;          // Enable PIO pin
    pPIO->PIO_ODR   = PIN_CARD_DETECT;          // Enable input
    pPIO->PIO_PPUER = PIN_CARD_DETECT;         // Enable pullup
    */
    uint8_t retries;
    uint8_t resp;

    TRACE_SD("Init SD card\n\r");
    
    for(retries = 0, resp = 0; (retries < 5) && (resp != SD_R1_IDLE_STATE) ; retries++)
    {
        // send CMD0 to reset card
    	sd_command(SD_GO_IDLE_STATE,0);
        resp = sd_get_response();
 
        TRACE_SD("go idle resp: %X\n\r", resp);
        delayms(100);
    }
    
    if(resp != SD_R1_IDLE_STATE) return SD_E_IDLE;

    // send CMD8 to check voltage range
    // this also determines if the card is a 2.0 (or later) card
    sd_command(SD_SEND_IF_COND,0x000001AA);
    resp = sd_get_response();

    TRACE_SD("CMD8resp: %02X\n\r",resp);    
    
    if ((resp & SD_R1_ILLEGAL_COM) != SD_R1_ILLEGAL_COM)
    {
    	TRACE_SD("2.0 card\n\r");
        uint32_t r7reply;
        ver2_card = true;  // mark this as a version2 card
        r7reply = sd_get_response();     
        r7reply <<= 8;
        r7reply |= sd_get_response();    
        r7reply <<= 8;
        r7reply |= sd_get_response();    
        r7reply <<= 8;
        r7reply |= sd_get_response();

        TRACE_SD("CMD8REPLY: %08x\n",r7reply);
        
        // verify that we're compatible
        if ( (r7reply & 0x00000fff) != 0x01AA )
        {
            TRACE_SD("Voltage range mismatch\n");
            return SD_E_VOLT;  // voltage range mismatch, unusable card
        }
    }
    else
    {
         TRACE_SD("Not a 2.0 card\n\r");
    }   

    sd_send_dummys();

    /*
     * send ACMD41 until we get a 0 back, indicating card is done initializing
     * wait for max 5 seconds
     * 
     */
    for (retries=0,resp=0; !resp && retries<50; retries++)
    {
        uint8_t i;
        // send CMD55
        sd_command(SD_APP_CMD, 0);    // CMD55, prepare for APP cmd

        TRACE_SD("Sending CMD55\n");
        
        if ((sd_get_response() & 0xFE) != 0)
        {
             TRACE_SD("CMD55 failed\n");
        }
        // send ACMD41
        TRACE_SD("Sending ACMD41\n");
        
        if(ver2_card)
        	sd_command(SD_ACMD_SEND_OP_COND, 1UL << 30); // ACMD41, HCS bit 1
        else
        	sd_command(SD_ACMD_SEND_OP_COND, 0); // ACMD41, HCS bit 0
        
        i = sd_get_response();
        
        TRACE_SD("response = %02x\n",i);
        
        if (i != 0)
        {
            sd_send_dummys();
            delayms(100);
        }
        else    
            resp = 1;
        
        delayms(500);
    }

    if (!resp)
    {
        TRACE_SD("not valid\n");
        return SD_E_INIT;          // init failure
    }
    sd_send_dummys();     // clean up

    if (ver2_card)
    {
        uint32_t ocr;
        // check for High Cap etc
        
        // send CMD58
        TRACE_SD("sending CMD58\n");

        sd_command(SD_READ_OCR,0);    // CMD58, get OCR
        TRACE_SD(".resp.");
        if (sd_get_response() != 0)
        {
            TRACE_SD("CMD58 failed\n");
        }
        else
        {
            // 0x80, 0xff, 0x80, 0x00 would be expected normally
            // 0xC0 if high cap
            
            ocr = sd_get_response();
            ocr <<= 8;
            ocr |= sd_get_response();
            ocr <<= 8;
            ocr |= sd_get_response();
            ocr <<= 8;
            ocr |= sd_get_response();
             
            TRACE_SD("OCR = %08x\n", ocr);
            
            if((ocr & 0xC0000000) == 0xC0000000)
            {
            	TRACE_SD("SDHC card.\n");
            	sdhc_card = true; // Set HC flag.
            }
        }
    }
    sd_send_dummys();     // clean up

    sd_info();

    TRACE_SD("Init SD card OK\n");
    return SD_OK;   
}
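Both the CMD8 R7 reply and the CMD58 OCR above are rebuilt from four consecutive one-byte responses with the same shift-and-OR pattern; a small helper could factor that out (the name sd_get_response32 is hypothetical, sd_get_response() is the routine already used throughout sd_init()):

#include <stdint.h>

/* Hypothetical helper: read four response bytes, MSB first, and pack them
 * into one 32-bit word, as done for the R7 reply and the OCR in sd_init(). */
static uint32_t sd_get_response32(void)
{
    uint32_t value = 0;

    for (uint8_t i = 0; i < 4; i++)
        value = (value << 8) | sd_get_response();

    return value;
}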
Example no. 24
0
static void interpret_msg_pre_join(void)
{
	int ret;
	struct sph_msg snd, rcv;
	struct sph_msg_join_reply *join_reply;

retry:
	read_msg(&rcv);

	if (rcv.type == SPH_SRV_MSG_JOIN_RETRY) {
		sd_info("join request is rejected, retrying");

		do_shepherd_join();
		goto retry;
	} else if (rcv.type == SPH_SRV_MSG_NEW_NODE) {
		struct sph_msg_join *join;
		int join_len;

		join_len = rcv.body_len;
		join = xzalloc(join_len);
		ret = xread(sph_comm_fd, join, join_len);
		if (ret != join_len) {
			sd_err("xread() failed: %m");
			exit(1);
		}

		/*
		 * FIXME: member change events must be ordered with nonblocked
		 *        events
		 */
		if (!sd_join_handler(&join->new_node, NULL, 0, join->opaque))
			panic("sd_accept_handler() failed");

		snd.type = SPH_CLI_MSG_ACCEPT;
		snd.body_len = join_len;

		ret = writev2(sph_comm_fd, &snd, join, join_len);
		if (sizeof(snd) + join_len != ret) {
			sd_err("writev2() failed: %m");
			exit(1);
		}

		free(join);

		read_msg(&rcv);
	}

	if (rcv.type != SPH_SRV_MSG_JOIN_REPLY) {
		sd_err("unexpected message from shepherd, received message: %s",
		       sph_srv_msg_to_str(rcv.type));

		/*
		 * In this case, the state of this sheep in shepherd must be
		 * SHEEP_STATE_CONNECTED.  Receiving anything other than
		 * SPH_SRV_MSG_JOIN_REPLY indicates a bug in shepherd.
		 */
		exit(1);
	}

	join_reply = xzalloc(rcv.body_len);
	ret = xread(sph_comm_fd, join_reply, rcv.body_len);
	if (ret != rcv.body_len) {
		sd_err("xread() failed: %m");
		exit(1);
	}

	sd_info("join reply arrived, nr_nodes: %d", join_reply->nr_nodes);

	memcpy(nodes, join_reply->nodes,
	       join_reply->nr_nodes * sizeof(struct sd_node));
	nr_nodes = join_reply->nr_nodes;

	/* FIXME: member change events must be ordered with nonblocked events */
	sd_accept_handler(&this_node, nodes, nr_nodes, join_reply->opaque);

	free(join_reply);

	sd_info("shepherd_join() succeed");
	state = STATE_JOINED;
}
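writev2() is not defined in this excerpt; the `sizeof(snd) + join_len != ret` check suggests it writes the fixed-size sph_msg header and the variable-length body with a single writev(2) and returns the byte count. A minimal sketch under that assumption (the real helper presumably also handles short writes):

#include <sys/uio.h>

/* Hypothetical sketch: send a fixed-size header followed by a body in one
 * writev(2) call; returns writev()'s byte count, or -1 with errno set. */
static ssize_t writev2(int fd, struct sph_msg *hdr, void *body, size_t body_len)
{
	struct iovec iov[2] = {
		{ .iov_base = hdr,  .iov_len = sizeof(*hdr) },
		{ .iov_base = body, .iov_len = body_len },
	};

	return writev(fd, iov, 2);
}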