Example #1
void gc_free_pages(struct gcpage *p)
{
	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"container = 0x%08X\n",
		__func__, __LINE__, (unsigned int) p);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"page array=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->pages);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"logical=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->logical);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"physical=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->physical);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"size=%d\n",
		__func__, __LINE__, p->size);

	if (p->logical != NULL) {
		dma_free_coherent(NULL, p->size, p->logical, p->physical);
		p->logical = NULL;
	}

	p->physical = ~0UL;
	p->order = 0;
	p->size = 0;
}
Example #2
void GC_attach_current_thread_exceptions_to_handler()
{
  mach_port_t thread_self, exc_port_s;
  mach_msg_type_name_t type;
  kern_return_t retval;

  if (!task_self) return;

  /* get ids for ourselves */
  thread_self = mach_thread_self();

  /* extract out the send rights for that port, which the OS needs */
  retval = mach_port_extract_right(task_self, exc_port, MACH_MSG_TYPE_MAKE_SEND,
				   &exc_port_s, &type);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't extract send rights: %s\n", mach_error_string(retval));
    abort();
  }

  /* set the exception ports for this thread to the above */
  retval = thread_set_exception_ports(thread_self, EXC_MASK_BAD_ACCESS,
				      exc_port_s, EXCEPTION_DEFAULT,
				      ARCH_THREAD_STATE);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't set exception ports: %s\n", mach_error_string(retval));
    abort();
  }
#if defined(MZ_USE_PLACES)
  register_mach_thread();
#endif
}
Example #3
void gcpwr_disable_pulse_skipping(enum gcpower prevstate)
{
	union gcclockcontrol gcclockcontrol;

	if (!g_clockenabled)
		return;

	if (g_pulseskipping) {
		/* Set the min l3 data throughput */
		omap_pm_set_min_bus_tput(g_bb2d_dev, OCP_INITIATOR_AGENT,
						200*1000*4);

		/* Enable loading and set to maximum value. */
		gcclockcontrol.reg.pulsecount = 64;
		gcclockcontrol.reg.pulseset = true;
		gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address,
				gcclockcontrol.raw);

		/* Disable loading. */
		gcclockcontrol.reg.pulseset = false;
		gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address,
				gcclockcontrol.raw);

		/* Pulse skipping disabled. */
		g_pulseskipping = false;
		GCPRINT(GCDBGFILTER, GCZONE_POWER, GC_MOD_PREFIX
			"pulse skipping disabled.\n",
			__func__, __LINE__);
	} else {
		GCPRINT(GCDBGFILTER, GCZONE_POWER, GC_MOD_PREFIX
			"pulse skipping is already disabled.\n",
			__func__, __LINE__);
	}
}
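The sequence above latches the new pulse count by writing the register twice: once with the pulseset load bit set, then again with it cleared. The sketch below restates that load/latch pattern in a self-contained form; the union layout, field widths, and write_reg() stub are hypothetical stand-ins rather than the driver's real GCREG_HI_CLOCK_CONTROL definition.

/* Hypothetical register layout; only the load/latch idiom matters here. */
#include <stdint.h>
#include <stdio.h>

union clockcontrol {
	struct {
		uint32_t pulsecount : 7;	/* assumed field width */
		uint32_t pulseset   : 1;	/* "load the count" strobe */
		uint32_t reserved   : 24;
	} reg;
	uint32_t raw;
};

static void write_reg(uint32_t value)
{
	/* Stand-in for gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address, ...). */
	printf("write 0x%08X\n", (unsigned int) value);
}

static void set_pulse_count(unsigned int count)
{
	union clockcontrol cc = { .raw = 0 };

	/* Enable loading and write the new count. */
	cc.reg.pulsecount = count;
	cc.reg.pulseset = 1;
	write_reg(cc.raw);

	/* Disable loading; the hardware keeps the latched count. */
	cc.reg.pulseset = 0;
	write_reg(cc.raw);
}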
Example #4
int mmu2d_flush(void *logical, u32 address, u32 size)
{
#if MMU_ENABLE
	static const int flushSize = sizeof(struct gcmommuflush);
	struct gcmommuflush *gcmommuflush;
	u32 count;

	GCPRINT(GCDBGFILTER, GCZONE_FLUSH, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	if (logical != NULL) {
		GCPRINT(GCDBGFILTER, GCZONE_FLUSH, GC_MOD_PREFIX
			"address = 0x%08X\n",
			__func__, __LINE__, address);

		GCPRINT(GCDBGFILTER, GCZONE_FLUSH, GC_MOD_PREFIX
			"size = %d\n",
			__func__, __LINE__, size);

		/* Compute the buffer count. */
		count = (size - flushSize + 7) >> 3;

		gcmommuflush = (struct gcmommuflush *) logical;

		/* Flush 2D PE cache. */
		gcmommuflush->peflush.flush_ldst = gcmoflush_flush_ldst;
		gcmommuflush->peflush.flush.reg = gcregflush_pe2D;

		/* Arm the FE-PE semaphore. */
		gcmommuflush->peflushsema.sema_ldst = gcmosema_sema_ldst;
		gcmommuflush->peflushsema.sema.reg  = gcregsema_fe_pe;

		/* Stall FE until PE is done flushing. */
		gcmommuflush->peflushstall.cmd.fld = gcfldstall;
		gcmommuflush->peflushstall.arg.fld = gcfldstall_fe_pe;

		/* LINK to the next slot to flush FE FIFO. */
		gcmommuflush->feflush.cmd.fld = gcfldlink4;
		gcmommuflush->feflush.address
			= address
			+ offsetof(struct gcmommuflush, mmuflush_ldst);

		/* Flush MMU cache. */
		gcmommuflush->mmuflush_ldst = gcmommuflush_mmuflush_ldst;
		gcmommuflush->mmuflush.reg = gcregmmu_flush;

		/* Arm the FE-PE semaphore. */
		gcmommuflush->mmuflushsema.sema_ldst = gcmosema_sema_ldst;
		gcmommuflush->mmuflushsema.sema.reg  = gcregsema_fe_pe;

		/* Stall FE until PE is done flushing. */
		gcmommuflush->mmuflushstall.cmd.fld = gcfldstall;
		gcmommuflush->mmuflushstall.arg.fld = gcfldstall_fe_pe;

		/* LINK to the next slot to flush FE FIFO. */
		gcmommuflush->link.cmd.fld.opcode
			= GCREG_COMMAND_LINK_COMMAND_OPCODE_LINK;
		gcmommuflush->link.cmd.fld.count = count;
		gcmommuflush->link.address = address + flushSize;
	}

	/* The original example is truncated here. Reconstructed tail, based on
	   gc_commit() using the return value of mmu2d_flush(NULL, 0, 0) as the
	   size of the flush block. */
	return flushSize;
#else
	return 0;
#endif
}
Example #5
static void *malloc_pages(size_t len, size_t alignment)
{
  void *r;
  size_t extra = 0;

  if (!page_size)
    page_size = getpagesize();

#ifndef MAP_ANON
  if (!fd_created) {
    fd_created = 1;
    fd = open("/dev/zero", O_RDWR);
  }
#endif

  CHECK_USED_AGAINST_MAX(len);

  /* Round up to nearest page: */
  if (len & (page_size - 1))
    len += page_size - (len & (page_size - 1));

  /* Something from the cache, perhaps? */
  r = find_cached_pages(len, alignment);
  if (r)
    return r;

  extra = alignment;

#ifdef MAP_ANON
  r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
  r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if (r  == (void *)-1)
    return NULL;

  if (extra) {
    /* We allocated extra so we can choose an aligned start address. */
    void *real_r;
    long pre_extra;

    real_r = (void *)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));
    
    pre_extra = real_r - r;
    if (pre_extra)
      if (munmap(r, pre_extra))
	GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)r, pre_extra, errno);
    if (pre_extra < extra)
      if (munmap(real_r + len, extra - pre_extra))
	GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)r, pre_extra, errno);
    r = real_r;
  }

  ACTUALLY_ALLOCATING_PAGES(len);
  LOGICALLY_ALLOCATING_PAGES(len);

  return r;
}
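malloc_pages() obtains an aligned block by mapping len + alignment bytes and then unmapping the unused slack before and after the aligned start. A minimal standalone restatement of that trick, assuming alignment is a nonzero power of two that is a multiple of the page size and len is already page-aligned (the names are illustrative, not the collector's):

#include <stddef.h>
#include <sys/mman.h>

static void *alloc_aligned_pages(size_t len, size_t alignment)
{
	unsigned char *raw, *aligned;
	size_t pre, post;

	/* Over-allocate so an aligned block of len bytes must fit somewhere
	   in the mapping (MAP_ANONYMOUS is MAP_ANON on some systems). */
	raw = mmap(NULL, len + alignment, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED)
		return NULL;

	/* Round the start up to the requested alignment. */
	aligned = (unsigned char *)
		(((unsigned long) raw + (alignment - 1)) & ~(alignment - 1));

	/* Give back the slack before and after the aligned block. */
	pre = (size_t) (aligned - raw);
	post = alignment - pre;
	if (pre)
		munmap(raw, pre);
	if (post)
		munmap(aligned + len, post);

	return aligned;
}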
Example #6
static int gc_resume(struct platform_device *pdev)
{
	GCPRINT(GCDBGFILTER, GCZONE_POWER, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	GCPRINT(GCDBGFILTER, GCZONE_POWER, "--" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	return 0;
}
Example #7
enum gcerror gc_alloc_pages(struct gcpage *p, unsigned int size)
{
	enum gcerror gcerror;
	int order;

	p->pages = NULL;
	p->logical = NULL;
	p->physical = ~0UL;

	order = get_order(size);

	p->order = order;
	p->size = (1 << order) * PAGE_SIZE;

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"requested size=%d\n", __func__, __LINE__, size);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"aligned size=%d\n", __func__, __LINE__, p->size);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"order=%d\n", __func__, __LINE__, order);

	p->logical = dma_alloc_coherent(NULL, p->size, &p->physical,
								GFP_KERNEL);
	if (!p->logical) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to allocate memory\n",
			__func__, __LINE__);

		gcerror = GCERR_OOPM;
		goto fail;
	}

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"container = 0x%08X\n",
		__func__, __LINE__, (unsigned int) p);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"page array=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->pages);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"logical=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->logical);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"physical=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->physical);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"size=%d\n",
		__func__, __LINE__, p->size);

	return GCERR_NONE;

fail:
	gc_free_pages(p);
	return gcerror;
}
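gc_alloc_pages() rounds the request up to a whole power-of-two number of pages via get_order(), so p->size can exceed the requested size. A userspace illustration of the same computation, with PAGE_SIZE_SKETCH standing in for the kernel's PAGE_SIZE:

#include <stddef.h>

#define PAGE_SIZE_SKETCH 4096UL

static int size_to_order(size_t size)
{
	int order = 0;
	size_t chunk = PAGE_SIZE_SKETCH;

	/* Find the smallest order with (1 << order) * PAGE_SIZE >= size,
	   e.g. 4096 -> 0, 5000 -> 1, 20000 -> 3. */
	while (chunk < size) {
		chunk <<= 1;
		order++;
	}
	return order;
}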
Example #8
static void print_traced_objects(int path_length_limit,
				 GC_get_type_name_proc get_type_name,
				 GC_print_tagged_value_proc print_tagged_value,
                                 GC_print_traced_filter_proc print_traced_filter)
{
  int i, j, k, dp = 0, counter, each;
# define DITTO_BUFFER_SIZE 16
  void *ditto[DITTO_BUFFER_SIZE];

  memset(ditto, 0, sizeof(void*) * DITTO_BUFFER_SIZE);

  GC_instance->avoid_collection++;
  GCPRINT(GCOUTF, "Begin Trace\n");
  for (i = 0; i < found_object_count; i++) {
    void *p;
    int limit = path_length_limit;
    int kind = 0;
    p = found_objects[i];
    if (print_traced_filter(p)) {
      p = print_out_pointer("==* ", p, get_type_name, print_tagged_value, &kind);

      j = 0; counter = 0; each = 1;
      while (p && limit) {
        for (k = 0; k < DITTO_BUFFER_SIZE; k++) {
          if (ditto[k] == p) {
            GCPRINT(GCOUTF, " <- %p: DITTO\n", p);
            p = NULL;
            break;
          }
        }
        if (p) {
          if (j < DITTO_BUFFER_SIZE) {
            /* Remember the 1st, 2nd, 4th, 8th, etc. */
            counter++;
            if (counter == each) {
              ditto[(j + dp) % DITTO_BUFFER_SIZE] = p;
              j++;
              each *= 2;
              counter = 0;
            }
          }
          p = print_out_pointer(" <- ", p, get_type_name, print_tagged_value, &kind);
          limit--;
        }
      }
      dp = (j % DITTO_BUFFER_SIZE);
    }
  }
  GCPRINT(GCOUTF, "End Trace\n");
  --GC_instance->avoid_collection;
}
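The ditto[] buffer above samples the back-pointer chain at exponentially growing intervals (the 1st, 2nd, 4th, 8th node, and so on), so a cycle is reported as DITTO instead of looping forever while only DITTO_BUFFER_SIZE pointers are ever stored. A self-contained sketch of that sampling idea on a plain linked list (struct node and walk_chain are illustrative, not the collector's types):

#include <stdio.h>

#define DITTO_BUFFER_SIZE 16

struct node { struct node *prev; };

static void walk_chain(struct node *p, int limit)
{
	struct node *ditto[DITTO_BUFFER_SIZE] = { NULL };
	int j = 0, counter = 0, each = 1, k;

	while (p && limit--) {
		/* Have we already sampled this node? Then the chain cycles. */
		for (k = 0; k < DITTO_BUFFER_SIZE; k++)
			if (ditto[k] == p) {
				printf(" <- %p: DITTO\n", (void *) p);
				return;
			}

		/* Remember the 1st, 2nd, 4th, 8th, ... node of the chain. */
		if (j < DITTO_BUFFER_SIZE && ++counter == each) {
			ditto[j++] = p;
			each *= 2;
			counter = 0;
		}

		printf(" <- %p\n", (void *) p);
		p = p->prev;
	}
}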
Example #9
static void flush_user_buffer(struct mmu2darena *arena)
{
	u32 i;
	struct gcpage gcpage;
	unsigned char *logical;

	if (arena->pages == NULL) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"page array is NULL.\n",
			__func__, __LINE__);
		return;
	}


	logical = arena->logical;
	if (logical == NULL) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"buffer base is NULL.\n",
			__func__, __LINE__);
		return;
	}

	for (i = 0; i < arena->count; i += 1) {
		gcpage.order = get_order(PAGE_SIZE);
		gcpage.size = PAGE_SIZE;

		gcpage.pages = arena->pages[i];
		if (gcpage.pages == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"page structure %d is NULL.\n",
				__func__, __LINE__, i);
			continue;
		}

		gcpage.physical = page_to_phys(gcpage.pages);
		if (gcpage.physical == 0) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"physical address of page %d is 0.\n",
				__func__, __LINE__, i);
			continue;
		}

		gcpage.logical = (unsigned int *) (logical + i * PAGE_SIZE);
		if (gcpage.logical == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"virtual address of page %d is NULL.\n",
				__func__, __LINE__, i);
			continue;
		}
	}
}
Example #10
static int gc_suspend(struct platform_device *pdev, pm_message_t s)
{
	GCPRINT(GCDBGFILTER, GCZONE_POWER, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);
	if (gc_set_power(GCPWR_OFF))
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"suspend failure.\n",
			__func__, __LINE__);

	GCPRINT(GCDBGFILTER, GCZONE_POWER, "--" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	return 0;
}
Example #11
void gc_unmap(struct gcmap *gcmap)
{
	struct gccontextmap *context;

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	mutex_lock(&mtx);

	/* Locate the client entry. */
	gcmap->gcerror = find_context(&context, true);
	if (gcmap->gcerror != GCERR_NONE)
		goto exit;

	context->context->mmu_dirty = true;

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"unmap client buffer\n",
		__func__, __LINE__);

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  size = %d\n",
		__func__, __LINE__, gcmap->size);

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  handle = 0x%08X\n",
		__func__, __LINE__, gcmap->handle);

	/* Unmap the buffer. */
	gcmap->gcerror = mmu2d_unmap(&context->context->mmu,
					(struct mmu2darena *) gcmap->handle);
	if (gcmap->gcerror != GCERR_NONE)
		goto exit;

	/* Invalidate the MMU. */
	context->context->mmu_dirty = true;

	/* Invalidate the handle. */
	gcmap->handle = ~0U;

exit:
	mutex_unlock(&mtx);

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, "--" GC_MOD_PREFIX
		"gc%s = 0x%08X\n", __func__, __LINE__,
		(gcmap->gcerror == GCERR_NONE) ? "result" : "error",
		gcmap->gcerror);
}
Example #12
static int gc_probe(struct platform_device *pdev)
{
	int ret;

	g_gcxplat = (struct omap_gcx_platform_data *)pdev->dev.platform_data;
	g_reg_base = g_gcxplat->regbase;
	gcirq = platform_get_irq(pdev, pdev->id);

	ret = request_irq(gcirq, gc_irq, IRQF_SHARED,
				GC_DEV_NAME, &gcdevice);
	if (ret < 0) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to install IRQ (%d).\n",
			__func__, __LINE__, ret);
		return -ENODEV;
	}

	g_irqinstalled = true;

	/* Disable IRQ. */
	disable_irq(gcirq);
	g_irqenabled = false;

	gcdevice.dev = &pdev->dev;

	pm_runtime_enable(gcdevice.dev);
	(void)g_gcxplat->was_context_lost(gcdevice.dev);
	return 0;
}
Example #13
enum gcerror gcpwr_enable_clock(enum gcpower prevstate)
{
	bool ctxlost = g_gcxplat->was_context_lost(gcdevice.dev);
	if (!g_clockenabled) {
		/* Enable the clock. */
		pm_runtime_get_sync(gcdevice.dev);

		/* Signal software not idle. */
		gc_write_reg(GC_GP_OUT0_Address, 0);

		/* Clock enabled. */
		g_clockenabled = true;
	} else if (ctxlost) {
		u32 reg;
		dev_info(gcdevice.dev, "unexpected context\n");
		reg = gc_read_reg(GC_GP_OUT0_Address);
		if (reg) {
			dev_info(gcdevice.dev, "reset gchold\n");
			gc_write_reg(GC_GP_OUT0_Address, 0);
		}
	}
	GCPRINT(GCDBGFILTER, GCZONE_POWER, GC_MOD_PREFIX
		"clock %s.\n",
		__func__, __LINE__, g_clockenabled ? "enabled" : "disabled");

	if (ctxlost || prevstate == GCPWR_UNKNOWN)
		gc_reset_gpu();

	return GCERR_NONE;
}
Example #14
/* this is the thread which forwards exceptions read from the exception
   server off to our exception catchers and then back out to the other
   thread */
void exception_thread(void *shared_thread_state)
{
  mach_msg_header_t *message;
  mach_msg_header_t *reply;
  kern_return_t retval;

#ifdef USE_THREAD_LOCAL
  pthread_setspecific(scheme_thread_local_key, shared_thread_state);
#endif

  /* allocate the space for the message and reply */
  message = (mach_msg_header_t*)malloc(sizeof(mach_exc_msg_t));
  reply = (mach_msg_header_t*)malloc(sizeof(mach_reply_msg_t));
  /* do this loop forever */
  while(1) {
    /* block until we get an exception message */
    retval = mach_msg(message, MACH_RCV_MSG, 0, sizeof(mach_exc_msg_t), 
		      exc_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    /* forward off the handling of this message */
    if(!exc_server(message, reply)) {
      GCPRINT(GCOUTF, "INTERNAL ERROR: exc_server() didn't like something\n");
      abort();
    }
    /* send the message back out to the thread */
    retval = mach_msg(reply, MACH_SEND_MSG, sizeof(mach_reply_msg_t), 0, 
		      MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  }
}
Example #15
static void *print_out_pointer(const char *prefix, void *p,
			       GC_get_type_name_proc get_type_name,
			       GC_print_tagged_value_proc print_tagged_value,
                               int *_kind)
{
  trace_page_t *page;
  const char *what;

  page = pagemap_find_page(GC_instance->page_maps, p);
  if (!page || (trace_page_type(page) == TRACE_PAGE_BAD)) {
    GCPRINT(GCOUTF, "%s%s %p\n", prefix, trace_source_kind(*_kind), p);
    return NULL;
  }
  p = trace_pointer_start(page, p);

  if ((trace_page_type(page) == TRACE_PAGE_TAGGED)
      || (trace_page_type(page) == TRACE_PAGE_PAIR)) {
    Type_Tag tag;
    tag = *(Type_Tag *)p;
    if ((tag >= 0) && get_type_name && get_type_name(tag)) {
      print_tagged_value(prefix, p, 0, 1000, "\n");
    } else {
      GCPRINT(GCOUTF, "%s<#%d> %p\n", prefix, tag, p);
    }
    what = NULL;
  } else if (trace_page_type(page) == TRACE_PAGE_ARRAY) {
    what = "ARRAY";
  } else if (trace_page_type(page) == TRACE_PAGE_ATOMIC) {
    what = "ATOMIC";
  } else if (trace_page_type(page) == TRACE_PAGE_MALLOCFREE) {
    what = "MALLOCED";
  } else {
    what = "?!?";
  }

  if (what) {
    GCPRINT(GCOUTF, "%s%s%s %p\n", 
	    prefix, what, 
	    (trace_page_is_big(page) ? "b" : ""),
	    p);
  }

  return trace_backpointer(page, p, _kind);
}
Example #16
static int __init gc_init(void)
{
	/* check if hardware is available */
	if (!cpu_is_omap447x())
		return 0;

	/* Initialize context mutex. */
	mutex_init(&mtx);

	/* Initialize interrupt completion. */
	init_completion(&g_gccoreint);

	g_bb2d_dev = omap_hwmod_name_get_dev("bb2d");
	if (g_bb2d_dev == NULL) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"cannot find bb2d_fck.\n",
			 __func__, __LINE__);
		goto fail;
	}

	/* Initialize the command buffer. */
	if (cmdbuf_init() != GCERR_NONE) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to initialize command buffer.\n",
			 __func__, __LINE__);
		goto fail;
	}

	/* Create debugfs entry */
	g_debugRoot = debugfs_create_dir("gcx", NULL);
	if (g_debugRoot)
		gc_debug_init(g_debugRoot);

	mutex_init(&g_maplock);

#if defined(CONFIG_HAS_EARLYSUSPEND)
	register_early_suspend(&early_suspend_info);
#endif

	return platform_driver_register(&plat_drv);
fail:

	return -EINVAL;
}
Example #17
static void os_free_pages(void *p, size_t len)
{
  kern_return_t retval;

  retval = vm_deallocate(task_self, (vm_address_t)p, len);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "WARNING: couldn't deallocate page %p: %s\n", p,
	   mach_error_string(retval));
  }
}
Example #18
static enum gcerror get_fixup(struct gcfixup **gcfixup)
{
	enum gcerror gcerror;
	int bufferlocked = 0;
	struct gcfixup *temp;

	/* Acquire fixup access mutex. */
	gcerror = gc_acquire_mutex(&g_bufferlock, GC_INFINITE);
	if (gcerror != GCERR_NONE) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to acquire mutex (0x%08X).\n",
			__func__, __LINE__, gcerror);
		gcerror = GCERR_SETGRP(gcerror, GCERR_IOCTL_FIXUP_ALLOC);
		goto exit;
	}
	bufferlocked = 1;

	if (g_fixupvacant == NULL) {
		temp = kmalloc(sizeof(struct gcfixup), GFP_KERNEL);
		if (temp == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"out of memory.\n",
				__func__, __LINE__);
			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_IOCTL_FIXUP_ALLOC);
			goto exit;
		}
	} else {
		temp = g_fixupvacant;
		g_fixupvacant = g_fixupvacant->next;
	}

	*gcfixup = temp;

exit:
	if (bufferlocked)
		mutex_unlock(&g_bufferlock);

	return gcerror;
}
Example #19
enum gcerror gc_set_power(enum gcpower gcpower)
{
	enum gcerror gcerror = GCERR_NONE;

	if (gcpower == GCPWR_UNKNOWN) {
		gcerror = GCERR_POWER_MODE;
		goto exit;
	}

	if (gcpower != g_gcpower) {
		GCPRINT(GCDBGFILTER, GCZONE_POWER, GC_MOD_PREFIX
			"power state %d --> %d\n",
			__func__, __LINE__, g_gcpower, gcpower);

		switch (gcpower) {
		case GCPWR_ON:
			gcerror = gcpwr_enable_clock(g_gcpower);
			if (gcerror != GCERR_NONE)
				goto exit;

			gcpwr_disable_pulse_skipping(g_gcpower);

			if (!g_irqenabled) {
				enable_irq(gcirq);
				g_irqenabled = true;
			}
			break;

		case GCPWR_LOW:
			gcpwr_enable_pulse_skipping(g_gcpower);
			break;

		case GCPWR_OFF:
			gcpwr_disable_clock(g_gcpower);
			if (g_irqenabled) {
				disable_irq(gcirq);
				g_irqenabled = false;
			}
			break;

		default:
			gcerror = GCERR_POWER_MODE;
			goto exit;
		}

		/* Set new power state. */
		g_gcpower = gcpower;
	}

exit:
	return gcerror;
}
Example #20
static void os_protect_pages(void *p, size_t len, int writeable)
{
  kern_return_t retval;

  if(len & (page_size - 1)) {
    len += page_size - (len & (page_size - 1));
  }

  retval = vm_protect(task_self, (vm_address_t)p, len, FALSE,
		      writeable ? VM_PROT_ALL 
		      : (VM_PROT_READ | VM_PROT_EXECUTE));
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "WARNING: couldn't protect %li bytes of page %p%s\n",
	   len, p, mach_error_string(retval));
  }
}
Example #21
/* the VM subsystem as defined by the GC files */
static void *os_alloc_pages(size_t len)
{
  kern_return_t retval;
  void *r;

  if(!task_self) task_self = mach_task_self();

  /* round up to the nearest page: */
  if(len & (page_size - 1))
    len += page_size - (len & (page_size - 1));

  retval = vm_allocate(task_self, (vm_address_t*)&r, len, TRUE);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't allocate memory: %s\n", mach_error_string(retval));
    abort();
  }

  return r;
}
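os_alloc_pages() and os_protect_pages() round len up to a page boundary with the same mask arithmetic. Spelled out as a small helper, assuming the page size is a power of two:

#include <stddef.h>

static size_t round_up_to_page(size_t len, size_t page_size)
{
	/* len & (page_size - 1) is len modulo the page size; if it is
	   nonzero, pad len up to the next boundary. With page_size = 4096,
	   5000 becomes 8192 and 4096 stays 4096. */
	if (len & (page_size - 1))
		len += page_size - (len & (page_size - 1));
	return len;
}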
Example #22
void gcpwr_disable_clock(enum gcpower prevstate)
{
	if (!g_clockenabled)
		return;

	gc_debug_poweroff_cache();

	/* Signal software idle. */
	gc_write_reg(GC_GP_OUT0_Address, 1);

	/* Disable the clock. */
	pm_runtime_put_sync(gcdevice.dev);

	/* Clock disabled. */
	g_clockenabled = false;
	GCPRINT(GCDBGFILTER, GCZONE_POWER, GC_MOD_PREFIX
			"clock disabled.\n",
			__func__, __LINE__);
}
Example #23
void BTC_register_root_custodian(void *_c)
{
  NewGC *gc = GC_get_GC();
  Scheme_Custodian *c = (Scheme_Custodian *)_c;

  if (gc->owner_table) {
    /* Reset */
    ofm_free(gc->owner_table, sizeof(OTEntry*) * gc->owner_table_size);
    gc->owner_table = NULL;
    gc->owner_table_size = 0;
  }

  if (create_blank_owner_set(gc) != 1) {
    GCPRINT(GCOUTF, "Something extremely weird (and bad) has happened.\n");
    abort();
  }

  gc->owner_table[1]->originator = c;
  c->gc_owner_set = 1;
}
Example #24
static enum gcerror put_buffer_tree(struct gcbuffer *gcbuffer)
{
	enum gcerror gcerror;
	int bufferlocked = 0;
	struct gcbuffer *prev;
	struct gcbuffer *curr;

	/* Acquire buffer access mutex. */
	gcerror = gc_acquire_mutex(&g_bufferlock, GC_INFINITE);
	if (gcerror != GCERR_NONE) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to acquire mutex (0x%08X).\n",
			__func__, __LINE__, gcerror);
		gcerror = GCERR_SETGRP(gcerror, GCERR_IOCTL_BUF_ALLOC);
		goto exit;
	}
	bufferlocked = 1;

	prev = NULL;
	curr = gcbuffer;
	while (curr != NULL) {
		if (curr->fixuphead != NULL) {
			curr->fixuptail->next = g_fixupvacant;
			g_fixupvacant = curr->fixuphead;
		}

		prev = curr;
		curr = curr->next;
	}

	prev->next = g_buffervacant;
	g_buffervacant = gcbuffer;

exit:
	if (bufferlocked)
		mutex_unlock(&g_bufferlock);

	return gcerror;
}
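get_fixup() and put_buffer_tree() share a simple recycling scheme: allocation pops a node off a vacant list and falls back to kmalloc only when the list is empty, while release splices used nodes back onto it. A userspace sketch of the idiom with illustrative names (struct item, item_get, item_put):

#include <stdlib.h>

struct item { struct item *next; };

static struct item *vacant;	/* head of the vacant (recycled) list */

static struct item *item_get(void)
{
	struct item *p;

	if (vacant) {
		/* Reuse a recycled node. */
		p = vacant;
		vacant = vacant->next;
	} else {
		/* Nothing cached; allocate a fresh one. */
		p = malloc(sizeof(*p));
	}
	return p;
}

static void item_put(struct item *p)
{
	/* Push the node back for later reuse instead of freeing it. */
	p->next = vacant;
	vacant = p;
}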
Example #25
static void system_free_pages(void *p, size_t len)
{
  if (munmap(p, len)) {
    GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)p, (long)len, errno);
  }
}
Example #26
static void os_protect_pages(void *p, size_t len, int writeable)
{
  if (mprotect(p, len, (writeable ? (PROT_READ | PROT_WRITE) : PROT_READ)))
    GCPRINT(GCOUTF, "mprotect failed: %lx, %ld, %d, %d\n", (long)p, (long)len, writeable, errno);
}
Example #27
static void os_free_pages(void *p, size_t len)
{
  if (munmap(p, len)) {
    GCPRINT(GCOUTF, "unmap failed: %lx, %ld, %d\n", (long)p, (long)len, errno);
  }
}
Example #28
void gc_commit(struct gccommit *gccommit, int fromuser)
{
	struct gcbuffer *gcbuffer;
	unsigned int cmdflushsize;
	unsigned int mmuflushsize;
	unsigned int buffersize;
	unsigned int allocsize;
	unsigned int *logical;
	unsigned int address;
	struct gcmopipesel *gcmopipesel;
	struct gccontextmap *context;

	GCPRINT(GCDBGFILTER, GCZONE_COMMIT, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	mutex_lock(&mtx);

	/* Enable power to the chip. */
	gc_set_power(GCPWR_ON);

	/* Locate the client entry. */
	gccommit->gcerror = find_context(&context, true);
	if (gccommit->gcerror != GCERR_NONE)
		goto exit;

	context->context->mmu_dirty = true;

	/* Set the client's master table. */
	gccommit->gcerror = mmu2d_set_master(&context->context->mmu);
	if (gccommit->gcerror != GCERR_NONE)
		goto exit;

	/* Set 2D pipe. */
	gccommit->gcerror = cmdbuf_alloc(sizeof(struct gcmopipesel),
					(void **) &gcmopipesel, NULL);
	if (gccommit->gcerror != GCERR_NONE)
		goto exit;

	gcmopipesel->pipesel_ldst = gcmopipesel_pipesel_ldst;
	gcmopipesel->pipesel.reg = gcregpipeselect_2D;

	/* Determine command buffer flush size. */
	cmdflushsize = cmdbuf_flush(NULL);

	/* Go through all buffers one at a time. */
	gcbuffer = gccommit->buffer;
	while (gcbuffer != NULL) {
		GCPRINT(GCDBGFILTER, GCZONE_COMMIT, GC_MOD_PREFIX
			"gcbuffer = 0x%08X\n",
			__func__, __LINE__, gcbuffer);

		/* Compute the size of the command buffer. */
		buffersize
			= (unsigned char *) gcbuffer->tail
			- (unsigned char *) gcbuffer->head;

		GCPRINT(GCDBGFILTER, GCZONE_COMMIT, GC_MOD_PREFIX
			"buffersize = %d\n",
			__func__, __LINE__, buffersize);

		/* Determine MMU flush size. */
		mmuflushsize = context->context->mmu_dirty
			? mmu2d_flush(NULL, 0, 0) : 0;

		/* Reserve command buffer space. */
		allocsize = mmuflushsize + buffersize + cmdflushsize;
		gccommit->gcerror = cmdbuf_alloc(allocsize,
						(void **) &logical, &address);
		if (gccommit->gcerror != GCERR_NONE)
			goto exit;

		/* Append MMU flush. */
		if (context->context->mmu_dirty) {
			mmu2d_flush(logical, address, allocsize);

			/* Skip MMU flush. */
			logical = (unsigned int *)
				((unsigned char *) logical + mmuflushsize);

			/* Validate MMU state. */
			context->context->mmu_dirty = false;
		}

		if (fromuser) {
			/* Copy command buffer. */
			if (copy_from_user(logical, gcbuffer->head,
						buffersize)) {
				GCPRINT(NULL, 0, GC_MOD_PREFIX
					"failed to read data.\n",
					__func__, __LINE__);
				gccommit->gcerror = GCERR_USER_READ;
				goto exit;
			}
		} else {
			memcpy(logical, gcbuffer->head, buffersize);
		}

		/* Process fixups. */
		gccommit->gcerror = mmu2d_fixup(gcbuffer->fixuphead, logical);
		if (gccommit->gcerror != GCERR_NONE)
			goto exit;

		/* Skip the command buffer. */
		logical = (unsigned int *)
			((unsigned char *) logical + buffersize);

		/* Execute the current command buffer. */
		cmdbuf_flush(logical);

		/* Get the next buffer. */
		gcbuffer = gcbuffer->next;
	}

exit:
	gc_set_power(GCPWR_LOW);
	if (gforceoff)
		gc_set_power(GCPWR_OFF);

	mutex_unlock(&mtx);

	GCPRINT(GCDBGFILTER, GCZONE_COMMIT, "--" GC_MOD_PREFIX
		"gc%s = 0x%08X\n", __func__, __LINE__,
		(gccommit->gcerror == GCERR_NONE) ? "result" : "error",
		gccommit->gcerror);
}
Example #29
void gc_map(struct gcmap *gcmap)
{
	struct mmu2dphysmem mem;
	struct mmu2darena *mapped = NULL;
	struct gccontextmap *context;

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	mutex_lock(&mtx);

	/* Locate the client entry. */
	gcmap->gcerror = find_context(&context, true);
	if (gcmap->gcerror != GCERR_NONE)
		goto exit;

	context->context->mmu_dirty = true;

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"map client buffer\n",
		__func__, __LINE__);

	/* Initialize the mapping parameters. */
	if (gcmap->pagearray == NULL) {
		mem.base = ((u32) gcmap->buf.logical) & ~(PAGE_SIZE - 1);
		mem.offset = ((u32) gcmap->buf.logical) & (PAGE_SIZE - 1);
		mem.pages = NULL;

		GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
			"  logical = 0x%08X\n",
			__func__, __LINE__, (unsigned int) gcmap->buf.logical);
	} else {
		mem.base = 0;
		mem.offset = gcmap->buf.offset;
		mem.pages = gcmap->pagearray;

		GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
			"  pagearray = 0x%08X\n",
			__func__, __LINE__, (unsigned int) gcmap->pagearray);
	}

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  size = %d\n",
		__func__, __LINE__, gcmap->size);

	mem.count = DIV_ROUND_UP(gcmap->size + mem.offset, PAGE_SIZE);
	mem.pagesize = gcmap->pagesize ? gcmap->pagesize : PAGE_SIZE;

	/* Map the buffer. */
	gcmap->gcerror = mmu2d_map(&context->context->mmu, &mem, &mapped);
	if (gcmap->gcerror != GCERR_NONE)
		goto exit;

	/* Invalidate the MMU. */
	context->context->mmu_dirty = true;

	gcmap->handle = (unsigned int) mapped;

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  mapped address = 0x%08X\n",
		__func__, __LINE__, mapped->address);

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  handle = 0x%08X\n",
		__func__, __LINE__, (unsigned int) mapped);

exit:
	mutex_unlock(&mtx);

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, "--" GC_MOD_PREFIX
		"gc%s = 0x%08X\n", __func__, __LINE__,
		(gcmap->gcerror == GCERR_NONE) ? "result" : "error",
		gcmap->gcerror);
}
Example #30
static enum gcerror find_context(struct gccontextmap **context, int create)
{
	enum gcerror gcerror = GCERR_NONE;
	struct gccontextmap *prev;
	struct gccontextmap *curr;
	pid_t pid;

	/* Get current PID. */
	pid = 0;

	/* Search the list. */
	prev = NULL;
	curr = g_map;

	GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
		"scanning existing records for pid %d.\n",
		__func__, __LINE__, pid);

	/* Try to locate the record. */
	while (curr != NULL) {
		/* Found the record? */
		if (curr->pid == pid) {
			/* Move to the top of the list. */
			if (prev != NULL) {
				prev->next = curr->next;
				curr->next = g_map;
				g_map = curr;
			}

			/* Success. */
			GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
				"record is found @ 0x%08X\n",
				__func__, __LINE__, (unsigned int) curr);

			*context = curr;
			goto exit;
		}

		/* Get the next record. */
		prev = curr;
		curr = curr->next;
	}

	/* Not found, do we need to create a new one? */
	if (!create) {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"not found, exiting.\n",
			__func__, __LINE__);
		gcerror = GCERR_NOT_FOUND;
		goto exit;
	}

	/* Get new record. */
	if (g_mapvacant == NULL) {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"not found, allocating.\n",
			__func__, __LINE__);

		curr = kmalloc(sizeof(struct gccontextmap), GFP_KERNEL);
		if (curr == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"out of memory.\n",
				__func__, __LINE__);
			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_IOCTL_CTX_ALLOC);
			goto exit;
		}

		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"allocated @ 0x%08X\n",
			__func__, __LINE__, (unsigned int) curr);
	} else {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"not found, reusing record @ 0x%08X\n",
			__func__, __LINE__, (unsigned int) g_mapvacant);

		curr = g_mapvacant;
		g_mapvacant = g_mapvacant->next;
	}

	GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
		"creating new context.\n",
		__func__, __LINE__);

	curr->context = kzalloc(sizeof(*curr->context), GFP_KERNEL);
	if (curr->context == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_CTX_ALLOC);
		goto exit;
	}

	gcerror = mmu2d_create_context(&curr->context->mmu);
	if (gcerror != GCERR_NONE)
		goto free_map_ctx;

#if MMU_ENABLE
	gcerror = cmdbuf_map(&curr->context->mmu);
	if (gcerror != GCERR_NONE)
		goto free_2d_ctx;
#endif

	curr->context->mmu_dirty = true;

	g_clientref += 1;

	/* Success? */
	if (gcerror == GCERR_NONE) {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"new context created @ 0x%08X\n",
			__func__, __LINE__, (unsigned int) curr->context);

		/* Set the PID. */
		curr->pid = pid;

		/* Add to the list. */
		curr->prev = NULL;
		curr->next = g_map;
		if (g_map != NULL)
			g_map->prev = curr;
		g_map = curr;

		/* Set return value. */
		*context = curr;
	} else {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"failed to create a context.\n",
			__func__, __LINE__);

		/* Add the record to the vacant list. */
		curr->next = g_mapvacant;
		g_mapvacant = curr;
	}
	goto exit;

free_2d_ctx:
	mmu2d_destroy_context(&curr->context->mmu);
free_map_ctx:
	kfree(curr->context);
exit:
	return gcerror;
}
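find_context() keeps its per-process records in a linked list and moves the record it just found to the head, so repeated lookups by the same pid touch only the first node. A standalone sketch of that move-to-front lookup (struct ctx and ctx_lookup are illustrative names; the driver's list also keeps a prev pointer):

#include <stddef.h>

struct ctx {
	int pid;
	struct ctx *next;
};

static struct ctx *ctx_lookup(struct ctx **head, int pid)
{
	struct ctx *prev = NULL, *curr = *head;

	while (curr) {
		if (curr->pid == pid) {
			/* Found: unlink and reinsert at the head. */
			if (prev) {
				prev->next = curr->next;
				curr->next = *head;
				*head = curr;
			}
			return curr;
		}
		prev = curr;
		curr = curr->next;
	}
	return NULL;	/* not found; the caller may create a new record */
}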