Example #1
static int disp_aal_set_init_reg(DISP_AAL_INITREG __user *user_regs, void *cmdq)
{
    int ret = -EFAULT;
    DISP_AAL_INITREG *init_regs;

    init_regs = kmalloc(sizeof(*init_regs), GFP_KERNEL);
    if (init_regs == NULL) {
        AAL_ERR("disp_aal_set_init_reg: insufficient memory");
        return -ENOMEM;
    }

    ret = copy_from_user(init_regs, user_regs, sizeof(DISP_AAL_INITREG));
    if (ret == 0) {
        int i, j;
        int *gain;

        DISP_REG_MASK(cmdq, DISP_AAL_DRE_MAPPING_00, (init_regs->dre_map_bypass << 4), 1 << 4);

        gain = init_regs->cabc_gainlmt;
        j = 0;
        for (i = 0; i <= 10; i++) {
            DISP_REG_SET(cmdq, DISP_AAL_CABC_GAINLMT_TBL(i),
                CABC_GAINLMT(gain[j], gain[j + 1], gain[j + 2]));
            j += 3;
        }
    } else {
        AAL_ERR("disp_aal_set_init_reg: copy_from_user() failed");
        ret = -EFAULT; /* copy_from_user() returns bytes not copied, not an errno */
    }

    AAL_DBG("disp_aal_set_init_reg: %d", ret);

    kfree(init_regs);

    return ret;
}
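
One subtlety in the example above: copy_from_user() returns the number of bytes it could not copy, not an errno, so a nonzero result must be translated before it escapes to user space. A minimal sketch of the idiom; the demo_args payload and demo_set_args() handler are hypothetical names, not from the source:

#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_args {		/* hypothetical payload */
	int gain;
	int offset;
};

static int demo_set_args(struct demo_args __user *user_args)
{
	struct demo_args *args;
	int ret = 0;

	args = kmalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	/* Nonzero means some bytes were left uncopied: report -EFAULT. */
	if (copy_from_user(args, user_args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}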
Example #2
static long AAL_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long err = 0;
	void __user *ptr = (void __user *)arg;
	int dat;
	uint32_t enable;

	switch (cmd) {
	case AAL_SET_ALS_MODE:
		if (copy_from_user(&enable, ptr, sizeof(enable))) {
			err = -EFAULT;
			goto err_out;
		}

		aal_use = enable ? 1 : 0;

		if ((err = alsps_aal_enable(enable)) != 0) {
			AAL_LOG("ALS driver does not support the new architecture, falling back to the old one: %ld\n", err);
			if ((err = hwmsen_aal_enable(enable)) != 0)
				AAL_ERR("Enabling the ALS driver failed: %ld\n", err);
		}
		break;

	case AAL_GET_ALS_MODE:
		AAL_LOG("AAL_GET_ALS_MODE: nothing to do\n");
		break;

	case AAL_GET_ALS_DATA:
		if ((dat = alsps_aal_get_data()) < 0) {
			AAL_LOG("alsps_aal_get_data() failed, falling back to hwmsen\n");
			dat = hwmsen_aal_get_data();
		}

		AAL_LOG("Got ALS data: %d\n", dat);

		if (copy_to_user(ptr, &dat, sizeof(dat))) {
			err = -EFAULT;
			goto err_out;
		}
		break;

	default:
		AAL_ERR("%s: unsupported command 0x%04x", __func__, cmd);
		err = -ENOIOCTLCMD;
		break;
	}

err_out:
	return err;
}
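
For reference, a user-space caller for a handler like this opens the misc device node and issues the ioctl directly. A hedged sketch follows; the /dev/aal path and the header exporting AAL_GET_ALS_DATA are assumptions, not taken from the source:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "aal_ioctl.h"	/* assumed uapi header exporting AAL_GET_ALS_DATA */

int main(void)
{
	int fd = open("/dev/aal", O_RDWR);	/* device node name assumed */
	int dat = 0;

	if (fd < 0) {
		perror("open /dev/aal");
		return 1;
	}
	if (ioctl(fd, AAL_GET_ALS_DATA, &dat) < 0)
		perror("AAL_GET_ALS_DATA");
	else
		printf("ambient light: %d\n", dat);
	close(fd);
	return 0;
}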
Example #3
static int aal_io(DISP_MODULE_ENUM module, int msg, unsigned long arg, void *cmdq)
{
	int ret = 0;

	switch (msg) {
	case DISP_IOCTL_AAL_EVENTCTL:
		{
			int enabled;

			if (copy_from_user(&enabled, (void __user *)arg, sizeof(enabled))) {
				AAL_ERR("DISP_IOCTL_AAL_EVENTCTL: copy_from_user() failed");
				return -EFAULT;
			}

			disp_aal_set_interrupt(enabled);

			if (enabled)
				disp_aal_trigger_refresh();

			break;
		}
	case DISP_IOCTL_AAL_GET_HIST:
		{
			disp_aal_wait_hist(60);

			if (disp_aal_copy_hist_to_user((DISP_AAL_HIST *) arg) < 0) {
				AAL_ERR("DISP_IOCTL_AAL_GET_HIST: copy_to_user() failed");
				return -EFAULT;
			}
			break;
		}
	case DISP_IOCTL_AAL_INIT_REG:
		{
			if (disp_aal_set_init_reg((DISP_AAL_INITREG *) arg, cmdq) < 0) {
				AAL_ERR("DISP_IOCTL_AAL_INIT_REG: failed");
				return -EFAULT;
			}
			break;
		}
	case DISP_IOCTL_AAL_SET_PARAM:
		{
			if (disp_aal_set_param((DISP_AAL_PARAM *) arg, cmdq) < 0) {
				AAL_ERR("DISP_IOCTL_AAL_SET_PARAM: failed");
				return -EFAULT;
			}
			break;
		}
	}

	return ret;
}
Example #4
//
// Construct MPFWRO object. Check for WRO feature presence.
//
MPFWRO::MPFWRO( IALIMMIO   *pMMIOService,
                btCSROffset vtpDFHOffset ) : m_pALIMMIO( pMMIOService ),
                                             m_dfhOffset( vtpDFHOffset ),
                                             m_isOK( false )
{
   btBool ret;                                // for error checking

   ASSERT( m_pALIMMIO != NULL );
   ASSERT( m_dfhOffset != -1 );

   // Check BBB GUID (are we really a WRO?)
   btcString sGUID = MPF_WRO_BBB_GUID;
   btUnsigned64bitInt readBuf[2];

   ret = m_pALIMMIO->mmioRead64(m_dfhOffset + 8, &readBuf[0]);
   ASSERT(ret);
   ret = m_pALIMMIO->mmioRead64(m_dfhOffset + 16, &readBuf[1]);
   ASSERT(ret);
   if ( 0 != strncmp( sGUID, GUIDStringFromStruct(GUIDStructFrom2xU64(readBuf[1], readBuf[0])).c_str(), 36 ) ) {
      AAL_ERR(LM_AFU, "Feature GUID does not match WRO GUID.");
      m_isOK = false;
      return;
   }

   AAL_INFO(LM_AFU, "Found and successfully identified WRO feature." << std::endl);

   m_isOK = true;
}
Example #5
static void disp_aal_set_interrupt(int enabled)
{
#ifdef CONFIG_MTK_AAL_SUPPORT
	if (enabled) {
		if (DISP_REG_GET(DISP_AAL_EN) == 0) {
			AAL_DBG("[WARNING] DISP_AAL_EN not enabled!");
		}

		/* Enable output frame end interrupt */
		DISP_CPU_REG_SET(DISP_AAL_INTEN, 0x2);
		AAL_DBG("Interrupt enabled");
	} else {
		if (g_aal_dirty_frame_retrieved) {
			DISP_CPU_REG_SET(DISP_AAL_INTEN, 0x0);
			AAL_DBG("Interrupt disabled");
		} else {	/* Dirty histogram was not retrieved */
			/* The interrupt may only be disabled after the dirty
			   histogram has been retrieved; keep it firing until
			   AALService has read the latest histogram. */
		}
	}

#else
	AAL_ERR("AAL driver is not enabled");
#endif
}
Example #6
/*----------------------------------------------------------------------------*/
static void __exit AAL_exit(void)
{
	int err;

	if ((err = misc_deregister(&AAL_device)))
		AAL_ERR("AAL_device misc_deregister failed: %d\n", err);
}
/*----------------------------------------------------------------------------*/
static int __init AAL_init(void)
{
	int err;

	if ((err = misc_register(&AAL_device))) {
		AAL_ERR("AAL_device misc_register failed: %d\n", err);
		return err;
	}
	AAL_FUN("OK!\n");
	return 0;
}
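
The misc_register()/misc_deregister() pair above operates on a struct miscdevice. A minimal sketch of what AAL_device might look like; the "aal" node name is an assumption, and the fops wiring points at the ioctl handler shown earlier:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations AAL_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = AAL_unlocked_ioctl,	/* handler from Example #2 */
};

static struct miscdevice AAL_device = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the kernel pick a minor number */
	.name  = "aal",			/* creates /dev/aal (name assumed) */
	.fops  = &AAL_fops,
};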
Example #7
//
// Free a virtual buffer, walking its pages until the end-of-allocation flag
// is reached.
//
ali_errnum_e MPFVTP::bufferFree(btVirtAddr Address)
{
   btVirtAddr va = Address;
   btPhysAddr pa;
   MPFVTP_PAGE_SIZE size;
   uint32_t flags;
   bool ret;

   // Is the address the beginning of an allocation region?
   if (! ptTranslateVAtoPA(va, &pa, &flags)) {
      AAL_ERR(LM_All, "VA not allocated" << std::endl);
      return ali_errnumNoMem;
   }
   if ((flags & MPFVTP_PT_FLAG_ALLOC_START) == 0) {
      AAL_ERR(LM_All, "VA not start of allocated region" << std::endl);
      return ali_errnumNoMem;
   }

   while (ptRemovePageMapping(va, NULL, NULL, &size, &flags)) {
      ret = ptInvalVAMapping(va);
      MPF_ASSERT_RET(ret, ali_errnumNoMem);
      if (!ret) {
         return ali_errnumNoMem;
      }

      m_pALIBuffer->bufferFree(va);

      // Done?
      if (flags & MPFVTP_PT_FLAG_ALLOC_END) {
#if defined(ENABLE_DEBUG) && (0 != ENABLE_DEBUG)
         ptDumpPageTable();
#endif
         return ali_errnumOK;
      }

      // Next page address
      va += (size == MPFVTP_PAGE_2MB ? LARGE_PAGE_SIZE : SMALL_PAGE_SIZE);
   }

   AAL_ERR(LM_All, "bufferFree translation error" << std::endl);
   return ali_errnumNoMem;
}
Example #8
//
// Allocate a page of size pageSize at virtual address va and add an entry to
// the VTP page table.
//
ali_errnum_e MPFVTP::_allocate(btVirtAddr va, size_t pageSize, uint32_t flags)
{
   ali_errnum_e err;
   MPFVTP_PAGE_SIZE mapType;

   // FIXME: can we reuse this? expensive! static?
   NamedValueSet *bufAllocArgs = new NamedValueSet();
   btVirtAddr alloc;

   AAL_DEBUG(LM_AFU, "_allocate(" << std::hex << std::setw(2) << std::setfill('0') << (void *)va << ", " << std::dec << (unsigned int)pageSize << ")" << std::endl);

   // determine mapping type for page table entry
   if (pageSize == LARGE_PAGE_SIZE) {
      mapType = MPFVTP_PAGE_2MB;
   } else if (pageSize == SMALL_PAGE_SIZE) {
      mapType = MPFVTP_PAGE_4KB;
   } else {
      AAL_ERR(LM_AFU, "Invalid page size." << std::endl);
      return ali_errnumBadParameter;
   }

   // allocate buffer at va
   bufAllocArgs->Add(ALI_MMAP_TARGET_VADDR_KEY, static_cast<ALI_MMAP_TARGET_VADDR_DATATYPE>(va));
   err = m_pALIBuffer->bufferAllocate(pageSize, &alloc, *bufAllocArgs);

   // insert VTP page table entry
   if (err == ali_errnumOK) {
      MPF_ASSERT_RET(va == alloc, ali_errnumNoMem);

      if (! ptInsertPageMapping(btVirtAddr(va),
                                m_pALIBuffer->bufferGetIOVA((unsigned char *)va),
                                mapType,
                                flags)) {
         AAL_ERR(LM_All, "Page table insertion error." << std::endl);
         err = ali_errnumBadMapping;
      }
   }

   delete bufAllocArgs;
   return err;
}
Example #9
static int disp_aal_set_init_reg(DISP_AAL_INITREG __user *user_regs, void *cmdq)
{
	int ret = -EFAULT;
#ifdef CONFIG_MTK_AAL_SUPPORT
	DISP_AAL_INITREG *init_regs;

	init_regs = &g_aal_init_regs;

	ret = copy_from_user(init_regs, user_regs, sizeof(DISP_AAL_INITREG));
	if (ret == 0) {
		g_aal_is_init_regs_valid = 1;
		ret = disp_aal_write_init_regs(cmdq);
	} else {
		AAL_ERR("disp_aal_set_init_reg: copy_from_user() failed");
		ret = -EFAULT;	/* copy_from_user() returns bytes not copied, not an errno */
	}

	AAL_DBG("disp_aal_set_init_reg: %d", ret);
#else
	AAL_ERR("disp_aal_set_init_reg: AAL not supported");
#endif

	return ret;
}
Example #10
//
// Construct MPFVTP object. Check for VTP feature presence, and initialize the
// page table.
//
MPFVTP::MPFVTP( IALIBuffer *pBufferService,
                IALIMMIO   *pMMIOService,
                btCSROffset vtpDFHOffset ) : m_pALIBuffer( pBufferService),
                                             m_pALIMMIO( pMMIOService ),
                                             m_dfhOffset( vtpDFHOffset ),
                                             m_isOK( false )
{
   btBool ret;                                // for error checking

   ASSERT( m_pALIBuffer != NULL );
   ASSERT( m_pALIMMIO != NULL );
   ASSERT( m_dfhOffset != -1 );

   // Check BBB GUID (are we really a VTP?)
   btcString sGUID = MPF_VTP_BBB_GUID;
   btUnsigned64bitInt readBuf[2];

   ret = m_pALIMMIO->mmioRead64(m_dfhOffset + 8, &readBuf[0]);
   ASSERT(ret);
   ret = m_pALIMMIO->mmioRead64(m_dfhOffset + 16, &readBuf[1]);
   ASSERT(ret);
   if ( 0 != strncmp( sGUID, GUIDStringFromStruct(GUIDStructFrom2xU64(readBuf[1], readBuf[0])).c_str(), 36 ) ) {
      AAL_ERR(LM_AFU, "Feature GUID does not match VTP GUID.");
      m_isOK = false;
      return;
   }

   AAL_INFO(LM_AFU, "Found and successfully identified VTP feature." << std::endl);

   // Allocate the page table.  The size of the page table is a function
   // of the PTE index space.
   ret = ptInitialize();
   ASSERT(ret);

   // Invalidate TLB and tell the hardware the address of the table
   ret = vtpReset();
   ASSERT(ret);

   m_isOK = true;
}
Example #11
//
// Allocate a virtual buffer, potentially assembling it from several physical
// buffers.
//
ali_errnum_e MPFVTP::bufferAllocate( btWSSize             Length,
                                     btVirtAddr          *pBufferptr,
                                     NamedValueSet const &rInputArgs,
                                     NamedValueSet       &rOutputArgs )
{
   AutoLock(this);

   AAL_DEBUG(LM_AFU, "Trying to allocate virtual buffer of size " << std::dec << Length << std::endl);

   btBool ret;
   void *pRet;                      // for error checking
   ali_errnum_e err;

   // FIXME: InputArgs/OutputArgs are ignored here...

   // Round the request size up to a proper page size.
   // If the tail (the remainder of Length that doesn't fill a large page)
   // is large enough, extend Length to a multiple of the large page size.
   // Otherwise, round it up to a multiple of the small (4 KB) page size.
   size_t tail = Length % LARGE_PAGE_SIZE;
   AAL_DEBUG(LM_AFU, "tail: " << std::dec << tail << std::endl);
   if (tail > CCI_MPF_VTP_LARGE_PAGE_THRESHOLD) {
      // if tail is large enough, align with large page size
      Length = (Length + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;
      tail = 0;
   } else {
      // otherwise, align with small page size
      Length = (Length + SMALL_PAGE_SIZE - 1) & SMALL_PAGE_MASK;
      tail = Length % LARGE_PAGE_SIZE;
   }
   size_t nLargeBuffers = Length / LARGE_PAGE_SIZE;
   size_t nSmallBuffers = (Length % LARGE_PAGE_SIZE) / SMALL_PAGE_SIZE;
   MPF_ASSERT_RET( Length % SMALL_PAGE_SIZE == 0, ali_errnumNoMem );

   AAL_DEBUG(LM_AFU, "padded Length: " << std::dec << Length << std::endl);
   AAL_DEBUG(LM_AFU, std::dec << nLargeBuffers << " large and " << nSmallBuffers << " small buffers" << std::endl);
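
   // Worked example (values assumed, with 2 MB large and 4 KB small pages):
   // Length = 6 MB + 100 KB leaves tail = 100 KB. If that is below the
   // large-page threshold, Length is only rounded up to a 4 KB multiple
   // (here it already is one), giving nLargeBuffers = 3 and
   // nSmallBuffers = 25.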

   // Map a region of the requested size.  This will reserve a virtual
   // memory address space.  As pages are allocated they will be
   // mapped into this space.
   //
   // An extra page is added to the request in order to enable alignment
   // of the base address.  Linux is only guaranteed to return 4 KB aligned
   // addresses and we want large page aligned virtual addresses.
   // TODO: Assumption is still that virtual buffer needs to be large-page
   //        (2MB) aligned, even smaller ones. Make that configurable.
   void* va_base;
   size_t va_base_len = Length + LARGE_PAGE_SIZE;
   va_base = mmap(NULL, va_base_len,
                  PROT_READ | PROT_WRITE,
                  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   MPF_ASSERT_RET(va_base != MAP_FAILED, ali_errnumNoMem);
   AAL_DEBUG(LM_AFU, "va_base " << std::hex << std::setw(2) << std::setfill('0') << va_base << std::endl);

   void* va_aligned = (void*)((size_t(va_base) + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK);
   AAL_DEBUG(LM_AFU, "va_aligned " << std::hex << std::setw(2) << std::setfill('0') << va_aligned << std::endl);

   // Trim off the unnecessary extra space after alignment
   size_t trim = LARGE_PAGE_SIZE - (size_t(va_aligned) - size_t(va_base));
   AAL_DEBUG(LM_AFU, "va_base_len trimmed by " << std::hex << std::setw(2) << std::setfill('0') << trim << " to " << va_base_len - trim << std::endl);
   pRet = mremap(va_base, va_base_len, va_base_len - trim, 0);
   MPF_ASSERT_RET(va_base == pRet, ali_errnumNoMem);
   va_base_len -= trim;
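
   // Worked example (addresses assumed): with va_base = 0x7f0000001000 and
   // 2 MB large pages, va_aligned = 0x7f0000200000, so trim =
   // 0x200000 - 0x1ff000 = 0x1000 and the reservation shrinks by 4 KB.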

   // Start at the end of the virtual buffer and work backwards: begin with
   // small buffers until we are done or hit a large-buffer alignment
   // boundary, then continue with large buffers. If a large-buffer
   // allocation fails, fall back to small pages.
   // TODO: make large page allocation threshold configurable

   void * va_alloc = (void *)(size_t(va_aligned) + Length);

   // Flags to indicate first/last page in an allocated region, stored in
   // the page table
   uint32_t pt_flags = MPFVTP_PT_FLAG_ALLOC_END;

   // -------------------------------------------------------------------------
   // small buffer allocation loop
   // -------------------------------------------------------------------------
   // Run to allocate small buffers until we can cover the remaining space with
   // large buffers.
   while ((size_t(va_alloc) & ( LARGE_PAGE_SIZE-1 )) != 0) {

      va_alloc = (void *)(size_t(va_alloc) - SMALL_PAGE_SIZE);

      // Shrink the reserved area in order to make a hole in the virtual
      // address space.
      pRet = mremap(va_base, va_base_len, va_base_len - SMALL_PAGE_SIZE, 0);
      MPF_ASSERT_RET(va_base == pRet, ali_errnumNoMem);
      va_base_len -= SMALL_PAGE_SIZE;

      // allocate buffer
      if (Length <= SMALL_PAGE_SIZE) {
         pt_flags |= MPFVTP_PT_FLAG_ALLOC_START;
      }
      err = _allocate((btVirtAddr)va_alloc, SMALL_PAGE_SIZE, pt_flags);
      if (err != ali_errnumOK) {
         AAL_ERR(LM_AFU, "Unable to allocate buffer. Err: " << err);
         // FIXME: leaking already allocated pages!
         return err;
      }

      pt_flags = 0;
      Length -= SMALL_PAGE_SIZE;
   }

   AAL_DEBUG(LM_AFU, "len remaining: " << std::dec << Length << std::endl);


   // -------------------------------------------------------------------------
   // large buffer allocation loop
   // -------------------------------------------------------------------------
   // Run for the remaining space, which should be an integer multiple of the
   // large buffer size and aligned to large-buffer boundaries. If a large
   // buffer allocation fails, fall back to small buffers.
   size_t effPageSize = LARGE_PAGE_SIZE;     // page size used for actual allocations

   while (Length > 0) {

      va_alloc = (void *)(size_t(va_alloc) - effPageSize);

      // Shrink the reserved area in order to make a hole in the virtual
      // address space. If this is the last buffer to allocate, unmap reserved
      // area.
      if (va_base_len == effPageSize) {
         munmap(va_base, va_base_len);
         va_base_len = 0;
      } else {
         pRet = mremap(va_base, va_base_len, va_base_len - effPageSize, 0);
         MPF_ASSERT_RET(va_base == pRet, ali_errnumNoMem);
         va_base_len -= effPageSize;
      }

      // allocate buffer
      if (Length <= effPageSize) {
         pt_flags |= MPFVTP_PT_FLAG_ALLOC_START;
      }
      err = _allocate((btVirtAddr)va_alloc, effPageSize, pt_flags);
      if (err != ali_errnumOK) {
         if (effPageSize == LARGE_PAGE_SIZE) {
            // fall back to small buffers:
            // restore last large mapping
            if (va_base_len == 0) {
               // corner case: this was the last mapping - we destroyed it, so
               // try to restore it.
               va_base = mmap(va_alloc, LARGE_PAGE_SIZE,
                              PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
               MPF_ASSERT_RET(va_base == va_alloc, ali_errnumNoMem);
            } else {
               // this was not the last mapping (or va_base is not aligned), so
               // we still have a valid reserved space. Just resize it back up.
               pRet = mremap(va_base, va_base_len, va_base_len + LARGE_PAGE_SIZE, 0);
               MPF_ASSERT_RET(pRet == va_base, ali_errnumNoMem);
            }
            va_base_len += LARGE_PAGE_SIZE;
            va_alloc = (void *)(size_t(va_alloc) + LARGE_PAGE_SIZE);
            effPageSize = SMALL_PAGE_SIZE;
            continue;    // try again with small buffers
         } else {
            // already using small buffers, nowhere to fall back to.
            AAL_ERR(LM_AFU, "Unable to allocate buffer. Err: " << err);
            // FIXME: leaking already allocated pages!
            return err;
         }
      }

      // mapping successful, on to the next
      pt_flags = 0;
      Length -= effPageSize;
   }

   // clean up
   if (va_base_len != 0)
   {
       munmap(va_base, va_base_len);
   }

#if defined(ENABLE_DEBUG) && (0 != ENABLE_DEBUG)
   ptDumpPageTable();
#endif

   *pBufferptr = (btVirtAddr)va_aligned;
   return ali_errnumOK;
}
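
The reserve-then-trim trick used above (over-reserve with mmap(), align the base up, shrink the reservation with mremap()) also works outside the MPF classes. A self-contained user-space sketch of the same technique in plain C; the sizes and the 2 MB alignment target are chosen for illustration:

#define _GNU_SOURCE
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define LARGE_PAGE ((size_t)2 * 1024 * 1024)

int main(void)
{
    size_t len = 4 * LARGE_PAGE;           /* payload size (assumed) */
    size_t reserve_len = len + LARGE_PAGE; /* extra page for alignment */

    /* Reserve address space; pages are not committed until touched. */
    void *base = mmap(NULL, reserve_len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(base != MAP_FAILED);

    /* Align the usable region up to the next large-page boundary. */
    uintptr_t aligned = ((uintptr_t)base + LARGE_PAGE - 1)
                        & ~(uintptr_t)(LARGE_PAGE - 1);

    /* Trim the slack off the end, shrinking the mapping in place. */
    size_t trim = LARGE_PAGE - (size_t)(aligned - (uintptr_t)base);
    void *r = mremap(base, reserve_len, reserve_len - trim, 0);
    assert(r == base);

    printf("usable region: %p, %zu bytes\n", (void *)aligned, len);
    munmap(base, reserve_len - trim);
    return 0;
}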