  1. From a95f49b0c480da1a9d9cdbe0cf1aee6de3828eb5 Mon Sep 17 00:00:00 2001
  2. From: Brendan Shanks <mrpippy@gmail.com>
  3. Date: Sat, 6 Feb 2010 23:53:54 -0800
  4. Subject: [PATCH] Make donut/eclair hw3d a config choice
  5.  
  6. Make the donut/eclair hw3d driver a config choice. Both the Donut and
  7. Eclair hw3d drivers are tested and working. For Eclair hw3d, the gpu0
  8. pmem region had to be made smaller so it wouldn't run into ramconsole.
  9. ---
  10. arch/arm/configs/htc_msm_android_defconfig | 6 +-
  11. arch/arm/mach-msm/Kconfig | 14 +-
  12. arch/arm/mach-msm/Makefile | 1 +
  13. arch/arm/mach-msm/hw3d.c | 832 +++++++++++++++++++++++-----
  14. arch/arm/mach-msm/hw3d_donut.c | 213 +++++++
  15. arch/arm/mach-msm/pmem.c | 51 ++-
  16. 6 files changed, 985 insertions(+), 132 deletions(-)
  17. create mode 100644 arch/arm/mach-msm/hw3d_donut.c
  18.  
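[Editor's note: with this patch applied, the two hw3d drivers become mutually exclusive entries in the new "MSM Hardware 3D Register Driver" Kconfig choice. As a minimal sketch, using only the option names introduced in the Kconfig hunk below, a board defconfig targeting the Donut userspace OpenGL|ES library would select:

    CONFIG_MSM_HW3D_DONUT=y
    # CONFIG_MSM_HW3D is not set

whereas the htc_msm_android_defconfig hunk below keeps the Eclair driver (CONFIG_MSM_HW3D=y) and leaves CONFIG_MSM_HW3D_DONUT unset.]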
  19. diff --git a/arch/arm/configs/htc_msm_android_defconfig b/arch/arm/configs/htc_msm_android_defconfig
  20. index 4bb2457..f4fa3f2 100644
  21. --- a/arch/arm/configs/htc_msm_android_defconfig
  22. +++ b/arch/arm/configs/htc_msm_android_defconfig
  23. @@ -1,7 +1,7 @@
  24. #
  25. # Automatically generated make config: don't edit
  26. # Linux kernel version: 2.6.27
  27. -# Mon Feb 1 22:52:21 2010
  28. +# Sun Feb 7 00:18:24 2010
  29. #
  30. CONFIG_ARM=y
  31. CONFIG_SYS_SUPPORTS_APM_EMULATION=y
  32. @@ -246,6 +246,7 @@ CONFIG_MSM_CPU_FREQ_ONDEMAND=y
  33. # CONFIG_MSM_CPU_FREQ_SCREEN is not set
  34. CONFIG_MSM_CPU_FREQ_ONDEMAND_MAX=528000
  35. CONFIG_MSM_CPU_FREQ_ONDEMAND_MIN=128000
  36. +# CONFIG_MSM_HW3D_DONUT is not set
  37. CONFIG_MSM_HW3D=y
  38. CONFIG_MSM_ADSP=y
  39. # CONFIG_HTC_FB_CONSOLE is not set
  40. @@ -293,7 +294,8 @@ CONFIG_PREEMPT=y
  41. CONFIG_HZ=100
  42. CONFIG_AEABI=y
  43. # CONFIG_OABI_COMPAT is not set
  44. -CONFIG_ARCH_FLATMEM_HAS_HOLES=y
  45. +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
  46. +CONFIG_HOLES_IN_ZONE=y
  47. # CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
  48. CONFIG_SELECT_MEMORY_MODEL=y
  49. CONFIG_FLATMEM_MANUAL=y
  50. diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
  51. index 4df8fbf..5e359c8 100644
  52. --- a/arch/arm/mach-msm/Kconfig
  53. +++ b/arch/arm/mach-msm/Kconfig
  54. @@ -397,13 +397,21 @@ config MSM_CPU_FREQ_ONDEMAND_MIN
  55.  
  56. endif # MSM_CPU_FREQ_ONDEMAND
  57.  
  58. +choice
  59. +prompt "MSM Hardware 3D Register Driver"
  60. +config MSM_HW3D_DONUT
  61. + depends on ANDROID_PMEM
  62. + bool "Android 1.6 (Cupcake or Donut)"
  63. + help
  64. + Provides access to registers needed by the userspace OpenGL|ES
  65. + library for Cupcake and Donut.
  66. config MSM_HW3D
  67. depends on ANDROID_PMEM
  68. - tristate "MSM Hardware 3D Register Driver"
  69. - default y
  70. + bool "Android 2.0+ (Eclair)"
  71. help
  72. Provides access to registers needed by the userspace OpenGL|ES
  73. - library.
  74. + library for Eclair.
  75. +endchoice
  76.  
  77. config MSM_ADSP
  78. tristate "MSM ADSP driver"
  79. diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
  80. index a109e7f..090265c 100644
  81. --- a/arch/arm/mach-msm/Makefile
  82. +++ b/arch/arm/mach-msm/Makefile
  83. @@ -27,6 +27,7 @@ obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter_servers.o
  84. #obj-$(CONFIG_MSM_RPCSERVERS) += rpc_server_dog_keepalive.o
  85. obj-$(CONFIG_MSM_RPCSERVERS) += rpc_server_time_remote.o
  86. obj-$(CONFIG_MSM_ADSP) += qdsp5/
  87. +obj-$(CONFIG_MSM_HW3D_DONUT) += hw3d_donut.o
  88. obj-$(CONFIG_MSM_HW3D) += hw3d.o
  89. obj-$(CONFIG_PM) += pm.o
  90. obj-$(CONFIG_MSM_CPU_FREQ) += cpufreq.o
  91. diff --git a/arch/arm/mach-msm/hw3d.c b/arch/arm/mach-msm/hw3d.c
  92. index b013b57..8fd46c5 100644
  93. --- a/arch/arm/mach-msm/hw3d.c
  94. +++ b/arch/arm/mach-msm/hw3d.c
  95. @@ -4,6 +4,7 @@
  96. *
  97. * Copyright (C) 2007 Google, Inc.
  98. * Author: Brian Swetland <swetland@google.com>
  99. + * Heavily modified: Dima Zavin <dima@android.com>
  100. *
  101. * This software is licensed under the terms of the GNU General Public
  102. * License version 2, as published by the Free Software Foundation, and
  103. @@ -16,80 +17,189 @@
  104. *
  105. */
  106.  
  107. -#include <linux/module.h>
  108. +#define DEBUG
  109. +#define VERBOSE
  110. +
  111. +#include <linux/clk.h>
  112. +#include <linux/earlysuspend.h>
  113. +#include <linux/file.h>
  114. #include <linux/fs.h>
  115. +#include <linux/interrupt.h>
  116. +#include <linux/irq.h>
  117. #include <linux/miscdevice.h>
  118. -#include <linux/uaccess.h>
  119. +#include <linux/mm.h>
  120. +#include <linux/module.h>
  121. +#include <linux/msm_hw3d.h>
  122. +#include <linux/mutex.h>
  123. +#include <linux/platform_device.h>
  124. #include <linux/poll.h>
  125. +#include <linux/sched.h>
  126. #include <linux/time.h>
  127. -#include <linux/irq.h>
  128. -#include <linux/interrupt.h>
  129. +#include <linux/uaccess.h>
  130. #include <linux/wait.h>
  131. -#include <linux/mm.h>
  132. -#include <linux/clk.h>
  133. -#include <linux/android_pmem.h>
  134. +#include <linux/wakelock.h>
  135. +#include <asm/io.h>
  136. +
  137. #include <mach/board.h>
  138.  
  139. -static DEFINE_SPINLOCK(hw3d_lock);
  140. -static DECLARE_WAIT_QUEUE_HEAD(hw3d_queue);
  141. -static int hw3d_pending;
  142. -static int hw3d_disabled;
  143. +#if defined(VERBOSE)
  144. +#define VDBG(x...) pr_debug(x)
  145. +#else
  146. +#define VDBG(x...) do {} while(0)
  147. +#endif
  148.  
  149. -static struct clk *grp_clk;
  150. -static struct clk *imem_clk;
  151. -DECLARE_MUTEX(hw3d_sem);
  152. -static unsigned int hw3d_granted;
  153. -static struct file *hw3d_granted_file;
  154. +struct mem_region {
  155. + unsigned long pbase;
  156. + unsigned long size;
  157. + void __iomem *vbase;
  158. +};
  159.  
  160. -static irqreturn_t hw3d_irq_handler(int irq, void *data)
  161. +struct hw3d_info {
  162. + struct miscdevice master_dev;
  163. + struct miscdevice client_dev;
  164. +
  165. + struct clk *grp_clk;
  166. + struct clk *imem_clk;
  167. + int irq;
  168. +
  169. + struct mem_region regions[HW3D_NUM_REGIONS];
  170. +
  171. + wait_queue_head_t irq_wq;
  172. + bool irq_pending;
  173. + bool irq_en;
  174. + bool suspending;
  175. + bool revoking;
  176. + bool enabled;
  177. +
  178. + struct timer_list revoke_timer;
  179. + wait_queue_head_t revoke_wq;
  180. + wait_queue_head_t revoke_done_wq;
  181. +
  182. + spinlock_t lock;
  183. +
  184. + struct file *client_file;
  185. + struct task_struct *client_task;
  186. +
  187. + struct early_suspend early_suspend;
  188. + struct wake_lock wake_lock;
  189. +};
  190. +static struct hw3d_info *hw3d_info;
  191. +
  192. +struct hw3d_data {
  193. + struct vm_area_struct *vmas[HW3D_NUM_REGIONS];
  194. + struct mutex mutex;
  195. + bool closing;
  196. +};
  197. +
  198. +#define REGION_PAGE_ID(addr) \
  199. + ((((uint32_t)(addr)) >> (28 - PAGE_SHIFT)) & 0xf)
  200. +#define REGION_PAGE_OFFS(addr) \
  201. + ((((uint32_t)(addr)) & ~(0xf << (28 - PAGE_SHIFT))))
  202. +
  203. +static int hw3d_open(struct inode *, struct file *);
  204. +static int hw3d_release(struct inode *, struct file *);
  205. +static int hw3d_mmap(struct file *, struct vm_area_struct *);
  206. +static int hw3d_flush(struct file *, fl_owner_t);
  207. +static long hw3d_ioctl(struct file *, unsigned int, unsigned long);
  208. +
  209. +static void hw3d_vma_open(struct vm_area_struct *);
  210. +static void hw3d_vma_close(struct vm_area_struct *);
  211. +
  212. +static struct file_operations hw3d_fops = {
  213. + .open = hw3d_open,
  214. + .release = hw3d_release,
  215. + .mmap = hw3d_mmap,
  216. + .flush = hw3d_flush,
  217. + .unlocked_ioctl = hw3d_ioctl,
  218. +};
  219. +
  220. +static struct vm_operations_struct hw3d_vm_ops = {
  221. + .open = hw3d_vma_open,
  222. + .close = hw3d_vma_close,
  223. +};
  224. +
  225. +static bool is_master(struct hw3d_info *info, struct file *file)
  226. {
  227. - unsigned long flags;
  228. + int fmin = MINOR(file->f_dentry->d_inode->i_rdev);
  229. + return fmin == info->master_dev.minor;
  230. +}
  231. +
  232. +static bool is_client(struct hw3d_info *info, struct file *file)
  233. +{
  234. + int fmin = MINOR(file->f_dentry->d_inode->i_rdev);
  235. + return fmin == info->client_dev.minor;
  236. +}
  237.  
  238. - spin_lock_irqsave(&hw3d_lock, flags);
  239. - if (!hw3d_disabled) {
  240. - disable_irq(INT_GRAPHICS);
  241. - hw3d_disabled = 1;
  242. +inline static void locked_hw3d_irq_disable(struct hw3d_info *info)
  243. +{
  244. + if (info->irq_en) {
  245. + disable_irq_nosync(info->irq);
  246. + info->irq_en = 0;
  247. }
  248. - hw3d_pending = 1;
  249. - spin_unlock_irqrestore(&hw3d_lock, flags);
  250. +}
  251.  
  252. - wake_up(&hw3d_queue);
  253. +inline static void locked_hw3d_irq_enable(struct hw3d_info *info)
  254. +{
  255. + if (!info->irq_en) {
  256. + enable_irq(info->irq);
  257. + info->irq_en = 1;
  258. + }
  259. +}
  260.  
  261. - return IRQ_HANDLED;
  262. +static void hw3d_disable_interrupt(struct hw3d_info *info)
  263. +{
  264. + unsigned long flags;
  265. +
  266. + spin_lock_irqsave(&info->lock, flags);
  267. + locked_hw3d_irq_disable(info);
  268. + spin_unlock_irqrestore(&info->lock, flags);
  269. }
  270.  
  271. -static void hw3d_disable_interrupt(void)
  272. +static irqreturn_t hw3d_irq_handler(int irq, void *data)
  273. {
  274. + struct hw3d_info *info = data;
  275. unsigned long flags;
  276. - spin_lock_irqsave(&hw3d_lock, flags);
  277. - if (!hw3d_disabled) {
  278. - disable_irq(INT_GRAPHICS);
  279. - hw3d_disabled = 1;
  280. - }
  281. - spin_unlock_irqrestore(&hw3d_lock, flags);
  282. +
  283. + spin_lock_irqsave(&info->lock, flags);
  284. + locked_hw3d_irq_disable(info);
  285. + info->irq_pending = 1;
  286. + spin_unlock_irqrestore(&info->lock, flags);
  287. +
  288. + wake_up(&info->irq_wq);
  289. +
  290. + return IRQ_HANDLED;
  291. }
  292.  
  293. -static long hw3d_wait_for_interrupt(void)
  294. +static long hw3d_wait_for_interrupt(struct hw3d_info *info, struct file *filp)
  295. {
  296. + struct hw3d_data *data = filp->private_data;
  297. unsigned long flags;
  298. int ret;
  299.  
  300. + if (is_master(info, filp)) {
  301. + pr_err("%s: cannot wait for irqs on master node\n", __func__);
  302. + return -EPERM;
  303. + }
  304. +
  305. for (;;) {
  306. - spin_lock_irqsave(&hw3d_lock, flags);
  307. - if (hw3d_pending) {
  308. - hw3d_pending = 0;
  309. - spin_unlock_irqrestore(&hw3d_lock, flags);
  310. + spin_lock_irqsave(&info->lock, flags);
  311. + if (info->irq_pending) {
  312. + info->irq_pending = 0;
  313. + spin_unlock_irqrestore(&info->lock, flags);
  314. return 0;
  315. }
  316. - if (hw3d_disabled) {
  317. - hw3d_disabled = 0;
  318. - enable_irq(INT_GRAPHICS);
  319. - }
  320. - spin_unlock_irqrestore(&hw3d_lock, flags);
  321. + locked_hw3d_irq_enable(info);
  322. + spin_unlock_irqrestore(&info->lock, flags);
  323.  
  324. - ret = wait_event_interruptible(hw3d_queue, hw3d_pending);
  325. + ret = wait_event_interruptible(info->irq_wq,
  326. + info->irq_pending ||
  327. + info->revoking ||
  328. + data->closing);
  329. + /* always make sure the irq gets disabled */
  330. + if (ret == 0 && !info->irq_pending)
  331. + ret = -EPIPE;
  332. if (ret < 0) {
  333. - hw3d_disable_interrupt();
  334. + hw3d_disable_interrupt(info);
  335. return ret;
  336. }
  337. }
  338. @@ -97,117 +207,589 @@ static long hw3d_wait_for_interrupt(void)
  339. return 0;
  340. }
  341.  
  342. -#define HW3D_REGS_LEN 0x100000
  343. +static long hw3d_wait_for_revoke(struct hw3d_info *info, struct file *filp)
  344. +{
  345. + struct hw3d_data *data = filp->private_data;
  346. + int ret;
  347. +
  348. + if (is_master(info, filp)) {
  349. + pr_err("%s: cannot revoke on master node\n", __func__);
  350. + return -EPERM;
  351. + }
  352. +
  353. + ret = wait_event_interruptible(info->revoke_wq,
  354. + info->revoking ||
  355. + data->closing);
  356. + if (ret == 0 && data->closing)
  357. + ret = -EPIPE;
  358. + if (ret < 0)
  359. + return ret;
  360. + return 0;
  361. +}
  362. +
  363. +static void locked_hw3d_client_done(struct hw3d_info *info, int had_timer)
  364. +{
  365. + if (info->enabled) {
  366. + pr_debug("hw3d: was enabled\n");
  367. + info->enabled = 0;
  368. + clk_disable(info->grp_clk);
  369. + clk_disable(info->imem_clk);
  370. + }
  371. + info->revoking = 0;
  372. + info->client_file = NULL;
  373. +
  374. + /* double check that the irqs are disabled */
  375. + locked_hw3d_irq_disable(info);
  376. +
  377. + if (had_timer)
  378. + wake_unlock(&info->wake_lock);
  379. + wake_up(&info->revoke_done_wq);
  380. +}
  381. +
  382. +static void do_force_revoke(struct hw3d_info *info)
  383. +{
  384. + unsigned long flags;
  385. +
  386. + /* at this point, the task had a chance to relinquish the gpu, but
  387. + * it hasn't. So, we kill it */
  388. + spin_lock_irqsave(&info->lock, flags);
  389. + pr_debug("hw3d: forcing revoke\n");
  390. + locked_hw3d_irq_disable(info);
  391. + if (info->client_task) {
  392. + pr_info("hw3d: force revoke from pid=%d\n",
  393. + info->client_task->pid);
  394. + force_sig(SIGKILL, info->client_task);
  395. + put_task_struct(info->client_task);
  396. + info->client_task = NULL;
  397. + }
  398. + locked_hw3d_client_done(info, 1);
  399. + pr_debug("hw3d: done forcing revoke\n");
  400. + spin_unlock_irqrestore(&info->lock, flags);
  401. +}
  402. +
  403. +#define REVOKE_TIMEOUT (HZ / 2)
  404. +static void locked_hw3d_revoke(struct hw3d_info *info)
  405. +{
  406. + /* force us to wait to suspend until the revoke is done. If the
  407. + * user doesn't release the gpu, the timer will turn off the gpu,
  408. + * and force kill the process. */
  409. + wake_lock(&info->wake_lock);
  410. + info->revoking = 1;
  411. + wake_up(&info->revoke_wq);
  412. + mod_timer(&info->revoke_timer, jiffies + REVOKE_TIMEOUT);
  413. +}
  414.  
  415. -static long hw3d_revoke_gpu(struct file *file)
  416. +bool is_msm_hw3d_file(struct file *file)
  417. {
  418. + struct hw3d_info *info = hw3d_info;
  419. + if(!file || !file->f_dentry || !file->f_dentry->d_inode)
  420. + return 0;
  421. + if (MAJOR(file->f_dentry->d_inode->i_rdev) == MISC_MAJOR &&
  422. + (is_master(info, file) || is_client(info, file)))
  423. + return 1;
  424. + return 0;
  425. +}
  426. +
  427. +void put_msm_hw3d_file(struct file *file)
  428. +{
  429. + if (!is_msm_hw3d_file(file))
  430. + return;
  431. + fput(file);
  432. +}
  433. +
  434. +int get_msm_hw3d_file(int fd, int region, uint32_t offs, unsigned long *pbase,
  435. + unsigned long *len, struct file **filp)
  436. +{
  437. + struct hw3d_info *info = hw3d_info;
  438. + struct file *file;
  439. + struct hw3d_data *data;
  440. int ret = 0;
  441. - unsigned long user_start, user_len;
  442. - struct pmem_region region = {.offset = 0x0, .len = HW3D_REGS_LEN};
  443. -
  444. - down(&hw3d_sem);
  445. - if (!hw3d_granted) {
  446. - goto end;
  447. - }
  448. - /* revoke the pmem region completely */
  449. - if ((ret = pmem_remap(&region, file, PMEM_UNMAP)))
  450. - goto end;
  451. - get_pmem_user_addr(file, &user_start, &user_len);
  452. - /* reset the gpu */
  453. - clk_disable(grp_clk);
  454. - clk_disable(imem_clk);
  455. - hw3d_granted = 0;
  456. -end:
  457. - up(&hw3d_sem);
  458. +
  459. + if (unlikely(region >= HW3D_NUM_REGIONS)) {
  460. + VDBG("hw3d: invalid region %d requested\n", region);
  461. + return -EINVAL;
  462. + } else if (unlikely(offs >= info->regions[region].size)) {
  463. + VDBG("hw3d: offset %08x outside of the requested region %d\n",
  464. + offs, region);
  465. + return -EINVAL;
  466. + }
  467. +
  468. + file = fget(fd);
  469. + if (unlikely(file == NULL)) {
  470. + pr_info("%s: requested data from file descriptor that doesn't "
  471. + "exist.", __func__);
  472. + return -EINVAL;
  473. + } else if (!is_msm_hw3d_file(file)) {
  474. + ret = -1;
  475. + goto err;
  476. + }
  477. +
  478. + data = file->private_data;
  479. + if (unlikely(!data)) {
  480. + VDBG("hw3d: invalid file\n");
  481. + ret = -EINVAL;
  482. + goto err;
  483. + }
  484. +
  485. + mutex_lock(&data->mutex);
  486. + if (unlikely(!data->vmas[region])) {
  487. + mutex_unlock(&data->mutex);
  488. + VDBG("hw3d: requested hw3d region is not mapped\n");
  489. + ret = -ENOENT;
  490. + goto err;
  491. + }
  492. +
  493. + *pbase = info->regions[region].pbase;
  494. + *filp = file;
  495. + *len = data->vmas[region]->vm_end - data->vmas[region]->vm_start;
  496. + mutex_unlock(&data->mutex);
  497. + return 0;
  498. +
  499. +err:
  500. + fput(file);
  501. return ret;
  502. }
  503.  
  504. -static long hw3d_grant_gpu(struct file *file)
  505. +static int hw3d_flush(struct file *filp, fl_owner_t id)
  506. {
  507. + struct hw3d_info *info = hw3d_info;
  508. + struct hw3d_data *data = filp->private_data;
  509. +
  510. + if (!data) {
  511. + pr_err("%s: no private data\n", __func__);
  512. + return -EINVAL;
  513. + }
  514. +
  515. + if (is_master(info, filp))
  516. + return 0;
  517. + pr_debug("hw3d: closing\n");
  518. + /* releases any blocked ioctls */
  519. + data->closing = 1;
  520. + wake_up(&info->revoke_wq);
  521. + wake_up(&info->irq_wq);
  522. + return 0;
  523. +}
  524. +
  525. +
  526. +static int hw3d_open(struct inode *inode, struct file *file)
  527. +{
  528. + struct hw3d_info *info = hw3d_info;
  529. + struct hw3d_data *data;
  530. + unsigned long flags;
  531. int ret = 0;
  532. - struct pmem_region region = {.offset = 0x0, .len = HW3D_REGS_LEN};
  533.  
  534. - down(&hw3d_sem);
  535. - if (hw3d_granted) {
  536. - ret = -1;
  537. - goto end;
  538. - }
  539. - /* map the registers */
  540. - if ((ret = pmem_remap(&region, file, PMEM_MAP)))
  541. - goto end;
  542. - clk_enable(grp_clk);
  543. - clk_enable(imem_clk);
  544. - hw3d_granted = 1;
  545. - hw3d_granted_file = file;
  546. -end:
  547. - up(&hw3d_sem);
  548. + pr_info("%s: pid %d tid %d opening %s node\n", __func__,
  549. + current->group_leader->pid, current->pid,
  550. + is_master(info, file) ? "master" : "client");
  551. +
  552. + if (file->private_data != NULL)
  553. + return -EINVAL;
  554. +
  555. + data = kzalloc(sizeof(struct hw3d_data), GFP_KERNEL);
  556. + if (!data) {
  557. + pr_err("%s: unable to allocate memory for hw3d_data.\n",
  558. + __func__);
  559. + return -ENOMEM;
  560. + }
  561. +
  562. + mutex_init(&data->mutex);
  563. + file->private_data = data;
  564. +
  565. + /* master always succeeds, so we are done */
  566. + if (is_master(info, file))
  567. + return 0;
  568. +
  569. + spin_lock_irqsave(&info->lock, flags);
  570. + if (info->suspending) {
  571. + pr_warning("%s: can't open hw3d while suspending\n", __func__);
  572. + ret = -EPERM;
  573. + spin_unlock_irqrestore(&info->lock, flags);
  574. + goto err;
  575. + }
  576. +
  577. + if (info->client_file) {
  578. + pr_debug("hw3d: have client_file, need revoke\n");
  579. + locked_hw3d_revoke(info);
  580. + spin_unlock_irqrestore(&info->lock, flags);
  581. + ret = wait_event_interruptible(info->revoke_done_wq,
  582. + !info->client_file);
  583. + if (ret < 0)
  584. + goto err;
  585. + spin_lock_irqsave(&info->lock, flags);
  586. + if (info->client_file) {
  587. + /* between is waking up and grabbing the lock,
  588. + * someone else tried to open the gpu, and got here
  589. + * first, let them have it. */
  590. + spin_unlock_irqrestore(&info->lock, flags);
  591. + ret = -EBUSY;
  592. + goto err;
  593. + }
  594. + }
  595. +
  596. + info->client_file = file;
  597. + get_task_struct(current->group_leader);
  598. + info->client_task = current->group_leader;
  599. +
  600. + /* XXX: we enable these clocks if the client connects..
  601. + * probably not right? Should only turn the clocks on when the user
  602. + * tries to map the registers? */
  603. + clk_enable(info->imem_clk);
  604. + clk_enable(info->grp_clk);
  605. + info->enabled = 1;
  606. +
  607. + spin_unlock_irqrestore(&info->lock, flags);
  608. + return 0;
  609. +
  610. +err:
  611. + file->private_data = NULL;
  612. + kfree(data);
  613. return ret;
  614. +
  615. }
  616.  
  617. static int hw3d_release(struct inode *inode, struct file *file)
  618. {
  619. - down(&hw3d_sem);
  620. - /* if the gpu is in use, and its inuse by the file that was released */
  621. - if (hw3d_granted && (file == hw3d_granted_file)) {
  622. - clk_disable(grp_clk);
  623. - clk_disable(imem_clk);
  624. - hw3d_granted = 0;
  625. - hw3d_granted_file = NULL;
  626. + struct hw3d_info *info = hw3d_info;
  627. + struct hw3d_data *data = file->private_data;
  628. + unsigned long flags;
  629. +
  630. + BUG_ON(!data);
  631. +
  632. + file->private_data = NULL;
  633. +
  634. + if (is_master(info, file))
  635. + goto done;
  636. +
  637. + pr_info("%s: in release for pid=%d tid=%d\n", __func__,
  638. + current->group_leader->pid, current->pid);
  639. + spin_lock_irqsave(&info->lock, flags);
  640. +
  641. + if (info->client_task && info->client_task == current->group_leader) {
  642. + pr_debug("hw3d: releasing %d\n", info->client_task->pid);
  643. + put_task_struct(info->client_task);
  644. + info->client_task = NULL;
  645. }
  646. - up(&hw3d_sem);
  647. +
  648. + if (info->client_file && info->client_file == file) {
  649. + int pending;
  650. + /* this will be true if we are still the "owner" of the gpu */
  651. + pr_debug("hw3d: had file\n");
  652. + pending = del_timer(&info->revoke_timer);
  653. + locked_hw3d_client_done(info, pending);
  654. + }
  655. + spin_unlock_irqrestore(&info->lock, flags);
  656. +
  657. +done:
  658. + kfree(data);
  659. return 0;
  660. }
  661.  
  662. +static void hw3d_vma_open(struct vm_area_struct *vma)
  663. +{
  664. + /* XXX: should the master be allowed to fork and keep the mappings? */
  665. +
  666. + /* TODO: remap garbage page into here.
  667. + *
  668. + * For now, just pull the mapping. The user shouldn't be forking
  669. + * and using it anyway. */
  670. + zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
  671. +}
  672. +
  673. +static void hw3d_vma_close(struct vm_area_struct *vma)
  674. +{
  675. + struct file *file = vma->vm_file;
  676. + struct hw3d_data *data = file->private_data;
  677. + int i;
  678. +
  679. + pr_debug("hw3d: current %u ppid %u file %p count %ld\n",
  680. + current->pid, current->parent->pid, file, file_count(file));
  681. +
  682. + BUG_ON(!data);
  683. +
  684. + mutex_lock(&data->mutex);
  685. + for (i = 0; i < HW3D_NUM_REGIONS; ++i) {
  686. + if (data->vmas[i] == vma) {
  687. + data->vmas[i] = NULL;
  688. + goto done;
  689. + }
  690. + }
  691. + pr_warning("%s: vma %p not of ours during vma_close\n", __func__, vma);
  692. +done:
  693. + mutex_unlock(&data->mutex);
  694. +}
  695. +
  696. +static int hw3d_mmap(struct file *file, struct vm_area_struct *vma)
  697. +{
  698. + struct hw3d_info *info = hw3d_info;
  699. + struct hw3d_data *data = file->private_data;
  700. + unsigned long vma_size = vma->vm_end - vma->vm_start;
  701. + int ret = 0;
  702. + int region = REGION_PAGE_ID(vma->vm_pgoff);
  703. +
  704. + if (region >= HW3D_NUM_REGIONS) {
  705. + pr_err("%s: Trying to mmap unknown region %d\n", __func__,
  706. + region);
  707. + return -EINVAL;
  708. + } else if (REGION_PAGE_OFFS(vma->vm_pgoff) != 0 ||
  709. + (vma_size & ~PAGE_MASK)) {
  710. + pr_err("%s: Can't remap part of the region %d\n", __func__,
  711. + region);
  712. + return -EINVAL;
  713. + } else if (!is_master(info, file) &&
  714. + current->group_leader != info->client_task) {
  715. + pr_err("%s: current(%d) != client_task(%d)\n", __func__,
  716. + current->group_leader->pid, info->client_task->pid);
  717. + return -EPERM;
  718. + } else if (!is_master(info, file) &&
  719. + (info->revoking || info->suspending)) {
  720. + pr_err("%s: cannot mmap while revoking(%d) or suspending(%d)\n",
  721. + __func__, info->revoking, info->suspending);
  722. + return -EPERM;
  723. + }
  724. +
  725. + mutex_lock(&data->mutex);
  726. + if (data->vmas[region] != NULL) {
  727. + pr_err("%s: Region %d already mapped (pid=%d tid=%d)\n",
  728. + __func__, region, current->group_leader->pid,
  729. + current->pid);
  730. + ret = -EBUSY;
  731. + goto done;
  732. + }
  733. +
  734. + /* our mappings are always noncached */
  735. +#ifdef pgprot_noncached
  736. + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  737. +#endif
  738. +
  739. + ret = io_remap_pfn_range(vma, vma->vm_start,
  740. + info->regions[region].pbase >> PAGE_SHIFT,
  741. + vma_size, vma->vm_page_prot);
  742. + if (ret) {
  743. + pr_err("%s: Cannot remap page range for region %d!\n", __func__,
  744. + region);
  745. + ret = -EAGAIN;
  746. + goto done;
  747. + }
  748. +
  749. + vma->vm_ops = &hw3d_vm_ops;
  750. +
  751. + /* mark this region as mapped */
  752. + data->vmas[region] = vma;
  753. +
  754. +done:
  755. + mutex_unlock(&data->mutex);
  756. + return ret;
  757. +}
  758. +
  759. static long hw3d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  760. {
  761. + struct hw3d_info *info = hw3d_info;
  762. + struct hw3d_region regions[HW3D_NUM_REGIONS];
  763. + int i;
  764. +
  765. + if (!file->private_data)
  766. + return -EINVAL;
  767. +
  768. switch (cmd) {
  769. - case HW3D_REVOKE_GPU:
  770. - return hw3d_revoke_gpu(file);
  771. - break;
  772. - case HW3D_GRANT_GPU:
  773. - return hw3d_grant_gpu(file);
  774. - break;
  775. - case HW3D_WAIT_FOR_INTERRUPT:
  776. - return hw3d_wait_for_interrupt();
  777. - break;
  778. - default:
  779. - return -EINVAL;
  780. + case HW3D_WAIT_FOR_REVOKE:
  781. + return hw3d_wait_for_revoke(info, file);
  782. +
  783. + case HW3D_WAIT_FOR_INTERRUPT:
  784. + return hw3d_wait_for_interrupt(info, file);
  785. +
  786. + case HW3D_GET_REGIONS:
  787. + for (i = 0; i < HW3D_NUM_REGIONS; ++i) {
  788. + regions[i].phys = info->regions[i].pbase;
  789. + regions[i].map_offset = HW3D_REGION_OFFSET(i);
  790. + regions[i].len = info->regions[i].size;
  791. + }
  792. + if (copy_to_user((void __user *)arg, regions, sizeof(regions)))
  793. + return -EFAULT;
  794. + break;
  795. +
  796. + default:
  797. + return -EINVAL;
  798. }
  799. +
  800. return 0;
  801. }
  802.  
  803. -static struct android_pmem_platform_data pmem_data = {
  804. - .name = "hw3d",
  805. - .start = 0xA0000000,
  806. - .size = 0x100000,
  807. - .no_allocator = 1,
  808. - .cached = 0,
  809. -};
  810. +static void hw3d_early_suspend(struct early_suspend *h)
  811. +{
  812. + unsigned long flags;
  813. + struct hw3d_info *info;
  814. + info = container_of(h, struct hw3d_info, early_suspend);
  815.  
  816. -static int __init hw3d_init(void)
  817. + spin_lock_irqsave(&info->lock, flags);
  818. + info->suspending = 1;
  819. + if (info->client_file) {
  820. + pr_debug("hw3d: Requesting revoke for suspend\n");
  821. + locked_hw3d_revoke(info);
  822. + }
  823. + spin_unlock_irqrestore(&info->lock, flags);
  824. +}
  825. +
  826. +static void hw3d_late_resume(struct early_suspend *h)
  827. {
  828. - int ret;
  829. + unsigned long flags;
  830. + struct hw3d_info *info;
  831. + info = container_of(h, struct hw3d_info, early_suspend);
  832. +
  833. + spin_lock_irqsave(&info->lock, flags);
  834. + pr_info("%s: resuming\n", __func__);
  835. + info->suspending = 0;
  836. + spin_unlock_irqrestore(&info->lock, flags);
  837. +}
  838. +
  839. +static int hw3d_resume(struct platform_device *pdev)
  840. +{
  841. + struct hw3d_info *info = platform_get_drvdata(pdev);
  842. + unsigned long flags;
  843. +
  844. + spin_lock_irqsave(&info->lock, flags);
  845. + pr_info("%s: resuming\n", __func__);
  846. + info->suspending = 0;
  847. + spin_unlock_irqrestore(&info->lock, flags);
  848. + return 0;
  849. +}
  850. +
  851. +static int __init hw3d_probe(struct platform_device *pdev)
  852. +{
  853. + struct hw3d_info *info;
  854. + struct resource *res[HW3D_NUM_REGIONS];
  855. + int i;
  856. + int irq;
  857. + int ret = 0;
  858. +
  859. + res[HW3D_REGS] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  860. + "regs");
  861. + res[HW3D_SMI] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  862. + "smi");
  863. + res[HW3D_EBI] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  864. + "ebi");
  865. + irq = platform_get_irq(pdev, 0);
  866. + if (!res[HW3D_REGS] || !res[HW3D_SMI] || !res[HW3D_EBI] || irq < 0) {
  867. + pr_err("%s: incomplete resources\n", __func__);
  868. + return -EINVAL;
  869. + }
  870. +
  871. + info = kzalloc(sizeof(struct hw3d_info), GFP_KERNEL);
  872. + if (info == NULL) {
  873. + pr_err("%s: Cannot allocate memory for hw3d_info\n", __func__);
  874. + ret = -ENOMEM;
  875. + goto err_alloc;
  876. + }
  877. +
  878. + info->irq = irq;
  879. + wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND, "hw3d_revoke_lock");
  880. + spin_lock_init(&info->lock);
  881. + init_waitqueue_head(&info->irq_wq);
  882. + init_waitqueue_head(&info->revoke_wq);
  883. + init_waitqueue_head(&info->revoke_done_wq);
  884. + setup_timer(&info->revoke_timer,
  885. + (void (*)(unsigned long))do_force_revoke,
  886. + (unsigned long)info);
  887. +
  888. + platform_set_drvdata(pdev, info);
  889. +
  890. + info->grp_clk = clk_get(NULL, "grp_clk");
  891. + if (IS_ERR(info->grp_clk)) {
  892. + pr_err("%s: Cannot get grp_clk\n", __func__);
  893. + ret = PTR_ERR(info->grp_clk);
  894. + goto err_get_grp_clk;
  895. + }
  896. +
  897. + info->imem_clk = clk_get(NULL, "imem_clk");
  898. + if (IS_ERR(info->imem_clk)) {
  899. + pr_err("%s: Cannot get imem_clk\n", __func__);
  900. + ret = PTR_ERR(info->imem_clk);
  901. + goto err_get_imem_clk;
  902. + }
  903.  
  904. - grp_clk = clk_get(NULL, "grp_clk");
  905. - if (IS_ERR(grp_clk))
  906. - return PTR_ERR(grp_clk);
  907. -
  908. - imem_clk = clk_get(NULL, "imem_clk");
  909. - if (IS_ERR(imem_clk)) {
  910. - clk_put(grp_clk);
  911. - return PTR_ERR(imem_clk);
  912. - }
  913. - ret = request_irq(INT_GRAPHICS, hw3d_irq_handler,
  914. - IRQF_TRIGGER_HIGH, "hw3d", 0);
  915. + for (i = 0; i < HW3D_NUM_REGIONS; ++i) {
  916. + info->regions[i].pbase = res[i]->start;
  917. + info->regions[i].size = res[i]->end - res[i]->start + 1;
  918. + info->regions[i].vbase = ioremap(info->regions[i].pbase,
  919. + info->regions[i].size);
  920. + if (info->regions[i].vbase == 0) {
  921. + pr_err("%s: Cannot remap region %d\n", __func__, i);
  922. + goto err_remap_region;
  923. + }
  924. + }
  925. +
  926. + /* register the master/client devices */
  927. + info->master_dev.name = "msm_hw3dm";
  928. + info->master_dev.minor = MISC_DYNAMIC_MINOR;
  929. + info->master_dev.fops = &hw3d_fops;
  930. + info->master_dev.parent = &pdev->dev;
  931. + ret = misc_register(&info->master_dev);
  932. if (ret) {
  933. - clk_put(grp_clk);
  934. - clk_put(imem_clk);
  935. - return ret;
  936. + pr_err("%s: Cannot register master device node\n", __func__);
  937. + goto err_misc_reg_master;
  938. + }
  939. +
  940. + info->client_dev.name = "msm_hw3dc";
  941. + info->client_dev.minor = MISC_DYNAMIC_MINOR;
  942. + info->client_dev.fops = &hw3d_fops;
  943. + info->client_dev.parent = &pdev->dev;
  944. + ret = misc_register(&info->client_dev);
  945. + if (ret) {
  946. + pr_err("%s: Cannot register client device node\n", __func__);
  947. + goto err_misc_reg_client;
  948. }
  949. - hw3d_disable_interrupt();
  950. - hw3d_granted = 0;
  951.  
  952. - return pmem_setup(&pmem_data, hw3d_ioctl, hw3d_release);
  953. + info->early_suspend.suspend = hw3d_early_suspend;
  954. + info->early_suspend.resume = hw3d_late_resume;
  955. + info->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
  956. + register_early_suspend(&info->early_suspend);
  957. +
  958. + info->irq_en = 1;
  959. + ret = request_irq(info->irq, hw3d_irq_handler, IRQF_TRIGGER_HIGH,
  960. + "hw3d", info);
  961. + if (ret != 0) {
  962. + pr_err("%s: Cannot request irq\n", __func__);
  963. + goto err_req_irq;
  964. + }
  965. + hw3d_disable_interrupt(info);
  966. +
  967. + hw3d_info = info;
  968. +
  969. + return 0;
  970. +
  971. +err_req_irq:
  972. + unregister_early_suspend(&info->early_suspend);
  973. + misc_deregister(&info->client_dev);
  974. +err_misc_reg_client:
  975. + misc_deregister(&info->master_dev);
  976. +err_misc_reg_master:
  977. +err_remap_region:
  978. + for (i = 0; i < HW3D_NUM_REGIONS; ++i)
  979. + if (info->regions[i].vbase != 0)
  980. + iounmap(info->regions[i].vbase);
  981. + clk_put(info->imem_clk);
  982. +err_get_imem_clk:
  983. + clk_put(info->grp_clk);
  984. +err_get_grp_clk:
  985. + wake_lock_destroy(&info->wake_lock);
  986. + kfree(info);
  987. + platform_set_drvdata(pdev, NULL);
  988. +err_alloc:
  989. + hw3d_info = NULL;
  990. + return ret;
  991. +}
  992. +
  993. +static struct platform_driver msm_hw3d_driver = {
  994. + .probe = hw3d_probe,
  995. + .resume = hw3d_resume,
  996. + .driver = {
  997. + .name = "msm_hw3d",
  998. + .owner = THIS_MODULE,
  999. + },
  1000. +};
  1001. +
  1002. +static int __init hw3d_init(void)
  1003. +{
  1004. + return platform_driver_register(&msm_hw3d_driver);
  1005. }
  1006.  
  1007. device_initcall(hw3d_init);
  1008. diff --git a/arch/arm/mach-msm/hw3d_donut.c b/arch/arm/mach-msm/hw3d_donut.c
  1009. new file mode 100644
  1010. index 0000000..b013b57
  1011. --- /dev/null
  1012. +++ b/arch/arm/mach-msm/hw3d_donut.c
  1013. @@ -0,0 +1,213 @@
  1014. +/* arch/arm/mach-msm/hw3d.c
  1015. + *
  1016. + * Register/Interrupt access for userspace 3D library.
  1017. + *
  1018. + * Copyright (C) 2007 Google, Inc.
  1019. + * Author: Brian Swetland <swetland@google.com>
  1020. + *
  1021. + * This software is licensed under the terms of the GNU General Public
  1022. + * License version 2, as published by the Free Software Foundation, and
  1023. + * may be copied, distributed, and modified under those terms.
  1024. + *
  1025. + * This program is distributed in the hope that it will be useful,
  1026. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  1027. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1028. + * GNU General Public License for more details.
  1029. + *
  1030. + */
  1031. +
  1032. +#include <linux/module.h>
  1033. +#include <linux/fs.h>
  1034. +#include <linux/miscdevice.h>
  1035. +#include <linux/uaccess.h>
  1036. +#include <linux/poll.h>
  1037. +#include <linux/time.h>
  1038. +#include <linux/irq.h>
  1039. +#include <linux/interrupt.h>
  1040. +#include <linux/wait.h>
  1041. +#include <linux/mm.h>
  1042. +#include <linux/clk.h>
  1043. +#include <linux/android_pmem.h>
  1044. +#include <mach/board.h>
  1045. +
  1046. +static DEFINE_SPINLOCK(hw3d_lock);
  1047. +static DECLARE_WAIT_QUEUE_HEAD(hw3d_queue);
  1048. +static int hw3d_pending;
  1049. +static int hw3d_disabled;
  1050. +
  1051. +static struct clk *grp_clk;
  1052. +static struct clk *imem_clk;
  1053. +DECLARE_MUTEX(hw3d_sem);
  1054. +static unsigned int hw3d_granted;
  1055. +static struct file *hw3d_granted_file;
  1056. +
  1057. +static irqreturn_t hw3d_irq_handler(int irq, void *data)
  1058. +{
  1059. + unsigned long flags;
  1060. +
  1061. + spin_lock_irqsave(&hw3d_lock, flags);
  1062. + if (!hw3d_disabled) {
  1063. + disable_irq(INT_GRAPHICS);
  1064. + hw3d_disabled = 1;
  1065. + }
  1066. + hw3d_pending = 1;
  1067. + spin_unlock_irqrestore(&hw3d_lock, flags);
  1068. +
  1069. + wake_up(&hw3d_queue);
  1070. +
  1071. + return IRQ_HANDLED;
  1072. +}
  1073. +
  1074. +static void hw3d_disable_interrupt(void)
  1075. +{
  1076. + unsigned long flags;
  1077. + spin_lock_irqsave(&hw3d_lock, flags);
  1078. + if (!hw3d_disabled) {
  1079. + disable_irq(INT_GRAPHICS);
  1080. + hw3d_disabled = 1;
  1081. + }
  1082. + spin_unlock_irqrestore(&hw3d_lock, flags);
  1083. +}
  1084. +
  1085. +static long hw3d_wait_for_interrupt(void)
  1086. +{
  1087. + unsigned long flags;
  1088. + int ret;
  1089. +
  1090. + for (;;) {
  1091. + spin_lock_irqsave(&hw3d_lock, flags);
  1092. + if (hw3d_pending) {
  1093. + hw3d_pending = 0;
  1094. + spin_unlock_irqrestore(&hw3d_lock, flags);
  1095. + return 0;
  1096. + }
  1097. + if (hw3d_disabled) {
  1098. + hw3d_disabled = 0;
  1099. + enable_irq(INT_GRAPHICS);
  1100. + }
  1101. + spin_unlock_irqrestore(&hw3d_lock, flags);
  1102. +
  1103. + ret = wait_event_interruptible(hw3d_queue, hw3d_pending);
  1104. + if (ret < 0) {
  1105. + hw3d_disable_interrupt();
  1106. + return ret;
  1107. + }
  1108. + }
  1109. +
  1110. + return 0;
  1111. +}
  1112. +
  1113. +#define HW3D_REGS_LEN 0x100000
  1114. +
  1115. +static long hw3d_revoke_gpu(struct file *file)
  1116. +{
  1117. + int ret = 0;
  1118. + unsigned long user_start, user_len;
  1119. + struct pmem_region region = {.offset = 0x0, .len = HW3D_REGS_LEN};
  1120. +
  1121. + down(&hw3d_sem);
  1122. + if (!hw3d_granted) {
  1123. + goto end;
  1124. + }
  1125. + /* revoke the pmem region completely */
  1126. + if ((ret = pmem_remap(&region, file, PMEM_UNMAP)))
  1127. + goto end;
  1128. + get_pmem_user_addr(file, &user_start, &user_len);
  1129. + /* reset the gpu */
  1130. + clk_disable(grp_clk);
  1131. + clk_disable(imem_clk);
  1132. + hw3d_granted = 0;
  1133. +end:
  1134. + up(&hw3d_sem);
  1135. + return ret;
  1136. +}
  1137. +
  1138. +static long hw3d_grant_gpu(struct file *file)
  1139. +{
  1140. + int ret = 0;
  1141. + struct pmem_region region = {.offset = 0x0, .len = HW3D_REGS_LEN};
  1142. +
  1143. + down(&hw3d_sem);
  1144. + if (hw3d_granted) {
  1145. + ret = -1;
  1146. + goto end;
  1147. + }
  1148. + /* map the registers */
  1149. + if ((ret = pmem_remap(&region, file, PMEM_MAP)))
  1150. + goto end;
  1151. + clk_enable(grp_clk);
  1152. + clk_enable(imem_clk);
  1153. + hw3d_granted = 1;
  1154. + hw3d_granted_file = file;
  1155. +end:
  1156. + up(&hw3d_sem);
  1157. + return ret;
  1158. +}
  1159. +
  1160. +static int hw3d_release(struct inode *inode, struct file *file)
  1161. +{
  1162. + down(&hw3d_sem);
  1163. + /* if the gpu is in use, and its inuse by the file that was released */
  1164. + if (hw3d_granted && (file == hw3d_granted_file)) {
  1165. + clk_disable(grp_clk);
  1166. + clk_disable(imem_clk);
  1167. + hw3d_granted = 0;
  1168. + hw3d_granted_file = NULL;
  1169. + }
  1170. + up(&hw3d_sem);
  1171. + return 0;
  1172. +}
  1173. +
  1174. +static long hw3d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1175. +{
  1176. + switch (cmd) {
  1177. + case HW3D_REVOKE_GPU:
  1178. + return hw3d_revoke_gpu(file);
  1179. + break;
  1180. + case HW3D_GRANT_GPU:
  1181. + return hw3d_grant_gpu(file);
  1182. + break;
  1183. + case HW3D_WAIT_FOR_INTERRUPT:
  1184. + return hw3d_wait_for_interrupt();
  1185. + break;
  1186. + default:
  1187. + return -EINVAL;
  1188. + }
  1189. + return 0;
  1190. +}
  1191. +
  1192. +static struct android_pmem_platform_data pmem_data = {
  1193. + .name = "hw3d",
  1194. + .start = 0xA0000000,
  1195. + .size = 0x100000,
  1196. + .no_allocator = 1,
  1197. + .cached = 0,
  1198. +};
  1199. +
  1200. +static int __init hw3d_init(void)
  1201. +{
  1202. + int ret;
  1203. +
  1204. + grp_clk = clk_get(NULL, "grp_clk");
  1205. + if (IS_ERR(grp_clk))
  1206. + return PTR_ERR(grp_clk);
  1207. +
  1208. + imem_clk = clk_get(NULL, "imem_clk");
  1209. + if (IS_ERR(imem_clk)) {
  1210. + clk_put(grp_clk);
  1211. + return PTR_ERR(imem_clk);
  1212. + }
  1213. + ret = request_irq(INT_GRAPHICS, hw3d_irq_handler,
  1214. + IRQF_TRIGGER_HIGH, "hw3d", 0);
  1215. + if (ret) {
  1216. + clk_put(grp_clk);
  1217. + clk_put(imem_clk);
  1218. + return ret;
  1219. + }
  1220. + hw3d_disable_interrupt();
  1221. + hw3d_granted = 0;
  1222. +
  1223. + return pmem_setup(&pmem_data, hw3d_ioctl, hw3d_release);
  1224. +}
  1225. +
  1226. +device_initcall(hw3d_init);
  1227. diff --git a/arch/arm/mach-msm/pmem.c b/arch/arm/mach-msm/pmem.c
  1228. index bdc3989..dd5d828 100644
  1229. --- a/arch/arm/mach-msm/pmem.c
  1230. +++ b/arch/arm/mach-msm/pmem.c
  1231. @@ -15,6 +15,7 @@
  1232.  
  1233. #include <linux/kernel.h>
  1234. #include <linux/init.h>
  1235. +#include <linux/fs.h>
  1236. #include <linux/platform_device.h>
  1237. #include <linux/android_pmem.h>
  1238. #include <mach/board_htc.h>
  1239. @@ -104,6 +105,38 @@ static struct platform_device ram_console_device = {
  1240. .resource = ram_console_resource,
  1241. };
  1242.  
  1243. +/* Eclair hw3d */
  1244. +static struct resource resources_hw3d[] = {
  1245. + {
  1246. + .start = 0xA0000000,
  1247. + .end = 0xA00fffff,
  1248. + .flags = IORESOURCE_MEM,
  1249. + .name = "regs",
  1250. + },
  1251. + {
  1252. + .flags = IORESOURCE_MEM,
  1253. + .name = "smi",
  1254. + },
  1255. + {
  1256. + .flags = IORESOURCE_MEM,
  1257. + .name = "ebi",
  1258. + },
  1259. + {
  1260. + .start = INT_GRAPHICS,
  1261. + .end = INT_GRAPHICS,
  1262. + .flags = IORESOURCE_IRQ,
  1263. + .name = "gfx",
  1264. + },
  1265. +};
  1266. +
  1267. +static struct platform_device hw3d_device = {
  1268. + .name = "msm_hw3d",
  1269. + .id = 0,
  1270. + .num_resources = ARRAY_SIZE(resources_hw3d),
  1271. + .resource = resources_hw3d,
  1272. +};
  1273. +
  1274. +
  1275. void __init msm_add_mem_devices(struct msm_pmem_setting *setting)
  1276. {
  1277. if (setting->pmem_size) {
  1278. @@ -130,6 +163,18 @@ void __init msm_add_mem_devices(struct msm_pmem_setting *setting)
  1279. platform_device_register(&pmem_gpu1_device);
  1280. }
  1281.  
  1282. + /* Eclair hw3d */
  1283. + if (setting->pmem_gpu0_size || setting->pmem_gpu1_size) {
  1284. + struct resource *res;
  1285. + res=platform_get_resource_byname(&hw3d_device, IORESOURCE_MEM, "smi");
  1286. + res->start=setting->pmem_gpu0_start;
  1287. + res->end=setting->pmem_gpu0_start+setting->pmem_gpu0_size-1;
  1288. + res=platform_get_resource_byname(&hw3d_device, IORESOURCE_MEM, "ebi");
  1289. + res->start=setting->pmem_gpu1_start;
  1290. + res->end=setting->pmem_gpu1_start+setting->pmem_gpu1_size-1;
  1291. + platform_device_register(&hw3d_device);
  1292. + }
  1293. +
  1294. if (setting->pmem_camera_size) {
  1295. pmem_camera_pdata.start = setting->pmem_camera_start;
  1296. pmem_camera_pdata.size = setting->pmem_camera_size;
  1297. @@ -172,7 +217,7 @@ struct resource resources_msm_fb[]={
  1298. pmem_setting.name## _size = size;
  1299.  
  1300.  
  1301. -static void __init msm_pmem_init() {
  1302. +static int __init msm_pmem_init() {
  1303. switch(__machine_arch_type) {
  1304. case MACH_TYPE_HTCDIAMOND:
  1305. //SMI 64 + EBI 128
  1306. @@ -226,11 +271,13 @@ static void __init msm_pmem_init() {
  1307. }
  1308. //GPU0 must be in SMI1
  1309. pmem_setting.pmem_gpu0_start=MSM_SMI_BASE+0x100000;//1MB for wince SPL
  1310. - pmem_setting.pmem_gpu0_size=0x800000;
  1311. + pmem_setting.pmem_gpu0_size=0x700000;
  1312. resources_msm_fb[0].start=pmem_setting.fb_start;
  1313. resources_msm_fb[0].end=pmem_setting.fb_start+pmem_setting.fb_size;
  1314. resources_msm_fb[0].flags=IORESOURCE_MEM;
  1315. msm_add_mem_devices(&pmem_setting);
  1316. +
  1317. + return 0;
  1318. }
  1319. module_init(msm_pmem_init);
  1320.  
  1321. --
  1322. 1.6.1
  1323.  
  1324.  
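[Editor's note: a patch in this git format-patch/mbox form is normally applied on top of the kernel tree with git am, which preserves the author and commit message. A sketch, assuming the paste has been saved as hw3d-config-choice.patch (hypothetical filename):

    git am hw3d-config-choice.patch

To apply only the file changes without creating a commit, git apply hw3d-config-choice.patch works as well.]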