/*
 *
 * drivers/media/tdmb/tdmb.c
 *
 * tdmb driver
 *
 * Copyright (C) (2011, Samsung Electronics)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/fcntl.h>
/* for delay(sleep) */
#include <linux/delay.h>
/* for mutex */
#include <linux/mutex.h>
/* using copy to user */
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <asm/mach/irq.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/wakelock.h>
#include <linux/input.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/of_gpio.h>

#if defined(CONFIG_TDMB_QUALCOMM)
#include <mach/gpiomux.h>
#include <mach/cpuidle.h>
#endif
#if defined(CONFIG_TDMB_EXYNOS)
#include <plat/gpio-cfg.h>
#endif

#if defined(CONFIG_TDMB_ANT_DET)
static struct wake_lock tdmb_ant_wlock;
#endif

#define TDMB_WAKE_LOCK_ENABLE

#ifdef TDMB_WAKE_LOCK_ENABLE
#if defined(CONFIG_TDMB_QUALCOMM)
static struct pm_qos_request tdmb_pm_qos_req;
#endif
static struct wake_lock tdmb_wlock;
#endif

#include "tdmb.h"

#define TDMB_PRE_MALLOC 1

#ifndef VM_RESERVED /* for kernel 3.10 */
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

static struct class *tdmb_class;

/* ring buffer */
char *ts_ring;
unsigned int *tdmb_ts_head;
unsigned int *tdmb_ts_tail;
char *tdmb_ts_buffer;
unsigned int tdmb_ts_size;

unsigned int *cmd_head;
unsigned int *cmd_tail;
static char *cmd_buffer;
static unsigned int cmd_size;

static unsigned long tdmb_last_ch;

static struct tdmb_drv_func *tdmbdrv_func;
static struct tdmb_dt_platform_data *dt_pdata;
static struct device *dmb_device;
static bool tdmb_pwr_on;

#ifdef CONFIG_TDMB_VREG_SUPPORT
static int tdmb_vreg_init(struct device *dev)
{
	int rc = 0;

	DPRINTK("vdd_name : %s", dt_pdata->tdmb_vreg_name);

	dt_pdata->tdmb_vreg = regulator_get(dev, dt_pdata->tdmb_vreg_name);
	if (IS_ERR(dt_pdata->tdmb_vreg)) {
		DPRINTK("Failed to get tdmb_vreg\n");
		rc = -ENXIO;
		return rc;
	}

	rc = regulator_set_voltage(dt_pdata->tdmb_vreg, 1800000, 1800000);
	if (rc) {
		DPRINTK("regulator set_vtg failed rc=%d\n", rc);
		regulator_put(dt_pdata->tdmb_vreg);
		return rc;
	}

	return rc;
}

static void tdmb_vreg_onoff(bool onoff)
{
	int rc;

	if (onoff) {
		if (!regulator_is_enabled(dt_pdata->tdmb_vreg)) {
			rc = regulator_enable(dt_pdata->tdmb_vreg);
			if (rc)
				DPRINTK("tdmb_vreg enable failed rc=%d\n", rc);
		}
	} else {
		if (regulator_is_enabled(dt_pdata->tdmb_vreg)) {
			rc = regulator_disable(dt_pdata->tdmb_vreg);
			if (rc)
				DPRINTK("tdmb_vreg disable failed rc=%d\n", rc);
		}
	}
	DPRINTK("%s : (%d)\n", __func__, onoff);
}
#endif
static void tdmb_set_config_poweron(void)
{
#if defined(CONFIG_TDMB_QUALCOMM)
	gpio_tlmm_config(GPIO_CFG(dt_pdata->tdmb_en, GPIOMUX_FUNC_GPIO,
			GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
			GPIO_CFG_ENABLE);
	if (dt_pdata->tdmb_use_rst) {
		gpio_tlmm_config(GPIO_CFG(dt_pdata->tdmb_rst, GPIOMUX_FUNC_GPIO,
				GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
				GPIO_CFG_ENABLE);
	}
	if (dt_pdata->tdmb_use_irq) {
		gpio_tlmm_config(GPIO_CFG(dt_pdata->tdmb_irq, GPIOMUX_FUNC_GPIO,
				GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
				GPIO_CFG_ENABLE);
	}
#elif defined(CONFIG_TDMB_SLSI)
	struct pinctrl *pinctrl;

	pinctrl = devm_pinctrl_get_select(dmb_device, "tdmb_int_on");
	if (IS_ERR(pinctrl))
		DPRINTK("%s: no config tdmb_int_on\n", __func__);
#else
#error : select AP
#endif
}

static void tdmb_set_config_poweroff(void)
{
#if defined(CONFIG_TDMB_QUALCOMM)
	gpio_tlmm_config(GPIO_CFG(dt_pdata->tdmb_en, GPIOMUX_FUNC_GPIO,
			GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
			GPIO_CFG_ENABLE);
	if (dt_pdata->tdmb_use_rst) {
		gpio_tlmm_config(GPIO_CFG(dt_pdata->tdmb_rst, GPIOMUX_FUNC_GPIO,
				GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
				GPIO_CFG_ENABLE);
	}
	if (dt_pdata->tdmb_use_irq) {
		gpio_tlmm_config(GPIO_CFG(dt_pdata->tdmb_irq, GPIOMUX_FUNC_GPIO,
				GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
				GPIO_CFG_ENABLE);
	}
#elif defined(CONFIG_TDMB_SLSI)
	struct pinctrl *pinctrl;

	pinctrl = devm_pinctrl_get_select(dmb_device, "tdmb_int_off");
	if (IS_ERR(pinctrl))
		DPRINTK("%s: no config tdmb_int_off\n", __func__);
#else
#error : select AP
#endif
}

static void tdmb_gpio_on(void)
{
	DPRINTK("tdmb_gpio_on\n");

#ifdef CONFIG_TDMB_VREG_SUPPORT
	tdmb_vreg_onoff(true);
#endif
	tdmb_set_config_poweron();

	gpio_set_value(dt_pdata->tdmb_en, 0);
	usleep_range(1000, 1000);
	gpio_set_value(dt_pdata->tdmb_en, 1);
	usleep_range(25000, 25000);

	if (dt_pdata->tdmb_use_rst) {
		gpio_set_value(dt_pdata->tdmb_rst, 0);
		usleep_range(2000, 2000);
		gpio_set_value(dt_pdata->tdmb_rst, 1);
		usleep_range(10000, 10000);
	}
}

static void tdmb_gpio_off(void)
{
	DPRINTK("tdmb_gpio_off\n");

#ifdef CONFIG_TDMB_VREG_SUPPORT
	tdmb_vreg_onoff(false);
#endif
	tdmb_set_config_poweroff();

	gpio_set_value(dt_pdata->tdmb_en, 0);
	usleep_range(1000, 1000);

	if (dt_pdata->tdmb_use_rst)
		gpio_set_value(dt_pdata->tdmb_rst, 0);
}
static bool tdmb_power_on(void)
{
	if (tdmb_create_databuffer(tdmbdrv_func->get_int_size()) == false) {
		DPRINTK("tdmb_create_databuffer fail\n");
		goto create_databuffer_fail;
	}
	if (tdmb_create_workqueue() == false) {
		DPRINTK("tdmb_create_workqueue fail\n");
		goto create_workqueue_fail;
	}
	if (tdmbdrv_func->power_on() == false) {
		DPRINTK("power_on fail\n");
		goto power_on_fail;
	}

	DPRINTK("power_on success\n");

#ifdef TDMB_WAKE_LOCK_ENABLE
#if defined(CONFIG_TDMB_QUALCOMM)
	pm_qos_update_request(&tdmb_pm_qos_req,
			msm_cpuidle_get_deep_idle_latency());
#endif
	wake_lock(&tdmb_wlock);
#endif
	tdmb_pwr_on = true;
	return true;

power_on_fail:
	tdmb_destroy_workqueue();
create_workqueue_fail:
	tdmb_destroy_databuffer();
create_databuffer_fail:
	tdmb_pwr_on = false;
	return false;
}

static DEFINE_MUTEX(tdmb_lock);

static bool tdmb_power_off(void)
{
	DPRINTK("%s : tdmb_pwr_on(%d)\n", __func__, tdmb_pwr_on);

	mutex_lock(&tdmb_lock);
	if (tdmb_pwr_on) {
		tdmbdrv_func->power_off();
		tdmb_destroy_workqueue();
		tdmb_destroy_databuffer();

#ifdef TDMB_WAKE_LOCK_ENABLE
		wake_unlock(&tdmb_wlock);
#if defined(CONFIG_TDMB_QUALCOMM)
		pm_qos_update_request(&tdmb_pm_qos_req, PM_QOS_DEFAULT_VALUE);
#endif
#endif
		tdmb_pwr_on = false;
	}
	tdmb_last_ch = 0;
	mutex_unlock(&tdmb_lock);

	return true;
}
static int tdmb_open(struct inode *inode, struct file *filp)
{
	DPRINTK("tdmb_open!\n");
	return 0;
}

static ssize_t
tdmb_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	DPRINTK("tdmb_read\n");
	return 0;
}

static int tdmb_release(struct inode *inode, struct file *filp)
{
	DPRINTK("tdmb_release\n");

	tdmb_power_off();

#if TDMB_PRE_MALLOC
	tdmb_ts_size = 0;
	cmd_size = 0;
#else
	if (ts_ring != 0) {
		kfree(ts_ring);
		ts_ring = 0;
		tdmb_ts_size = 0;
		cmd_size = 0;
	}
#endif

	return 0;
}
#if TDMB_PRE_MALLOC
static void tdmb_make_ring_buffer(void)
{
	size_t size = TDMB_RING_BUFFER_MAPPING_SIZE;

	/* round size up to a multiple of PAGE_SIZE */
	if (size % PAGE_SIZE)
		size += PAGE_SIZE - (size % PAGE_SIZE);

	ts_ring = kmalloc(size, GFP_KERNEL);
	if (!ts_ring) {
		DPRINTK("RING Buff Create fail\n");
		return;
	}
	DPRINTK("RING Buff Create OK\n");
}
#endif
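
/*
 * Layout of the buffer shared with userspace via tdmb_mmap() below:
 *   ts_ring[0..3] : TS ring-buffer head index (tdmb_ts_head)
 *   ts_ring[4..7] : TS ring-buffer tail index (tdmb_ts_tail)
 *   ts_ring[8..]  : TS data area of tdmb_ts_size bytes (tdmb_ts_buffer)
 *   followed by   : command head/tail indices (cmd_head/cmd_tail)
 *                   and a command area of cmd_size bytes (cmd_buffer)
 */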
static int tdmb_mmap(struct file *filp, struct vm_area_struct *vma)
{
	size_t size;
	unsigned long pfn;

	DPRINTK("%s\n", __func__);

	vma->vm_flags |= VM_RESERVED;
	size = vma->vm_end - vma->vm_start;
	DPRINTK("size given : %zx\n", size);

#if TDMB_PRE_MALLOC
	size = TDMB_RING_BUFFER_MAPPING_SIZE;
	if (!ts_ring) {
		DPRINTK("RING Buff ReAlloc(%zu)!!\n", size);
#endif
		/* round size up to a multiple of PAGE_SIZE */
		if (size % PAGE_SIZE)
			size += PAGE_SIZE - (size % PAGE_SIZE);
		ts_ring = kmalloc(size, GFP_KERNEL);
#if TDMB_PRE_MALLOC
	}
#endif
	if (!ts_ring)
		return -ENOMEM;

	pfn = virt_to_phys(ts_ring) >> PAGE_SHIFT;
	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot))
		return -EAGAIN;
	DPRINTK("succeeded\n");

	tdmb_ts_head = (unsigned int *)ts_ring;
	tdmb_ts_tail = (unsigned int *)(ts_ring + 4);
	tdmb_ts_buffer = ts_ring + 8;

	*tdmb_ts_head = 0;
	*tdmb_ts_tail = 0;

	/* reserve the tail of the mapping for the command ring buffer */
	tdmb_ts_size = size - 8;
	tdmb_ts_size
		= ((tdmb_ts_size / DMB_TS_SIZE) * DMB_TS_SIZE) - (30 * DMB_TS_SIZE);

	DPRINTK("head : %x, tail : %x, buffer : %x, size : %x\n",
		(unsigned int)tdmb_ts_head, (unsigned int)tdmb_ts_tail,
		(unsigned int)tdmb_ts_buffer, tdmb_ts_size);

	cmd_buffer = tdmb_ts_buffer + tdmb_ts_size + 8;
	cmd_head = (unsigned int *)(cmd_buffer - 8);
	cmd_tail = (unsigned int *)(cmd_buffer - 4);

	*cmd_head = 0;
	*cmd_tail = 0;

	cmd_size = 30 * DMB_TS_SIZE - 8;

	DPRINTK("cmd head : %x, tail : %x, buffer : %x, size : %x\n",
		(unsigned int)cmd_head, (unsigned int)cmd_tail,
		(unsigned int)cmd_buffer, cmd_size);

	return 0;
}
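
/*
 * Copy a command header and its payload into the command ring buffer,
 * splitting the copy in two when the write wraps past the end of the
 * buffer. Only the head index is advanced here; the tail index is
 * advanced by the consumer of the mmap()ed buffer.
 */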
static int _tdmb_cmd_update(
	unsigned char *cmd_header,
	unsigned char cmd_header_size,
	unsigned char *data,
	unsigned short data_size)
{
	unsigned int size;
	unsigned int head;
	unsigned int tail;
	unsigned int dist;
	unsigned int temp_size;
	unsigned int data_size_tmp;

	if (data_size > cmd_size) {
		DPRINTK(" Error - cmd size too large\n");
		return false;
	}

	head = *cmd_head;
	tail = *cmd_tail;
	size = cmd_size;
	data_size_tmp = data_size + cmd_header_size;

	if (head >= tail)
		dist = head - tail;
	else
		dist = size + head - tail;

	if (size - dist <= data_size_tmp) {
		DPRINTK("too small space is left in Cmd Ring Buffer!!\n");
		return false;
	}

	DPRINTK("%x head %d tail %d\n", (unsigned int)cmd_buffer, head, tail);

	if (head + data_size_tmp <= size) {
		memcpy((cmd_buffer + head),
			(char *)cmd_header, cmd_header_size);
		memcpy((cmd_buffer + head + cmd_header_size),
			(char *)data, data_size);
		head += data_size_tmp;
		if (head == size)
			head = 0;
	} else {
		temp_size = size - head;
		if (temp_size < cmd_header_size) {
			memcpy((cmd_buffer + head),
				(char *)cmd_header, temp_size);
			memcpy((cmd_buffer),
				(char *)cmd_header + temp_size,
				(cmd_header_size - temp_size));
			head = cmd_header_size - temp_size;
		} else {
			memcpy((cmd_buffer + head),
				(char *)cmd_header, cmd_header_size);
			head += cmd_header_size;
			if (head == size)
				head = 0;
		}

		temp_size = size - head;
		if (temp_size < data_size) {
			memcpy((cmd_buffer + head),
				(char *)data, temp_size);
			memcpy((cmd_buffer),
				(char *)data + temp_size,
				(data_size - temp_size));
			head = data_size - temp_size;
		} else {
			memcpy((cmd_buffer + head),
				(char *)data, data_size);
			head += data_size;
			if (head == size)
				head = 0;
		}
	}

	*cmd_head = head;

	return true;
}
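
/*
 * Frame a result for userspace: a 4-byte header consisting of
 * TDMB_CMD_START_FLAG, the command code and the big-endian payload
 * length, followed by the payload itself, queued into the command
 * ring buffer.
 */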
unsigned char tdmb_make_result(
	unsigned char cmd,
	unsigned short data_len,
	unsigned char *data)
{
	unsigned char cmd_header[4] = {0,};

	cmd_header[0] = TDMB_CMD_START_FLAG;
	cmd_header[1] = cmd;
	cmd_header[2] = (data_len >> 8) & 0xff;
	cmd_header[3] = data_len & 0xff;

	_tdmb_cmd_update(cmd_header, 4, data, data_len);

	return true;
}

unsigned long tdmb_get_chinfo(void)
{
	return tdmb_last_ch;
}

void tdmb_pull_data(struct work_struct *work)
{
	tdmbdrv_func->pull_data();
}

bool tdmb_control_irq(bool set)
{
	bool ret = true;
	int irq_ret;

	if (!dt_pdata->tdmb_use_irq)
		return false;

	if (set) {
		irq_set_irq_type(gpio_to_irq(dt_pdata->tdmb_irq),
				IRQ_TYPE_EDGE_FALLING);
		irq_ret = request_irq(gpio_to_irq(dt_pdata->tdmb_irq)
				, tdmb_irq_handler
				, IRQF_DISABLED
				, TDMB_DEV_NAME
				, NULL);
		if (irq_ret < 0) {
			DPRINTK("request_irq failed !! \r\n");
			ret = false;
		}
	} else {
		free_irq(gpio_to_irq(dt_pdata->tdmb_irq), NULL);
	}

	return ret;
}

void tdmb_control_gpio(bool poweron)
{
	if (poweron)
		tdmb_gpio_on();
	else
		tdmb_gpio_off();
}
static long tdmb_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	unsigned long fig_freq = 0;
	struct ensemble_info_type *ensemble_info;
	struct tdmb_dm dm_buff;

	DPRINTK("call tdmb_ioctl : 0x%x\n", cmd);

	if (_IOC_TYPE(cmd) != IOCTL_MAGIC) {
		DPRINTK("tdmb_ioctl : _IOC_TYPE error\n");
		return -EINVAL;
	}
	if (_IOC_NR(cmd) >= IOCTL_MAXNR) {
		DPRINTK("tdmb_ioctl : _IOC_NR(cmd) 0x%x\n", _IOC_NR(cmd));
		return -EINVAL;
	}

	switch (cmd) {
	case IOCTL_TDMB_GET_DATA_BUFFSIZE:
		DPRINTK("IOCTL_TDMB_GET_DATA_BUFFSIZE %d\n", tdmb_ts_size);
		ret = copy_to_user((unsigned int *)arg,
			&tdmb_ts_size, sizeof(unsigned int));
		break;

	case IOCTL_TDMB_GET_CMD_BUFFSIZE:
		DPRINTK("IOCTL_TDMB_GET_CMD_BUFFSIZE %d\n", cmd_size);
		ret = copy_to_user((unsigned int *)arg,
			&cmd_size, sizeof(unsigned int));
		break;

	case IOCTL_TDMB_POWER_ON:
		DPRINTK("IOCTL_TDMB_POWER_ON\n");
		ret = tdmb_power_on();
		break;

	case IOCTL_TDMB_POWER_OFF:
		DPRINTK("IOCTL_TDMB_POWER_OFF\n");
		ret = tdmb_power_off();
		break;

	case IOCTL_TDMB_SCAN_FREQ_ASYNC:
		DPRINTK("IOCTL_TDMB_SCAN_FREQ_ASYNC\n");
		fig_freq = arg;

		ensemble_info = vmalloc(sizeof(struct ensemble_info_type));
		if (!ensemble_info) {
			ret = -ENOMEM;
			break;
		}
		memset(ensemble_info, 0x00, sizeof(struct ensemble_info_type));

		ret = tdmbdrv_func->scan_ch(ensemble_info, fig_freq);
		if (ret == true)
			tdmb_make_result(DMB_FIC_RESULT_DONE,
				sizeof(struct ensemble_info_type),
				(unsigned char *)ensemble_info);
		else
			tdmb_make_result(DMB_FIC_RESULT_FAIL,
				sizeof(unsigned long),
				(unsigned char *)&fig_freq);

		vfree(ensemble_info);
		tdmb_last_ch = 0;
		break;

	case IOCTL_TDMB_SCAN_FREQ_SYNC:
		fig_freq = ((struct ensemble_info_type *)arg)->ensem_freq;
		DPRINTK("IOCTL_TDMB_SCAN_FREQ_SYNC %ld\n", fig_freq);

		ensemble_info = vmalloc(sizeof(struct ensemble_info_type));
		if (!ensemble_info) {
			ret = -ENOMEM;
			break;
		}
		memset(ensemble_info, 0x00, sizeof(struct ensemble_info_type));

		ret = tdmbdrv_func->scan_ch(ensemble_info, fig_freq);
		if (ret == true) {
			if (copy_to_user((struct ensemble_info_type *)arg,
					ensemble_info,
					sizeof(struct ensemble_info_type)))
				DPRINTK("cmd(%x):copy_to_user failed\n", cmd);
		}

		vfree(ensemble_info);
		tdmb_last_ch = 0;
		break;

	case IOCTL_TDMB_SCANSTOP:
		DPRINTK("IOCTL_TDMB_SCANSTOP\n");
		ret = false;
		break;

	case IOCTL_TDMB_ASSIGN_CH:
		DPRINTK("IOCTL_TDMB_ASSIGN_CH %ld\n", arg);
		tdmb_init_data();
		ret = tdmbdrv_func->set_ch(arg, (arg % 1000), false);
		if (ret == true)
			tdmb_last_ch = arg;
		else
			tdmb_last_ch = 0;
		break;

	case IOCTL_TDMB_ASSIGN_CH_TEST:
		DPRINTK("IOCTL_TDMB_ASSIGN_CH_TEST %ld\n", arg);
		tdmb_init_data();
		ret = tdmbdrv_func->set_ch(arg, (arg % 1000), true);
		if (ret == true)
			tdmb_last_ch = arg;
		else
			tdmb_last_ch = 0;
		break;

	case IOCTL_TDMB_GET_DM:
		tdmbdrv_func->get_dm(&dm_buff);
		if (copy_to_user((struct tdmb_dm *)arg,
				&dm_buff, sizeof(struct tdmb_dm)))
			DPRINTK("IOCTL_TDMB_GET_DM : copy_to_user failed\n");
		ret = true;
		DPRINTK("rssi %d, ber %d, ANT %d\n",
			dm_buff.rssi, dm_buff.ber, dm_buff.antenna);
		break;

	case IOCTL_TDMB_SET_AUTOSTART:
		DPRINTK("IOCTL_TDMB_SET_AUTOSTART : %ld\n", arg);
#if defined(CONFIG_TDMB_ANT_DET)
		tdmb_ant_det_irq_set(arg);
#endif
		break;
	}

	return ret;
}
static const struct file_operations tdmb_ctl_fops = {
	.owner = THIS_MODULE,
	.open = tdmb_open,
	.read = tdmb_read,
	.unlocked_ioctl = tdmb_ioctl,
	.mmap = tdmb_mmap,
	.release = tdmb_release,
	.llseek = no_llseek,
};

static struct tdmb_drv_func *tdmb_get_drv_func(void)
{
	struct tdmb_drv_func * (*func)(void);

#if defined(CONFIG_TDMB_FC8050)
	func = fc8050_drv_func;
#elif defined(CONFIG_TDMB_FC8080)
	func = fc8080_drv_func;
#elif defined(CONFIG_TDMB_MTV318)
	func = mtv318_drv_func;
#elif defined(CONFIG_TDMB_MTV319)
	func = mtv319_drv_func;
#elif defined(CONFIG_TDMB_TCC3170)
	func = tcc3170_drv_func;
#else
#error : select TDMB tuner
#endif
	return func();
}
#if defined(CONFIG_TDMB_ANT_DET)
enum {
	TDMB_ANT_OPEN = 0,
	TDMB_ANT_CLOSE,
	TDMB_ANT_UNKNOWN,
};

enum {
	TDMB_ANT_DET_LOW = 0,
	TDMB_ANT_DET_HIGH,
};

static struct input_dev *tdmb_ant_input;
static int tdmb_check_ant;
static int ant_prev_status;
static int ant_irq_ret = -1;

#define TDMB_ANT_WAIT_INIT_TIME	500000 /* us */
#define TDMB_ANT_CHECK_DURATION	50000 /* us */
#define TDMB_ANT_CHECK_COUNT	10
#define TDMB_ANT_WLOCK_TIMEOUT \
	((TDMB_ANT_CHECK_DURATION * TDMB_ANT_CHECK_COUNT * 2) / 500000)
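
/*
 * Debounce the antenna-detect GPIO: after an initial settling delay the
 * line is sampled up to TDMB_ANT_CHECK_COUNT times; only when every
 * sample differs from the previous stable level is an OPEN/CLOSE
 * transition reported, otherwise TDMB_ANT_UNKNOWN is returned.
 */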
static int tdmb_ant_det_check_value(void)
{
	int loop = 0, cur_val = 0;
	int ret = TDMB_ANT_UNKNOWN;

	tdmb_check_ant = 1;

	DPRINTK("%s ant_prev_status(%d)\n",
		__func__, ant_prev_status);

	/* wait initial noise */
	usleep_range(TDMB_ANT_WAIT_INIT_TIME, TDMB_ANT_WAIT_INIT_TIME);

	for (loop = 0; loop < TDMB_ANT_CHECK_COUNT; loop++) {
		usleep_range(TDMB_ANT_CHECK_DURATION, TDMB_ANT_CHECK_DURATION);
		cur_val = gpio_get_value_cansleep(dt_pdata->tdmb_ant_irq);

		if (ant_prev_status == cur_val)
			break;
	}

	if (loop == TDMB_ANT_CHECK_COUNT) {
		if (ant_prev_status == TDMB_ANT_DET_LOW
				&& cur_val == TDMB_ANT_DET_HIGH) {
			ret = TDMB_ANT_OPEN;
		} else if (ant_prev_status == TDMB_ANT_DET_HIGH
				&& cur_val == TDMB_ANT_DET_LOW) {
			ret = TDMB_ANT_CLOSE;
		}
		ant_prev_status = cur_val;
	}

	tdmb_check_ant = 0;

	DPRINTK("%s cnt(%d) cur(%d) prev(%d)\n",
		__func__, loop, cur_val, ant_prev_status);

	return ret;
}

static int tdmb_ant_det_ignore_irq(void)
{
	DPRINTK("chk_ant=%d\n", tdmb_check_ant);
	return tdmb_check_ant;
}

static void tdmb_ant_det_work_func(struct work_struct *work)
{
	if (!tdmb_ant_input) {
		DPRINTK("%s: input device is not registered\n", __func__);
		return;
	}

	switch (tdmb_ant_det_check_value()) {
	case TDMB_ANT_OPEN:
		input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_UP, 1);
		input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_UP, 0);
		input_sync(tdmb_ant_input);
		DPRINTK("%s : TDMB_ANT_OPEN\n", __func__);
		break;
	case TDMB_ANT_CLOSE:
		input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_DOWN, 1);
		input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_DOWN, 0);
		input_sync(tdmb_ant_input);
		DPRINTK("%s : TDMB_ANT_CLOSE\n", __func__);
		break;
	case TDMB_ANT_UNKNOWN:
		DPRINTK("%s : TDMB_ANT_UNKNOWN\n", __func__);
		break;
	default:
		break;
	}
}

static struct workqueue_struct *tdmb_ant_det_wq;
static DECLARE_WORK(tdmb_ant_det_work, tdmb_ant_det_work_func);
static bool tdmb_ant_det_reg_input(struct platform_device *pdev)
{
	struct input_dev *input;
	int err;

	DPRINTK("%s\n", __func__);

	input = input_allocate_device();
	if (!input) {
		DPRINTK("Can't allocate input device\n");
		return false;
	}

	set_bit(EV_KEY, input->evbit);
	set_bit(KEY_DMB_ANT_DET_UP & KEY_MAX, input->keybit);
	set_bit(KEY_DMB_ANT_DET_DOWN & KEY_MAX, input->keybit);
	input->name = "sec_dmb_key";
	input->phys = "sec_dmb_key/input0";
	input->dev.parent = &pdev->dev;

	err = input_register_device(input);
	if (err) {
		DPRINTK("Can't register dmb_ant_det key: %d\n", err);
		goto free_input_dev;
	}
	tdmb_ant_input = input;

	return true;

free_input_dev:
	input_free_device(input);
	return false;
}
static void tdmb_ant_det_unreg_input(void)
{
	DPRINTK("%s\n", __func__);

	if (tdmb_ant_input) {
		input_unregister_device(tdmb_ant_input);
		tdmb_ant_input = NULL;
	}
}

static bool tdmb_ant_det_create_wq(void)
{
	DPRINTK("%s\n", __func__);

	tdmb_ant_det_wq = create_singlethread_workqueue("tdmb_ant_det_wq");
	if (tdmb_ant_det_wq)
		return true;
	else
		return false;
}

static bool tdmb_ant_det_destroy_wq(void)
{
	DPRINTK("%s\n", __func__);

	if (tdmb_ant_det_wq) {
		flush_workqueue(tdmb_ant_det_wq);
		destroy_workqueue(tdmb_ant_det_wq);
		tdmb_ant_det_wq = NULL;
	}
	return true;
}

static irqreturn_t tdmb_ant_det_irq_handler(int irq, void *dev_id)
{
	int ret = 0;

	if (tdmb_ant_det_ignore_irq())
		return IRQ_HANDLED;

	wake_lock_timeout(&tdmb_ant_wlock, TDMB_ANT_WLOCK_TIMEOUT * HZ);

	if (tdmb_ant_det_wq) {
		ret = queue_work(tdmb_ant_det_wq, &tdmb_ant_det_work);
		if (ret == 0)
			DPRINTK("%s queue_work fail\n", __func__);
	}

	return IRQ_HANDLED;
}

bool tdmb_ant_det_irq_set(bool set)
{
	bool ret = true;

	DPRINTK("%s : set(%d) ant_irq(%d)\n", __func__, set, ant_irq_ret);

	if (set) {
		if (ant_irq_ret < 0) {
			ant_prev_status =
				gpio_get_value_cansleep(dt_pdata->tdmb_ant_irq);

			irq_set_irq_type(gpio_to_irq(dt_pdata->tdmb_ant_irq)
					, IRQ_TYPE_EDGE_BOTH);
			ant_irq_ret = request_irq(gpio_to_irq(dt_pdata->tdmb_ant_irq)
					, tdmb_ant_det_irq_handler
					, IRQF_DISABLED
					, "tdmb_ant_det"
					, NULL);
			if (ant_irq_ret < 0) {
				DPRINTK("%s %d\r\n", __func__, ant_irq_ret);
				ret = false;
			} else {
				enable_irq_wake(gpio_to_irq(dt_pdata->tdmb_ant_irq));
			}
		}
	} else {
		if (ant_irq_ret >= 0) {
			disable_irq_wake(gpio_to_irq(dt_pdata->tdmb_ant_irq));
			free_irq(gpio_to_irq(dt_pdata->tdmb_ant_irq), NULL);
			ant_irq_ret = -1;
			ret = false;
		}
	}
	return ret;
}
#endif
static struct tdmb_dt_platform_data *get_tdmb_dt_pdata(struct device *dev)
{
	struct tdmb_dt_platform_data *pdata;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		DPRINTK("%s : could not allocate memory for platform data\n",
			__func__);
		goto err;
	}

	pdata->tdmb_en = of_get_named_gpio(dev->of_node, "tdmb_pwr_en", 0);
	if (gpio_is_valid(pdata->tdmb_en)) {
		int ret;

		ret = gpio_request(pdata->tdmb_en, "tdmb_pwr_en");
		if (ret) {
			DPRINTK("Unable to request tdmb_pwr_en [%d]\n",
				pdata->tdmb_en);
			goto alloc_err;
		}
		gpio_direction_output(pdata->tdmb_en, 0);
	} else {
		DPRINTK("Failed to get valid tdmb_pwr_en\n");
		goto alloc_err;
	}

	pdata->tdmb_use_rst = of_property_read_bool(dev->of_node, "tdmb_use_rst");
	if (pdata->tdmb_use_rst) {
		pdata->tdmb_rst = of_get_named_gpio(dev->of_node, "tdmb_rst", 0);
		if (gpio_is_valid(pdata->tdmb_rst)) {
			int ret;

			ret = gpio_request(pdata->tdmb_rst, "tdmb_rst");
			if (ret) {
				DPRINTK("Unable to request tdmb_rst [%d]\n",
					pdata->tdmb_rst);
				goto alloc_err;
			}
		} else {
			DPRINTK("%s : without tdmb_rst\n", __func__);
		}
	} else {
		DPRINTK("%s : without tdmb_use_rst\n", __func__);
	}

	pdata->tdmb_use_irq = of_property_read_bool(dev->of_node, "tdmb_use_irq");
	if (pdata->tdmb_use_irq) {
		pdata->tdmb_irq = of_get_named_gpio(dev->of_node, "tdmb_irq", 0);
		if (gpio_is_valid(pdata->tdmb_irq)) {
			int ret;

			ret = gpio_request(pdata->tdmb_irq, "tdmb_irq");
			if (ret) {
				DPRINTK("Unable to request tdmb_irq [%d]\n",
					pdata->tdmb_irq);
				goto alloc_err;
			}
		} else {
			DPRINTK("%s : without tdmb_irq\n", __func__);
		}
	} else {
		DPRINTK("%s : without tdmb_use_irq\n", __func__);
	}

#ifdef CONFIG_TDMB_ANT_DET
	pdata->tdmb_ant_irq = of_get_named_gpio(dev->of_node, "tdmb_ant_irq", 0);
	if (gpio_is_valid(pdata->tdmb_ant_irq)) {
		int ret;

		ret = gpio_request(pdata->tdmb_ant_irq, "tdmb_ant_irq");
		if (ret) {
			DPRINTK("Unable to request tdmb_ant_irq [%d]\n",
				pdata->tdmb_ant_irq);
			goto alloc_err;
		}
	} else {
		DPRINTK("%s : can not find the tdmb_ant_irq\n", __func__);
		goto alloc_err;
	}
#endif

#ifdef CONFIG_TDMB_VREG_SUPPORT
	if (of_property_read_string(dev->of_node,
			"tdmb_vreg_supply", &pdata->tdmb_vreg_name)) {
		DPRINTK("Unable to find tdmb_vreg_supply\n");
		goto alloc_err;
	}
#endif

	return pdata;

alloc_err:
	devm_kfree(dev, pdata);
err:
	return NULL;
}
static int tdmb_probe(struct platform_device *pdev)
{
	int ret;
	struct device *tdmb_dev;

	DPRINTK("call tdmb_probe\n");

#if defined(CONFIG_TDMB_TSIF_QC)
	tdmb_tsi_init();
#endif

	dt_pdata = get_tdmb_dt_pdata(&pdev->dev);
	if (!dt_pdata) {
		pr_err("%s : tdmb_dt_pdata is NULL.\n", __func__);
		return -ENODEV;
	}
	dmb_device = &pdev->dev;

	ret = register_chrdev(TDMB_DEV_MAJOR, TDMB_DEV_NAME, &tdmb_ctl_fops);
	if (ret < 0)
		DPRINTK("register_chrdev(TDMB_DEV) failed!\n");

	tdmb_class = class_create(THIS_MODULE, TDMB_DEV_NAME);
	if (IS_ERR(tdmb_class)) {
		unregister_chrdev(TDMB_DEV_MAJOR, TDMB_DEV_NAME);
		DPRINTK("class_create failed!\n");
		return -EFAULT;
	}

	tdmb_dev = device_create(tdmb_class, NULL,
				MKDEV(TDMB_DEV_MAJOR, TDMB_DEV_MINOR),
				NULL, TDMB_DEV_NAME);
	if (IS_ERR(tdmb_dev)) {
		DPRINTK("device_create failed!\n");
		unregister_chrdev(TDMB_DEV_MAJOR, TDMB_DEV_NAME);
		class_destroy(tdmb_class);
		return -EFAULT;
	}

	tdmbdrv_func = tdmb_get_drv_func();
	if (tdmbdrv_func->init)
		tdmbdrv_func->init();

#if TDMB_PRE_MALLOC
	tdmb_make_ring_buffer();
#endif

#ifdef TDMB_WAKE_LOCK_ENABLE
#if defined(CONFIG_TDMB_QUALCOMM)
	pm_qos_add_request(&tdmb_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			PM_QOS_DEFAULT_VALUE);
#endif
	wake_lock_init(&tdmb_wlock, WAKE_LOCK_SUSPEND, "tdmb_wlock");
#endif

#ifdef CONFIG_TDMB_VREG_SUPPORT
	ret = tdmb_vreg_init(&pdev->dev);
	if (ret) {
		DPRINTK("tdmb_vreg_init failed!\n");
		return -ENXIO;
	}
#endif

#if defined(CONFIG_TDMB_ANT_DET)
	wake_lock_init(&tdmb_ant_wlock, WAKE_LOCK_SUSPEND, "tdmb_ant_wlock");

	if (!tdmb_ant_det_reg_input(pdev))
		goto err_reg_input;
	if (!tdmb_ant_det_create_wq())
		goto free_reg_input;

	return 0;

free_reg_input:
	tdmb_ant_det_unreg_input();
err_reg_input:
	return -EFAULT;
#else
	return 0;
#endif
}
static int tdmb_remove(struct platform_device *pdev)
{
	DPRINTK("tdmb_remove!\n");

#if defined(CONFIG_TDMB_ANT_DET)
	tdmb_ant_det_unreg_input();
	tdmb_ant_det_destroy_wq();
	tdmb_ant_det_irq_set(false);
	wake_lock_destroy(&tdmb_ant_wlock);
#endif

#if defined(CONFIG_TDMB_TSIF_QC)
	tdmb_tsi_deinit();
#endif

#ifdef CONFIG_TDMB_VREG_SUPPORT
	regulator_put(dt_pdata->tdmb_vreg);
#endif

	return 0;
}

static int tdmb_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

static int tdmb_resume(struct platform_device *pdev)
{
	return 0;
}
static const struct of_device_id tdmb_match_table[] = {
	{.compatible = "samsung,tdmb"},
	{}
};

static struct platform_driver tdmb_driver = {
	.probe = tdmb_probe,
	.remove = tdmb_remove,
	.suspend = tdmb_suspend,
	.resume = tdmb_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = "tdmb",
		.of_match_table = tdmb_match_table,
	},
};

static int __init tdmb_init(void)
{
	int ret;

#ifdef CONFIG_SAMSUNG_LPM_MODE
	if (poweroff_charging) {
		pr_info("%s : LPM Charging Mode! return 0\n", __func__);
		return 0;
	}
#endif

	DPRINTK("<klaatu TDMB> module init\n");

	ret = platform_driver_register(&tdmb_driver);
	if (ret)
		return ret;

	return 0;
}

static void __exit tdmb_exit(void)
{
	DPRINTK("<klaatu TDMB> module exit\n");

#if TDMB_PRE_MALLOC
	if (ts_ring != 0) {
		kfree(ts_ring);
		ts_ring = 0;
	}
#endif

	unregister_chrdev(TDMB_DEV_MAJOR, TDMB_DEV_NAME);
	device_destroy(tdmb_class, MKDEV(TDMB_DEV_MAJOR, TDMB_DEV_MINOR));
	class_destroy(tdmb_class);
	platform_driver_unregister(&tdmb_driver);

#ifdef TDMB_WAKE_LOCK_ENABLE
#if defined(CONFIG_TDMB_QUALCOMM)
	pm_qos_remove_request(&tdmb_pm_qos_req);
#endif
	wake_lock_destroy(&tdmb_wlock);
#endif
}

module_init(tdmb_init);
module_exit(tdmb_exit);

MODULE_AUTHOR("Samsung");
MODULE_DESCRIPTION("TDMB Driver");
MODULE_LICENSE("GPL v2");