mhl_supp.c 54 KB

/*
SiI8348 Linux Driver
Copyright (C) 2013 Silicon Image, Inc.

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation version 2.
This program is distributed AS-IS WITHOUT ANY WARRANTY of any
kind, whether express or implied; INCLUDING without the implied warranty
of MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE or NON-INFRINGEMENT. See
the GNU General Public License for more details at http://www.gnu.org/licenses/gpl-2.0.html.
*/
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>

#include "sii_hal.h"
#include "si_fw_macros.h"
#include "si_mhl_defs.h"
#include "si_infoframe.h"
#include "si_edid.h"
#include "si_mhl2_edid_3d_api.h"
#include "si_mhl_tx_hw_drv_api.h"
#ifdef MEDIA_DATA_TUNNEL_SUPPORT
#include "si_mdt_inputdev.h"
#endif
#include "mhl_linux_tx.h"
#include "mhl_supp.h"
#include "si_infoframe.h"
#include "si_app_devcap.h"
#include "platform.h"
#include "si_8348_drv.h"
#include "hdmi_drv.h"
#include "smartbook.h"

int si_mhl_tx_post_initialize(struct mhl_dev_context *dev_context, bool bootup);
static void cbus_abort_timer_callback(void *callback_param);
bool si_mhl_tx_ucpk_send(struct mhl_dev_context *dev_context,
			 uint8_t ucp_key_code);

#define MHL_DEV_LD_DISPLAY (0x01 << 0)
#define MHL_DEV_LD_VIDEO (0x01 << 1)
#define MHL_DEV_LD_AUDIO (0x01 << 2)
#define MHL_DEV_LD_MEDIA (0x01 << 3)
#define MHL_DEV_LD_TUNER (0x01 << 4)
#define MHL_DEV_LD_RECORD (0x01 << 5)
#define MHL_DEV_LD_SPEAKER (0x01 << 6)
#define MHL_DEV_LD_GUI (0x01 << 7)

#define MHL_LOGICAL_DEVICE_MAP (MHL_DEV_LD_AUDIO | MHL_DEV_LD_VIDEO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_GUI)

#define MHL_MAX_RCP_KEY_CODE (0x7F + 1) // inclusive
uint8_t rcpSupportTable[MHL_MAX_RCP_KEY_CODE] = {
	(MHL_DEV_LD_GUI),	// 0x00 = Select
	(MHL_DEV_LD_GUI),	// 0x01 = Up
	(MHL_DEV_LD_GUI),	// 0x02 = Down
	(MHL_DEV_LD_GUI),	// 0x03 = Left
	(MHL_DEV_LD_GUI),	// 0x04 = Right
	0, 0, 0, 0,		// 0x05-0x08 Reserved
	(MHL_DEV_LD_GUI),	// 0x09 = Root Menu
	0, 0, 0,		// 0x0A-0x0C Reserved
	(MHL_DEV_LD_GUI),	// 0x0D = Select
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	// 0x0E-0x1F Reserved
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),	// Numeric keys 0x20-0x29
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),
	0,			// 0x2A = Dot
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),	// 0x2B = Enter key
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA | MHL_DEV_LD_TUNER),	// 0x2C = Clear key
	0, 0, 0,		// 0x2D-0x2F Reserved
	(MHL_DEV_LD_TUNER),	// 0x30 = Channel Up
	(MHL_DEV_LD_TUNER),	// 0x31 = Channel Dn
	(MHL_DEV_LD_TUNER),	// 0x32 = Previous Channel
	(MHL_DEV_LD_AUDIO),	// 0x33 = Sound Select
	0,			// 0x34 = Input Select
	0,			// 0x35 = Show Information
	0,			// 0x36 = Help
	0,			// 0x37 = Page Up
	0,			// 0x38 = Page Down
	0, 0, 0, 0, 0, 0, 0,	// 0x39-0x3F Reserved
	0,			// 0x40 = Undefined
	(MHL_DEV_LD_SPEAKER),	// 0x41 = Volume Up
	(MHL_DEV_LD_SPEAKER),	// 0x42 = Volume Down
	(MHL_DEV_LD_SPEAKER),	// 0x43 = Mute
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO),				// 0x44 = Play
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_RECORD),	// 0x45 = Stop
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_RECORD),	// 0x46 = Pause
	(MHL_DEV_LD_RECORD),	// 0x47 = Record
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO),				// 0x48 = Rewind
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO),				// 0x49 = Fast Forward
	(MHL_DEV_LD_MEDIA),	// 0x4A = Eject
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA),	// 0x4B = Forward
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_MEDIA),	// 0x4C = Backward
	0, 0, 0,		// 0x4D-0x4F Reserved
	0,			// 0x50 = Angle
	0,			// 0x51 = Subpicture
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	// 0x52-0x5F Reserved
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO),				// 0x60 = Play Function
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO),				// 0x61 = Pause the Play Function
	(MHL_DEV_LD_RECORD),	// 0x62 = Record Function
	(MHL_DEV_LD_RECORD),	// 0x63 = Pause the Record Function
	(MHL_DEV_LD_VIDEO | MHL_DEV_LD_AUDIO | MHL_DEV_LD_RECORD),	// 0x64 = Stop Function
	(MHL_DEV_LD_SPEAKER),	// 0x65 = Mute Function
	(MHL_DEV_LD_SPEAKER),	// 0x66 = Restore Mute Function
	0, 0, 0, 0, 0, 0, 0, 0, 0,	// 0x67-0x6F Undefined or reserved
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	// 0x70-0x7F Undefined or reserved
};
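
/*
 * Illustrative sketch only (compiled out, like the EXAMPLE_ONLY code further
 * below): shows how an incoming RCP key code would be tested against
 * rcpSupportTable and MHL_LOGICAL_DEVICE_MAP, mirroring the check done in
 * si_mhl_tx_process_events(). The helper name is hypothetical and is not part
 * of the driver.
 */
#ifdef EXAMPLE_ONLY
static bool example_rcp_key_is_supported(uint8_t rcp_key_code)
{
	/* Mask to the 7-bit key code range, exactly as the RCP handler below does. */
	return (rcpSupportTable[rcp_key_code & 0x7F] & MHL_LOGICAL_DEVICE_MAP) != 0;
}
#endif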
/*
@file si_mhl_tx.c
*/
//#include "si_mhl_defs.h"
//#include "si_mhl_tx_api.h"
//#include "si_mhl_tx.h"
//#include "si_drv_mhl_tx.h" // exported stuff from the driver
//#include "si_hdmi_tx_lite_api.h"

struct mhl_dev_context *get_mhl_device_context(void *context)
{
	struct mhl_dev_context *dev_context = context;

	if (dev_context->signature != MHL_DEV_CONTEXT_SIGNATURE)
		dev_context = container_of(context,
					   struct mhl_dev_context,
					   drv_context);
	return dev_context;
}

void init_cbus_queue(struct mhl_dev_context *dev_context)
{
	struct cbus_req *entry;
	int idx;

	INIT_LIST_HEAD(&dev_context->cbus_queue);
	INIT_LIST_HEAD(&dev_context->cbus_free_list);
	dev_context->current_cbus_req = NULL;

	/* Place pre-allocated CBUS queue entries on the free list */
	for (idx = 0; idx < NUM_CBUS_EVENT_QUEUE_EVENTS; idx++) {
		entry = &dev_context->cbus_req_entries[idx];
		memset(entry, 0, sizeof(struct cbus_req));
		list_add(&entry->link, &dev_context->cbus_free_list);
	}
}

static void return_cbus_queue_entry(struct mhl_dev_context *dev_context,
				    struct cbus_req *pReq)
{
	list_add(&pReq->link, &dev_context->cbus_free_list);
}
static struct cbus_req *get_free_cbus_queue_entry(struct mhl_dev_context *dev_context)
{
	struct cbus_req *req;
	struct list_head *entry;

	if (list_empty(&dev_context->cbus_free_list)) {
		MHL_TX_DBG_ERR(dev_context, "No free CBUS queue entries available, re-initializing\n");
		si_mhl_tx_post_initialize(dev_context, false);
		return NULL;
		//return_cbus_queue_entry(dev_context, req);
	}
	entry = dev_context->cbus_free_list.next;
	list_del(entry);
	req = list_entry(entry, struct cbus_req, link);
	/* Start clean */
	req->status.flags.cancel = 0;
	return req;
}
void queue_cbus_transaction(struct mhl_dev_context *dev_context,
			    struct cbus_req *pReq)
{
	MHL_TX_DBG_INFO(dev_context, "0x%02x 0x%02x 0x%02x\n",
			pReq->command,
			(MHL_MSC_MSG == pReq->command) ?
			pReq->msg_data[0] : pReq->reg,
			(MHL_MSC_MSG == pReq->command) ?
			pReq->msg_data[1] : pReq->reg_data);
	list_add_tail(&pReq->link, &dev_context->cbus_queue);
}

void queue_priority_cbus_transaction(struct mhl_dev_context *dev_context,
				     struct cbus_req *req)
{
	MHL_TX_DBG_INFO(dev_context, "0x%02x 0x%02x 0x%02x\n",
			req->command,
			(MHL_MSC_MSG == req->command) ?
			req->msg_data[0] : req->reg,
			(MHL_MSC_MSG == req->command) ?
			req->msg_data[1] : req->reg_data);
	list_add(&req->link, &dev_context->cbus_queue);
}

struct cbus_req *get_next_cbus_transaction(struct mhl_dev_context *dev_context)
{
	struct cbus_req *req = NULL;
	struct list_head *entry;

	if (list_empty(&dev_context->cbus_queue)) {
		// MHL_TX_DBG_INFO(dev_context, "Queue empty\n"); // TODO: FD, TBD
		return NULL;
	}
	entry = dev_context->cbus_queue.next;
	if (entry) {
		list_del(entry);
		req = list_entry(entry, struct cbus_req, link);
		if (req) {
			// MHL_TX_DBG_INFO(dev_context, "0x%02x 0x%02x 0x%02x\n",
			//		req->command,
			//		(MHL_MSC_MSG == req->command) ?
			//		req->msg_data[0] : req->reg,
			//		(MHL_MSC_MSG == req->command) ?
			//		req->msg_data[1] : req->reg_data);
		}
	}
	return req;
}
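
/*
 * Illustrative sketch only (compiled out): the typical life cycle of a
 * cbus_req entry as used by the routines above -- take an entry from the free
 * list, fill it in, queue it, and let si_mhl_tx_drive_states() submit it when
 * CBUS is idle. The function name and the register/value chosen here are
 * placeholders for illustration, not part of the driver.
 */
#ifdef EXAMPLE_ONLY
static void example_queue_one_write_stat(struct mhl_dev_context *dev_context)
{
	struct cbus_req *req;

	req = get_free_cbus_queue_entry(dev_context);
	if (req == NULL)
		return;		/* free list exhausted */

	req->retry_count = 2;
	req->command = MHL_WRITE_STAT;
	req->reg = MHL_STATUS_REG_CONNECTED_RDY;	/* example register */
	req->reg_data = MHL_STATUS_DCAP_RDY;		/* example value */

	/* Tail of the queue; queue_priority_cbus_transaction() would use the head. */
	queue_cbus_transaction(dev_context, req);
	si_mhl_tx_drive_states(dev_context);
}
#endif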
uint8_t calculate_generic_checksum(uint8_t *info_frame_data, uint8_t checksum, uint8_t length)
{
	uint8_t i;

	for (i = 0; i < length; i++)
		checksum += info_frame_data[i];
	checksum = 0x100 - checksum;
	return checksum;
}
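
/*
 * Illustrative sketch only (compiled out): calculate_generic_checksum()
 * returns the two's complement of the running byte sum, so re-running the sum
 * seeded with the returned checksum yields 0 for a valid packet. The payload
 * bytes below are arbitrary example data, not from the driver.
 */
#ifdef EXAMPLE_ONLY
static bool example_checksum_round_trip(void)
{
	uint8_t payload[3] = { 0x82, 0x02, 0x0D };	/* arbitrary example bytes */
	uint8_t checksum;

	checksum = calculate_generic_checksum(payload, 0, sizeof(payload));

	/* Summing the payload plus its checksum comes out to 0x00 (mod 256). */
	return 0 == calculate_generic_checksum(payload, checksum, sizeof(payload));
}
#endif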
#ifdef EXAMPLE_ONLY // This function is not called from anywhere.
int8_t avi_info_frame_cmp(avi_info_frame_t *p0, avi_info_frame_t *p1)
{
	uint8_t i;
	int8_t ret_val = 0;
	uint8_t *puc0, *puc1;
	uint8_t temp0, temp1;

	puc0 = (uint8_t *)p0;
	puc1 = (uint8_t *)p1;
	for (i = 0; i < sizeof(*p0); ++i) {
		temp0 = *puc0++;
		temp1 = *puc1++;
		if (temp0 == temp1)
			continue;
		if (temp0 < temp1) {
			ret_val = -1;
		} else {
			ret_val = 1;
		}
		break;
	}
	return ret_val;
}
#endif // EXAMPLE_ONLY // This function is not called from anywhere.
/*
 * si_mhl_tx_set_status
 *
 * Set MHL defined STATUS bits in the peer's register set.
 *
 * reg_to_write	MHL register to write
 * value	data to write to the register
 */
bool si_mhl_tx_set_status(struct mhl_dev_context *dev_context, uint8_t reg_to_write, uint8_t value, uint8_t priority_level)
{
	struct cbus_req *req;

	MHL_TX_DBG_INFO(dev_context, "called\n");
	req = get_free_cbus_queue_entry(dev_context);
	if (req == NULL) {
		dev_err(dev_context->mhl_dev, "si_mhl_tx_set_status: CBUS free queue exhausted\n");
		return false;
	}

	req->retry_count = 2;
	req->command = MHL_WRITE_STAT;
	req->reg = reg_to_write;
	req->reg_data = value;
	// req->offset_data = regToWrite;
	// req->payload_u.msg_data[0] = value;

	if (priority_level)
		queue_cbus_transaction(dev_context, req);
	else
		queue_priority_cbus_transaction(dev_context, req);

	return true;
}
/*
 * si_mhl_tx_set_int
 *
 * Set MHL defined INTERRUPT bits in the peer's register set.
 * This function returns true if the request was successfully queued.
 *
 * reg_to_write	remote interrupt register to write
 * mask		the bits to write to that register
 *
 * priority	0: add to head of CBusQueue
 *		1: add to tail of CBusQueue
 */
bool si_mhl_tx_set_int(struct mhl_dev_context *dev_context,
		       uint8_t reg_to_write, uint8_t mask,
		       uint8_t priority_level)
{
	struct cbus_req *req;

	req = get_free_cbus_queue_entry(dev_context);
	if (req == NULL) {
		MHL_TX_DBG_ERR(dev_context, "si_mhl_tx_set_int: CBUS free queue exhausted\n");
		return false;
	}

	req->retry_count = 2;
	req->command = MHL_SET_INT;
	req->reg = reg_to_write;
	req->reg_data = mask;
	// req->offset_data = reg_to_write;
	// req->payload_u.msg_data[0] = mask;

	if (priority_level)
		queue_cbus_transaction(dev_context, req);
	else
		queue_priority_cbus_transaction(dev_context, req);

	return true;
}

//bool si_mhl_tx_do_write_burst(struct mhl_dev_context *dev_context,
//			      uint8_t start_reg, uint8_t *data,
//			      uint8_t length)
//{
//	if (dev_context->misc_flags_u.as_flags.FLAGS_WRITE_BURST_PENDING) {
//
//		struct cbus_req *req;
//
//		MHL_TX_DBG_INFO(dev_context, "startReg:%d length:%d\n",
//				(int)start_reg, (int)length);
//
//		req = get_free_cbus_queue_entry(dev_context);
//		if (req == NULL) {
//			MHL_TX_DBG_ERR(dev_context, "CBUS free queue exhausted\n");
//			return false;
//		}
//
//		req->retry_count = 1;
//		req->command = MHL_WRITE_BURST;
//		req->length = length;
//		req->offset_data = start_reg;
//		memcpy(req->payload_u.msg_data, data, length);
//
//		queue_priority_cbus_transaction(dev_context, req);
//
//		dev_context->misc_flags_u.as_flags.FLAGS_WRITE_BURST_PENDING = false;
//		return true;
//	}
//	return false;
//}
void si_mhl_tx_reset_states(struct mhl_dev_context *dev_context)
{
	init_cbus_queue(dev_context);
	dev_context->mhl_connection_event = false;
	dev_context->mhl_connected = MHL_TX_EVENT_DISCONNECTION;
	dev_context->msc_msg_arrived = false;
	dev_context->status_0 = 0;
	dev_context->status_1 = 0;
	dev_context->link_mode = MHL_STATUS_CLK_MODE_NORMAL;	// indicate normal (24-bit) mode
	dev_context->preferred_clk_mode = MHL_STATUS_CLK_MODE_NORMAL;	// this can be overridden by the application calling si_mhl_tx_set_preferred_pixel_format()
	dev_context->misc_flags.as_uint32 = 0;
#ifdef MEDIA_DATA_TUNNEL_SUPPORT
	memset(dev_context->mdt_devs.is_dev_registered, INPUT_WAITING_FOR_REGISTRATION, MDT_TYPE_COUNT);
	dev_context->mdt_devs.x_max = X_MAX;
	dev_context->mdt_devs.x_screen = SCALE_X_SCREEN;
	dev_context->mdt_devs.x_raw = SCALE_X_RAW;
	dev_context->mdt_devs.x_shift = X_SHIFT;
	dev_context->mdt_devs.y_max = Y_MAX;
	dev_context->mdt_devs.y_screen = SCALE_Y_SCREEN;
	dev_context->mdt_devs.y_raw = SCALE_Y_RAW;
	dev_context->mdt_devs.y_shift = Y_SHIFT;
	dev_context->mdt_devs.swap_xy = SWAP_XY;
	dev_context->mdt_devs.swap_updown = SWAP_UPDOWN;
	dev_context->mdt_devs.swap_leftright = SWAP_LEFTRIGHT;
#endif
	memset(&dev_context->dev_cap_cache, 0, sizeof(dev_context->dev_cap_cache));
	dev_context->scratch_pad_read_done = true;	// no 'blocking' by APP level at startup
}
void cbus_DPI_timer_callback(void *callback_param);

int si_mhl_tx_initialize(struct mhl_dev_context *dev_context, bool bootup)
{
	int ret;

	MHL_TX_DBG_INFO(dev_context, "called\n");
	///if (bootup)
	{
		ret = mhl_tx_create_timer(dev_context, cbus_abort_timer_callback,
					  dev_context, &dev_context->cbus_abort_timer);
		if (ret != 0) {
			MHL_TX_DBG_ERR(dev_context, "Failed to allocate CBUS abort timer!\n");
			return ret;
		}
		ret = mhl_tx_create_timer(dev_context, cbus_DPI_timer_callback,
					  dev_context, &dev_context->cbus_dpi_timer);
		if (ret != 0) {
			MHL_TX_DBG_ERR(dev_context, "Failed to allocate CBUS dpi timer!\n");
			return ret;
		}
	}
	///return 0;
	si_mhl_tx_reset_states(dev_context);
	return dev_context->drv_info->mhl_device_initialize(
			(struct drv_hw_context *)(&dev_context->drv_context));
}

int si_mhl_tx_post_initialize(struct mhl_dev_context *dev_context, bool bootup)
{
	MHL_TX_DBG_INFO(dev_context, "called\n");
	si_mhl_tx_reset_states(dev_context);
	return dev_context->drv_info->mhl_device_initialize(
			(struct drv_hw_context *)(&dev_context->drv_context));
}
static void cbus_abort_timer_callback(void *callback_param)
{
	struct mhl_dev_context *dev_context = callback_param;

	MHL_TX_DBG_INFO(dev_context, "CBUS abort timer expired, " \
			"enable CBUS messaging\n");
	dev_context->misc_flags.flags.cbus_abort_delay_active = false;
	si_mhl_tx_drive_states(dev_context);
}

void process_cbus_abort(struct mhl_dev_context *dev_context)
{
	/* Delay the sending of any new CBUS messages for 2 seconds */
	dev_context->misc_flags.flags.cbus_abort_delay_active = true;
	mhl_tx_start_timer(dev_context, dev_context->cbus_abort_timer, 2000);
}
//static bool MHL_connect_state = false;
void cbus_DPI_timer_callback(void *callback_param)
{
	struct mhl_dev_context *dev_context = callback_param;

	MHL_TX_DBG_INFO(dev_context, "CBUS DPI timer expired\n");
	if (!(dev_context->intr_info.flags & DRV_INTR_FLAG_DISCONNECT)) {
		mhl_event_notify(dev_context, MHL_TX_EVENT_EDID_DONE, 0, NULL);
	}
}

// If MHL can't work properly while in the connected state, DPI will send the video out to the 8348 after the DPI timer expires.
void process_dpi(struct mhl_dev_context *dev_context)
{
	MHL_TX_DBG_INFO(dev_context, "start DPI timer = 4s\n");
	mhl_tx_start_timer(dev_context, dev_context->cbus_dpi_timer, 4000);
}
#ifdef DEBUG //(
static char *get_cbus_command_string(int command)
{
#define CBUS_COMMAND_CASE(command) case command: return #command;
	switch (command) {
	CBUS_COMMAND_CASE(MHL_ACK)
	CBUS_COMMAND_CASE(MHL_NACK)
	CBUS_COMMAND_CASE(MHL_ABORT)
	CBUS_COMMAND_CASE(MHL_WRITE_STAT)
	CBUS_COMMAND_CASE(MHL_SET_INT)
	CBUS_COMMAND_CASE(MHL_READ_DEVCAP)
	CBUS_COMMAND_CASE(MHL_GET_STATE)
	CBUS_COMMAND_CASE(MHL_GET_VENDOR_ID)
	CBUS_COMMAND_CASE(MHL_SET_HPD)
	CBUS_COMMAND_CASE(MHL_CLR_HPD)
	CBUS_COMMAND_CASE(MHL_SET_CAP_ID)
	CBUS_COMMAND_CASE(MHL_GET_CAP_ID)
	CBUS_COMMAND_CASE(MHL_MSC_MSG)
	CBUS_COMMAND_CASE(MHL_GET_SC1_ERRORCODE)
	CBUS_COMMAND_CASE(MHL_GET_DDC_ERRORCODE)
	CBUS_COMMAND_CASE(MHL_GET_MSC_ERRORCODE)
	CBUS_COMMAND_CASE(MHL_WRITE_BURST)
	CBUS_COMMAND_CASE(MHL_GET_SC3_ERRORCODE)
	CBUS_COMMAND_CASE(MHL_READ_EDID_BLOCK)
	}
	return "unknown";
}
#endif //)
/*
 * si_mhl_tx_drive_states
 *
 * This function is called by the interrupt handler in the driver layer
 * to move the MSC engine along to its next task before allowing the
 * application to run RCP APIs.
 */
void si_mhl_tx_drive_states(struct mhl_dev_context *dev_context)
{
	struct cbus_req *req;

	///MHL_TX_DBG_INFO(dev_context, "called\n");
	if (dev_context->misc_flags.flags.cbus_abort_delay_active) {
		MHL_TX_DBG_INFO(dev_context, "CBUS abort delay in progress, "\
				"can't send any messages\n");
		return;
	}
	if (dev_context->current_cbus_req != NULL) {
		MHL_TX_DBG_INFO(dev_context, "CBUS request:%s in progress\n",
				get_cbus_command_string(dev_context->current_cbus_req->command));
		return;
	}

	/* process queued CBUS transactions */
	req = get_next_cbus_transaction(dev_context);
	if (req == NULL) {
		return;
	}
	MHL_TX_DBG_INFO(dev_context, "req: %p\n", req);

	/* coordinate write burst requests and grants. */
	if (MHL_SET_INT == req->command) {
		if (MHL_RCHANGE_INT == req->reg) {
			// Do not start another round of write_burst until the current round has been handled by the APP level
			if (dev_context->misc_flags.flags.scratchpad_busy || !dev_context->scratch_pad_read_done) {
				if (MHL_INT_REQ_WRT == req->reg_data) {
					/*
					 * Can't handle this request right now so just push it
					 * back onto the front of the queue.
					 */
					queue_priority_cbus_transaction(dev_context, req);
					req = NULL;
					MHL_TX_DBG_INFO(dev_context, "req: %p\n", req);
				}
			} else {
				if (MHL_INT_REQ_WRT == req->reg_data) {
					dev_context->misc_flags.flags.scratchpad_busy = true;
					dev_context->misc_flags.flags.write_burst_pending = true;
				} else if (MHL_INT_GRT_WRT == req->reg_data) {
					dev_context->misc_flags.flags.scratchpad_busy = true;
				}
			}
		}
	} else if (MHL_MSC_MSG == req->command) {
		dev_context->msc_msg_last_data = req->msg_data[1];
	} else if (MHL_WRITE_BURST == req->command) {
		if (dev_context->misc_flags.flags.write_burst_pending) {
			/* Still waiting for write burst grant */
			req = NULL;
			MHL_TX_DBG_INFO(dev_context, "req: %p\n", req);
		}
	}
	MHL_TX_DBG_INFO(dev_context, "req: %p\n", req);

	if (req) {
		bool success;

		dev_context->current_cbus_req = req;
		success = si_mhl_tx_drv_send_cbus_command((struct drv_hw_context *)
							  (&dev_context->drv_context),
							  req);
		if (!success) {
			return_cbus_queue_entry(dev_context, req);
			dev_context->current_cbus_req = NULL;
			if (MHL_READ_EDID_BLOCK == req->command) {
				dev_context->misc_flags.flags.edid_loop_active = 0;
				MHL_TX_DBG_INFO(dev_context, "tag: EDID active: %d\n",
						dev_context->misc_flags.flags.edid_loop_active);
			}
		}
	}
}
enum scratch_pad_status si_mhl_tx_request_write_burst(
		struct mhl_dev_context *dev_context, uint8_t reg_offset,
		uint8_t length, uint8_t *data)
{
	struct cbus_req *req;
	enum scratch_pad_status status = SCRATCHPAD_BUSY;

	if (!(dev_context->dev_cap_cache.mdc.featureFlag
	      & MHL_FEATURE_SP_SUPPORT)) {
		MHL_TX_DBG_ERR(dev_context, "failed SCRATCHPAD_NOT_SUPPORTED\n");
		status = SCRATCHPAD_NOT_SUPPORTED;
	} else if ((reg_offset + length) > SCRATCHPAD_SIZE) {
		MHL_TX_DBG_ERR(dev_context, "invalid offset + length\n");
		status = SCRATCHPAD_BAD_PARAM;
	} else {
		req = get_free_cbus_queue_entry(dev_context);
		if (req == NULL) {
			status = SCRATCHPAD_FAIL;
			goto err_exit;
		}

		memcpy(req->msg_data, data, length);
		req->retry_count = 2;
		req->command = MHL_WRITE_BURST;
		req->reg = MHL_RCHANGE_INT;
		req->reg_data = MHL_INT_REQ_WRT;
		req->offset = reg_offset;
		req->length = length;

		queue_priority_cbus_transaction(dev_context, req);
		MHL_TX_DBG_INFO(dev_context, "request accepted\n");
		si_mhl_tx_drive_states(dev_context);
		status = SCRATCHPAD_SUCCESS;
	}

err_exit:
	return status;
}
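
/*
 * Illustrative sketch only (compiled out): how an upper layer might hand a
 * block of data to si_mhl_tx_request_write_burst() and react to the status it
 * returns. The function name and payload bytes are placeholders; a real burst
 * would carry whatever adopter-ID/payload layout the peer expects.
 */
#ifdef EXAMPLE_ONLY
static void example_send_scratch_pad_burst(struct mhl_dev_context *dev_context)
{
	uint8_t burst[6] = { 0x01, 0x23, 0xAA, 0xBB, 0xCC, 0xDD };	/* placeholder data */

	switch (si_mhl_tx_request_write_burst(dev_context, 0, sizeof(burst), burst)) {
	case SCRATCHPAD_SUCCESS:
		/* Request queued; the burst goes out once CBUS is free and any
		 * required write grant has been received (see si_mhl_tx_drive_states()). */
		break;
	case SCRATCHPAD_NOT_SUPPORTED:
	case SCRATCHPAD_BAD_PARAM:
	case SCRATCHPAD_FAIL:
	default:
		/* Peer lacks scratch pad support, bad offset/length, or no free request. */
		break;
	}
}
#endif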
/*
 * si_mhl_tx_send_msc_msg
 *
 * This function sends a MSC_MSG command to the peer.
 * It returns true if successful in doing so.
 */
static bool si_mhl_tx_send_msc_msg(struct mhl_dev_context *dev_context,
				   uint8_t command, uint8_t cmdData)
{
	struct cbus_req *req;

	MHL_TX_DBG_INFO(dev_context, "called\n");
	req = get_free_cbus_queue_entry(dev_context);
	if (req == NULL) {
		dev_err(dev_context->mhl_dev, "si_mhl_tx_send_msc_msg: CBUS free queue exhausted\n");
		return false;
	}

	req->retry_count = 2;
	req->command = MHL_MSC_MSG;
	// req->payload_u.msg_data[0] = command;
	// req->payload_u.msg_data[1] = cmdData;
	req->msg_data[0] = command;
	req->msg_data[1] = cmdData;
	queue_cbus_transaction(dev_context, req);

	return true;
}
/*
 * si_mhl_rapk_send
 *
 * This function sends RAPK to the peer device.
 */
static bool si_mhl_rapk_send(struct mhl_dev_context *dev_context,
			     uint8_t status)
{
	return si_mhl_tx_send_msc_msg(dev_context, MHL_MSC_MSG_RAPK, status);
}

/*
 * si_mhl_tx_rcpe_send
 *
 * The function will return a value of true if it could successfully send the
 * RCPE subcommand. Otherwise false.
 *
 * When successful, mhl_tx internally sends RCPK with original (last known)
 * keycode.
 */
bool si_mhl_tx_rcpe_send(struct mhl_dev_context *dev_context, uint8_t rcpe_error_code)
{
	bool status;

	MHL_TX_DBG_INFO(dev_context, "called\n");
	status = si_mhl_tx_send_msc_msg(dev_context, MHL_MSC_MSG_RCPE, rcpe_error_code);
	if (status)
		si_mhl_tx_drive_states(dev_context);

	return status;
}
/*
 * si_mhl_tx_process_events
 *
 * This internal function is called at the end of interrupt processing. Its
 * purpose is to process events detected during the interrupt. Some events
 * are handled internally here but most are handled by a notification to
 * interested applications.
 */
void si_mhl_tx_process_events(struct mhl_dev_context *dev_context)
{
	uint8_t rapk_status;

	/* Make sure any events detected during the interrupt are processed. */
	si_mhl_tx_drive_states(dev_context);

	if (dev_context->mhl_connection_event) {
		MHL_TX_DBG_INFO(dev_context, "mhl_connection_event\n");

		/* Consume the message */
		dev_context->mhl_connection_event = false;

		/*
		 * Let interested apps know about the connection state change
		 */
		mhl_event_notify(dev_context, dev_context->mhl_connected,
				 dev_context->dev_cap_cache.mdc.featureFlag,
				 NULL);

		/* If the connection has been lost, reset all state flags. */
		if (MHL_TX_EVENT_DISCONNECTION == dev_context->mhl_connected) {
			si_mhl_tx_reset_states(dev_context);
		} else if (MHL_TX_EVENT_CONNECTION == dev_context->mhl_connected) {
			si_mhl_tx_set_status(dev_context, MHL_STATUS_REG_CONNECTED_RDY, MHL_STATUS_DCAP_RDY, 1);
		}
	} else if (dev_context->msc_msg_arrived) {
		MHL_TX_DBG_INFO(dev_context, "MSC MSG <%02X, %02X>\n",
				dev_context->msc_msg_sub_command,
				dev_context->msc_msg_data);

		/* Consume the message */
		dev_context->msc_msg_arrived = false;

		/*
		 * Map the MSG sub-command to an event ID
		 */
		switch (dev_context->msc_msg_sub_command) {
		case MHL_MSC_MSG_RAP:
			/*
			 * RAP messages are fully handled here.
			 */
			if (dev_context->mhl_flags & MHL_STATE_APPLICATION_RAP_BUSY) {
				rapk_status = MHL_RAPK_BUSY;
			} else {
				rapk_status = MHL_RAPK_NO_ERR;
			}
			dev_context->rap_sub_command = dev_context->msc_msg_data;

			if (MHL_RAP_POLL == dev_context->msc_msg_data) {
				// just do the ack
			} else if (MHL_RAP_CONTENT_ON == dev_context->msc_msg_data) {
				MHL_TX_DBG_INFO(dev_context, "RAP CONTENT_ON\n");
				dev_context->misc_flags.flags.rap_content_on = true;
				si_mhl_tx_drv_content_on(
					(struct drv_hw_context *)&dev_context->drv_context);
			} else if (MHL_RAP_CONTENT_OFF == dev_context->msc_msg_data) {
				MHL_TX_DBG_INFO(dev_context, "RAP CONTENT_OFF\n");
				if (dev_context->misc_flags.flags.rap_content_on) {
					dev_context->misc_flags.flags.rap_content_on = false;
					si_mhl_tx_drv_content_off(
						(struct drv_hw_context *)&dev_context->drv_context);
				}
			} else {
				MHL_TX_DBG_INFO(dev_context, "Unrecognized RAP code: 0x%02x "\
						"received\n", dev_context->msc_msg_data);
				rapk_status = MHL_RAPK_UNRECOGNIZED;
			}

			/* Always RAPK to the peer */
			si_mhl_rapk_send(dev_context, rapk_status);

			if (rapk_status == MHL_RAPK_NO_ERR)
				mhl_event_notify(dev_context, MHL_TX_EVENT_RAP_RECEIVED,
						 dev_context->msc_msg_data, NULL);
			break;

		case MHL_MSC_MSG_RCP:
			/*
			 * If we get an RCP key that we do NOT support, send back RCPE.
			 * Do not notify the app layer.
			 */
			if (rcpSupportTable[dev_context->msc_msg_data & 0x7F]
			    & MHL_LOGICAL_DEVICE_MAP) {
				mhl_event_notify(dev_context, MHL_TX_EVENT_RCP_RECEIVED,
						 dev_context->msc_msg_data, NULL);
			} else {
				/* Save the keycode to send an RCPK after RCPE. */
				dev_context->msc_save_rcp_key_code = dev_context->msc_msg_data;
				si_mhl_tx_rcpe_send(dev_context, RCPE_INEEFECTIVE_KEY_CODE);
			}
			break;

		case MHL_MSC_MSG_RCPK:
			mhl_event_notify(dev_context, MHL_TX_EVENT_RCPK_RECEIVED,
					 dev_context->msc_msg_data, NULL);
			break;

		case MHL_MSC_MSG_RCPE:
			mhl_event_notify(dev_context, MHL_TX_EVENT_RCPE_RECEIVED,
					 dev_context->msc_msg_data, NULL);
			break;

		case MHL_MSC_MSG_UCP:
			/*
			 * Save the keycode so that we can send a UCPE message in
			 * case the UCP key code is rejected by the host application.
			 */
			dev_context->msc_save_ucp_key_code = dev_context->msc_msg_data;
			mhl_event_notify(dev_context, MHL_TX_EVENT_UCP_RECEIVED,
					 dev_context->msc_save_ucp_key_code, NULL);
			break;

		case MHL_MSC_MSG_UCPK:
			mhl_event_notify(dev_context, MHL_TX_EVENT_UCPK_RECEIVED,
					 dev_context->msc_msg_data, NULL);
			break;

		case MHL_MSC_MSG_UCPE:
			mhl_event_notify(dev_context, MHL_TX_EVENT_UCPE_RECEIVED,
					 dev_context->msc_msg_data, NULL);
			break;

		case MHL_MSC_MSG_RAPK:
			MHL_TX_DBG_INFO(dev_context, "RAPK\n");
			break;

		default:
			MHL_TX_DBG_WARN(dev_context, "Unexpected MSC message "\
					"sub-command code: 0x%02x received!\n",
					dev_context->msc_msg_sub_command);
			break;
		}
	}
}
bool si_mhl_tx_read_devcap(struct mhl_dev_context *dev_context,
			   uint8_t offset)
{
	struct cbus_req *req;

	MHL_TX_DBG_INFO(dev_context, "called\n");
	req = get_free_cbus_queue_entry(dev_context);
	if (req == NULL) {
		dev_err(dev_context->mhl_dev, "si_mhl_tx_read_devcap: CBUS free queue exhausted\n");
		return false;
	}

	req->retry_count = 2;
	req->command = MHL_READ_DEVCAP;
	req->reg = offset;
	req->reg_data = 0;	/* do this to avoid confusion */
	// req->offset_data = offset;
	// req->payload_u.msg_data[0] = 0; /* do this to avoid confusion */
	queue_cbus_transaction(dev_context, req);

	return true;
}

bool si_mhl_tx_rcpk_send(struct mhl_dev_context *dev_context, uint8_t rcp_key_code)
{
	bool status;

	MHL_TX_DBG_INFO(dev_context, "called\n");
	status = si_mhl_tx_send_msc_msg(dev_context, MHL_MSC_MSG_RCPK, rcp_key_code);
	if (status)
		si_mhl_tx_drive_states(dev_context);

	return status;
}
/*
 * si_mhl_tx_request_first_edid_block
 *
 * This function initiates a CBUS command to read the first EDID block
 * (block 0). The request is only queued if an EDID read loop is not
 * already active.
 */
extern void enable_intr3(struct drv_hw_context *hw_context);

void si_mhl_tx_request_first_edid_block(struct mhl_dev_context *dev_context)
{
	MHL_TX_DBG_INFO(dev_context, "tag: EDID active: %d\n", dev_context->misc_flags.flags.edid_loop_active);
	dev_context->edid_parse_done = false;	// TODO: FD, TBI

	/* Enable EDID interrupt */
	//TODO: FD, TBC, EDID DDC handling interrupt
	enable_intr3((struct drv_hw_context *)(&dev_context->drv_context));

	if (!dev_context->misc_flags.flags.edid_loop_active) {
		struct cbus_req *req;

		req = get_free_cbus_queue_entry(dev_context);
		if (req == NULL) {
			MHL_TX_DBG_INFO(dev_context, "couldn't get free cbus req\n");
		} else {
			dev_context->misc_flags.flags.edid_loop_active = 1;
			MHL_TX_DBG_INFO(dev_context, "tag: EDID active: %d\n",
					dev_context->misc_flags.flags.edid_loop_active);

			/* Send MHL_READ_EDID_BLOCK command */
			req->retry_count = 2;
			req->command = MHL_READ_EDID_BLOCK;
			req->offset = 0;	/* block number */
			req->msg_data[0] = 0;	/* do this to avoid confusion */
			queue_cbus_transaction(dev_context, req);
			si_mhl_tx_drive_states(dev_context);
			dev_context->current_cbus_req = NULL;
		}
	}
}
///////////////////////////////////////////////////////////////////////////////
//
// si_mhl_tx_msc_command_done
//
// This function is called by the driver to inform of completion of the last command.
//
// It is called in interrupt context to meet MHL specified timings; therefore,
// it must not call into the app layer and should do only negligible processing
// (no printfs).
//
//#define FLAG_OR_NOT(x) TestMiscFlag(FLAGS_HAVE_##x)?#x:""
//#define SENT_OR_NOT(x) TestMiscFlag(FLAGS_SENT_##x)?#x:""
void si_mhl_tx_msc_command_done(struct mhl_dev_context *dev_context, uint8_t data1)
{
	struct cbus_req *req;

	req = dev_context->current_cbus_req;
	if (req == NULL) {
		MHL_TX_DBG_ERR(dev_context, "No message to associate with "\
			       "completion notification\n");
		return;
	}
	///MHL_TX_DBG_ERR(dev_context, " cmd--0x%x(reg0x%x) data1 = %02X\n", req->command, req->reg, data1);
	dev_context->current_cbus_req = NULL;

	if (req->status.flags.cancel == true) {
		MHL_TX_DBG_INFO(dev_context, "Canceling request with command 0x%02x\n",
				req->command);
	} else if (MHL_READ_DEVCAP == req->command) {
		bool temp;
		int i;
		MHLDevCap_u devcap_changes;

		if (req->reg < DEVCAP_SIZE) {	// req->reg holds the current DEVCAP offset
			dev_context->dev_cap_cache_new.devcap_cache[req->reg] = data1;
			MHL_TX_DBG_INFO(dev_context, "MHL_READ_DEVCAP DONE, idx: 0x%02x, data: 0x%02x\n",
					req->reg, data1);
		}

		/*
		 * Check if all DEVCAP registers have been read
		 */
		if (0x0F == req->reg) {
#ifdef CONFIG_MTK_HDMI_3D_SUPPORT
			if ((data1 == 0xB9) || (data1 == 0xBA))
#else
			if (data1 == 0xB9)
#endif
			{
				//SMB
				mhl_event_notify(dev_context, MHL_TX_EVENT_DEV_CAP_UPDATE, data1, NULL);
			}
		}

		// Not all DEVCAP registers have been read
		if (++dev_context->dev_cap_cache_index < DEVCAP_SIZE) {
			si_mhl_tx_read_devcap(dev_context, dev_context->dev_cap_cache_index);
		}
		// All DEVCAP registers have been read
		else {
			/*
			 * Generate a change mask between the old and new devcaps
			 */
			for (i = 0; i < sizeof(dev_context->dev_cap_cache); ++i) {
				devcap_changes.devcap_cache[i]
					= dev_context->dev_cap_cache.devcap_cache[i]
					^ dev_context->dev_cap_cache_new.devcap_cache[i];
			}

			// update the DEVCAP cache
			dev_context->dev_cap_cache = dev_context->dev_cap_cache_new;
			dev_context->dev_cap_cache_index = 0;

			// look for a change in the POW bit
			if (MHL_DEV_CATEGORY_POW_BIT & devcap_changes.mdc.deviceCategory) {
				uint8_t param;

				param = dev_context->dev_cap_cache.mdc.deviceCategory
					& MHL_DEV_CATEGORY_POW_BIT;
				if (param) {
					/*
					 * Since the downstream device is supplying VBUS power we turn
					 * off our VBUS power here. If the platform application
					 * can control VBUS power it should turn off its VBUS
					 * power now.
					 */
					mhl_tx_vbus_control(VBUS_OFF);
					//set_pin((struct drv_hw_context *)&dev_context->drv_context
					//	,LED_SINK_VBUS_ON
					//	,GPIO_LED_ON
					//	);
				} else {
					mhl_tx_vbus_control(VBUS_ON);
					//set_pin((struct drv_hw_context *)&dev_context->drv_context
					//	,LED_SINK_VBUS_ON
					//	,GPIO_LED_OFF
					//	);
				}

				/* Inform interested Apps of the MHL power change */
				mhl_event_notify(dev_context, MHL_TX_EVENT_POW_BIT_CHG,
						 param, NULL);
			}

			/*
			 * Check to see if any other bits besides POW_BIT have changed
			 */
			devcap_changes.mdc.deviceCategory &= ~MHL_DEV_CATEGORY_POW_BIT;
			temp = 0;
			for (i = 0; i < sizeof(devcap_changes); ++i) {
				temp |= devcap_changes.devcap_cache[i];
			}
			if (temp) {
				if (dev_context->misc_flags.flags.mhl_hpd) {
					MHL_TX_DBG_INFO(dev_context, "Have HPD\n");
					si_mhl_tx_initiate_edid_sequence(dev_context->edid_parser_context);
				} else {
					MHL_TX_DBG_INFO(dev_context, "No HPD\n");
				}
			}

			/* indicate that the DEVCAP cache is up to date. */
			dev_context->misc_flags.flags.have_complete_devcap = true;
		}
	} else if (MHL_READ_EDID_BLOCK == req->command) {
		si_mhl_tx_drive_states(dev_context);	// TODO: FD, TBC, may need to check
		/*
		if (0 == data1) {
			si_mhl_tx_handle_atomic_hw_edid_read_complete(dev_context->edid_parser_context, req);
			dev_context->edid_parse_done = true;	// TODO: FD, TBC, check carefully
		}
		dev_context->misc_flags.flags.edid_loop_active = 0;
		*/
		MHL_TX_DBG_INFO(dev_context, "tag: EDID active: %d\n",
				dev_context->misc_flags.flags.edid_loop_active);
		// EDID reads need to be done in SW TPI mode; now that the read is done, switch back to the default HW TPI mode
		// si_mhl_tx_drv_set_hw_tpi_mode((struct drv_hw_context *)(&dev_context->drv_context), true);
	} else if (MHL_WRITE_STAT == req->command) {
		MHL_TX_DBG_INFO(dev_context, "WRITE_STAT miscFlags: %08X\n\n",
				dev_context->misc_flags.as_uint32);
		if (MHL_STATUS_REG_CONNECTED_RDY == req->reg) {
			if (MHL_STATUS_DCAP_RDY & req->reg_data) {
				dev_context->misc_flags.flags.sent_dcap_rdy = true;
				MHL_TX_DBG_INFO(dev_context, "\n\nSent DCAP_RDY\n");
				si_mhl_tx_set_int(dev_context, MHL_RCHANGE_INT,
						  MHL_INT_DCAP_CHG, 0);
			}
		} else if (MHL_STATUS_REG_LINK_MODE == req->reg) {
			if (MHL_STATUS_PATH_ENABLED & req->reg_data) {
				dev_context->misc_flags.flags.sent_path_en = true;
				MHL_TX_DBG_INFO(dev_context, "FLAGS_SENT_PATH_EN\n");
			}
		}
	} else if (MHL_MSC_MSG == req->command) {
		// TODO: FD, TBI, it seems there is no way to get here... This function is only called after DRV_INTR_FLAG_MSC_DONE...
		if (dev_context->intr_info.flags & DRV_INTR_FLAG_MSC_NAK) {
			msleep(1000);
			MHL_TX_DBG_INFO(dev_context, "MSC_NAK, re-trying...\n");
			/*
			 * Request must be retried, so place it back
			 * on the front of the queue.
			 */
			req->status.as_uint8 = 0;
			queue_priority_cbus_transaction(dev_context, req);
			req = NULL;
		} else {
			if (MHL_MSC_MSG_RCPE == req->msg_data[0]) {
				/*
				 * RCPE is always followed by an RCPK with the original
				 * key code received.
				 */
				si_mhl_tx_rcpk_send(dev_context, dev_context->msc_save_rcp_key_code);
			} else if (MHL_MSC_MSG_UCPE == req->msg_data[0]) {
				/*
				 * UCPE is always followed by a UCPK with the original
				 * key code received.
				 */
				si_mhl_tx_ucpk_send(dev_context, dev_context->msc_save_ucp_key_code);
			} else {
				MHL_TX_DBG_INFO(dev_context, "default\n" \
						"\tcommand: 0x%02X \n" \
						"\tmsg_data: 0x%02X " \
						"msc_msg_last_data: 0x%02X\n",
						req->command,
						req->msg_data[0],
						dev_context->msc_msg_last_data);
			}
		}
	} else if (MHL_WRITE_BURST == req->command) {
		MHL_TX_DBG_INFO(dev_context, "MHL_WRITE_BURST\n");
		/*
		 * Write to the scratch pad of the downstream device is complete.
		 * Send a SET_INT message to the device to inform it of the
		 * completion. Use priority 0 to place this message at the
		 * head of the queue.
		 */
		si_mhl_tx_set_int(dev_context, MHL_RCHANGE_INT,
				  MHL_INT_DSCR_CHG, 0);
	} else if (MHL_SET_INT == req->command) {
		MHL_TX_DBG_INFO(dev_context, "MHL_SET_INT\n");
		if (MHL_RCHANGE_INT == req->reg) {
			MHL_TX_DBG_INFO(dev_context, "\n\nSent MHL_RCHANGE_INT\n");
			if (MHL_INT_DSCR_CHG == req->reg_data) {
				MHL_TX_DBG_INFO(dev_context, "MHL_INT_DSCR_CHG\n");
				dev_context->misc_flags.flags.scratchpad_busy = false;
			} else if (MHL_INT_REQ_WRT == req->reg_data) {
				/*
				 * Successfully sent the scratch pad write request.
				 * Now reformat the command queue entry used to send the
				 * write request to send the write burst data once a
				 * write grant interrupt is received.
				 */
				req->retry_count = 1;
				req->command = MHL_WRITE_BURST;
				queue_priority_cbus_transaction(dev_context, req);
				req = NULL;
			}
		}
	} else {
		MHL_TX_DBG_INFO(dev_context, "default\n"
				"\tcommand: 0x%02X reg: 0x%02x reg_data: 0x%02x "\
				"offset: 0x%02x msg_data[0]: 0x%02x msg_data[1]: 0x%02x\n",
				req->command,
				req->reg, req->reg_data,
				req->offset,
				req->msg_data[0], req->msg_data[1]);
	}

	if (req != NULL)
		return_cbus_queue_entry(dev_context, req);

	if (!(dev_context->misc_flags.flags.rcp_ready)) {
		MHL_TX_DBG_INFO(dev_context, "have(%s) sent(%s %s)\n",
				(dev_context->misc_flags.flags.have_complete_devcap) ?
				"complete DEV_CAP" : "",
				(dev_context->misc_flags.flags.sent_path_en) ?
				"PATH_EN" : "",
				(dev_context->misc_flags.flags.sent_dcap_rdy) ?
				"DCAP_RDY" : "");

		if (dev_context->misc_flags.flags.have_complete_devcap) {
			if (dev_context->misc_flags.flags.sent_path_en) {
				if (dev_context->misc_flags.flags.sent_dcap_rdy) {
					/*
					 * Now we can entertain App commands for RCP, UCP, RAP
					 */
					dev_context->misc_flags.flags.rcp_ready = true;
				}
			}
		}
	}
}
void si_mhl_tx_process_write_burst_data(struct mhl_dev_context *dev_context)
{
	int ret_val = 0;
	BurstId_e burst_id;

	MHL_TX_DBG_INFO(NULL, "\n");
	// continue else statement to support 3D along with MDT
	ret_val = si_mhl_tx_drv_get_scratch_pad((struct drv_hw_context *)
						(&dev_context->drv_context), 0,
						dev_context->incoming_scratch_pad.asBytes,
						sizeof(dev_context->incoming_scratch_pad));
	if (ret_val < 0) {
		MHL_TX_DBG_INFO(dev_context, "scratch pad failure 0x%x\n",
				ret_val);
	} else {
		burst_id = BURST_ID(dev_context->incoming_scratch_pad.
				    videoFormatData.burst_id);
		switch (burst_id) {
		case burst_id_3D_VIC:
#ifndef CONFIG_MTK_HDMI_3D_SUPPORT
			si_mhl_tx_process_3d_vic_burst(
				dev_context->edid_parser_context,
				&dev_context->incoming_scratch_pad.videoFormatData);
#endif
			break;

		case burst_id_3D_DTD:
#ifndef CONFIG_MTK_HDMI_3D_SUPPORT
			si_mhl_tx_process_3d_dtd_burst(
				dev_context->edid_parser_context,
				&dev_context->incoming_scratch_pad.videoFormatData);
#endif
			break;

		case LOCAL_ADOPTER_ID:
#ifdef MEDIA_DATA_TUNNEL_SUPPORT //(
		case MHL_TEST_ADOPTER_ID:
			si_mhl_tx_mdt_process_packet(dev_context, (void *)&dev_context->incoming_scratch_pad.asBytes);
#else //)(
			// Set flag to 'unread' before the notification to APP level
			dev_context->scratch_pad_read_done = false;
			/*
			 * Cause a notification event to be raised to allow
			 * interested applications a chance to process the
			 * received write burst data.
			 */
			mhl_event_notify(dev_context, MHL_TX_EVENT_SPAD_RECEIVED,
					 sizeof(dev_context->incoming_scratch_pad),
					 dev_context->incoming_scratch_pad.asBytes);
#endif //)
			break;

		default:
			MHL_TX_DBG_INFO(dev_context, "Dropping write burst with "\
					"invalid adopter id: 0x%04x\n", burst_id);
			break;
		}
	}
}
void si_mhl_tx_set_pp_link(struct mhl_dev_context *dev_context, uint8_t value)
{
	bool status = false;

	status = si_mhl_tx_set_status(dev_context, MHL_STATUS_REG_LINK_MODE, value, 0);
	if (status) {
		si_mhl_tx_drive_states(dev_context);
	}
}

static bool si_mhl_tx_set_path_en(struct mhl_dev_context *dev_context)
{
	MHL_TX_DBG_INFO(dev_context, "called\n");
	si_mhl_tx_drv_enable_video_path((struct drv_hw_context *)(&dev_context->drv_context));	// TODO: FD, TBI, check references of this function for details
	dev_context->link_mode |= MHL_STATUS_PATH_ENABLED;
	return si_mhl_tx_set_status(dev_context, MHL_STATUS_REG_LINK_MODE, dev_context->link_mode, 1);
}

static bool si_mhl_tx_clr_path_en(struct mhl_dev_context *dev_context)
{
	MHL_TX_DBG_ERR(dev_context, "called\n");
	si_mhl_tx_drv_disable_video_path((struct drv_hw_context *)(&dev_context->drv_context));
	dev_context->link_mode &= ~MHL_STATUS_PATH_ENABLED;
	return si_mhl_tx_set_status(dev_context, MHL_STATUS_REG_LINK_MODE, dev_context->link_mode, 1);
}
static void si_mhl_tx_refresh_peer_devcap_entries(
		struct mhl_dev_context *dev_context)
{
	if (MHL_STATUS_DCAP_RDY & dev_context->status_0) {
		MHL_TX_DBG_INFO(dev_context, "DCAP_RDY DEVCAP: %s\n",
				dev_context->misc_flags.flags.have_complete_devcap
				? "current" : "stale");
		dev_context->misc_flags.flags.have_complete_devcap = false;	// reset the flag to its default
		// TODO: FD, TBC, begin
		// bugzilla 27431 - dongle power cord attachment fix. if (!dev_context->misc_flags.flags.have_complete_devcap)
		{
			MHL_TX_DBG_INFO(dev_context, "devcap is stale\n");
			/*
			 * If there is a DEV CAP read operation in progress
			 * cancel it and issue a new DEV CAP read to make sure
			 * we pick up all the DEV CAP register changes.
			 */
			if (dev_context->current_cbus_req != NULL) {
				if (dev_context->current_cbus_req->command == MHL_READ_DEVCAP) {
					dev_context->current_cbus_req->status.flags.cancel = true;
				}
			}
			// TODO: FD, TBC, to read_devcap
			dev_context->dev_cap_cache_index = 0;
			si_mhl_tx_read_devcap(dev_context, dev_context->dev_cap_cache_index);
		}
		// TODO: FD, TBC, end
	} else {
		MHL_TX_DBG_INFO(dev_context, "Can't read DEV CAP registers, DCAP_RDY not set yet\n");
	}
}
/*
 * si_mhl_tx_got_mhl_msc_message
 *
 * This function is called by the driver to inform of the arrival of an MHL
 * MSC_MSG such as RCP, RCPK, RCPE.
 */
//void si_mhl_tx_got_mhl_msc_message(struct mhl_dev_context *dev_context,
//				   uint8_t sub_command, uint8_t cmd_data)
//{
//	/* Remember the event for processing at the completion of the interrupt. */
//	dev_context->msc_msg_arrived = true;
//	dev_context->msc_msg_sub_command = sub_command;
//	dev_context->msc_msg_data = cmd_data;
//}

/*
 * si_mhl_tx_got_mhl_intr
 *
 * This function is called to inform of the arrival
 * of an MHL INTERRUPT message.
 */
void si_mhl_tx_got_mhl_intr(struct mhl_dev_context *dev_context, uint8_t intr_0, uint8_t intr_1)
{
	MHL_TX_DBG_INFO(dev_context, "INTERRUPT Arrived. %02X, %02X\n",
			intr_0, intr_1);

	/* Handle DCAP_CHG INTR here */
	if (MHL_INT_DCAP_CHG & intr_0) {
		MHL_TX_DBG_INFO(dev_context, "got DCAP_CHG\n");
		if (MHL_STATUS_DCAP_RDY & dev_context->status_0) {
			MHL_TX_DBG_INFO(dev_context, "got DCAP_CHG & DCAP_RDY\n");
			si_mhl_tx_refresh_peer_devcap_entries(dev_context);
		}
	}

	if (MHL_INT_DSCR_CHG & intr_0) {
		/* remote WRITE_BURST is complete */
		dev_context->misc_flags.flags.scratchpad_busy = false;
		si_mhl_tx_process_write_burst_data(dev_context);
	}

	if (MHL_INT_REQ_WRT & intr_0) {
		/* Scratch pad write request from the sink device. */
		/*
		 * Also need to consider the 'read' status of the scratch pad at the APP level
		 */
		if (dev_context->misc_flags.flags.scratchpad_busy || !dev_context->scratch_pad_read_done) {
			/*
			 * Use priority 1 to defer sending the grant until
			 * local traffic is done
			 */
			si_mhl_tx_set_int(dev_context, MHL_RCHANGE_INT,
					  MHL_INT_GRT_WRT, 1);
		} else {
			dev_context->misc_flags.flags.scratchpad_busy = true;
			/* use priority 0 to respond immediately */
			si_mhl_tx_set_int(dev_context, MHL_RCHANGE_INT,
					  MHL_INT_GRT_WRT, 0);
		}
	}

	if (MHL_INT_GRT_WRT & intr_0) {
		/*
		 * Write burst grant received so enable the
		 * write burst message to be sent.
		 */
		dev_context->misc_flags.flags.write_burst_pending = false;
	}
	// uint8_t length = sizeof(dev_context->outgoing_scratch_pad);
	// MHL_TX_DBG_INFO(dev_context, "MHL_INT_GRT_WRT length:%d\n",
	//		length);
	// si_mhl_tx_do_write_burst(dev_context, 0x40,
	//			    dev_context->outgoing_scratch_pad.asBytes,
	//			    length);
	// }

	if (MHL_INT_EDID_CHG & intr_1) {
		MHL_TX_DBG_INFO(dev_context, "MHL_INT_EDID_CHG\n");
		si_edid_reset(dev_context->edid_parser_context);
		if (dev_context->misc_flags.flags.have_complete_devcap) {
			if (dev_context->misc_flags.flags.mhl_hpd) {
				MHL_TX_DBG_INFO(dev_context, "tag: EDID_CHG\n");
				si_mhl_tx_initiate_edid_sequence(dev_context->edid_parser_context);
			}
		} else {
			MHL_TX_DBG_INFO(dev_context, "refreshing DEVCAP");
			si_mhl_tx_refresh_peer_devcap_entries(dev_context);
		}
	}
}

/*
 * si_mhl_tx_got_mhl_status
 *
 * This function is called by the driver to inform of the arrival of
 * an MHL STATUS message.
 */
void si_mhl_tx_got_mhl_status(struct mhl_dev_context *dev_context,
	uint8_t status_0, uint8_t status_1)
{
	uint8_t status_change_bit_mask_0;
	uint8_t status_change_bit_mask_1;

	MHL_TX_DBG_INFO(dev_context, "STATUS Arrived. %02X, %02X\n",
		status_0, status_1);

	/* Handle the DCAP_RDY status right here */
	status_change_bit_mask_0 = status_0 ^ dev_context->status_0;
	status_change_bit_mask_1 = status_1 ^ dev_context->status_1;

	/*
	 * Remember the event. Other code checks the saved values,
	 * so save them early, but not before the XOR operations above.
	 */
	dev_context->status_0 = status_0;
	dev_context->status_1 = status_1;

	if (MHL_STATUS_DCAP_RDY & status_change_bit_mask_0) {
		MHL_TX_DBG_INFO(dev_context, "DCAP_RDY changed\n");
		if (MHL_STATUS_DCAP_RDY & status_0)
			si_mhl_tx_refresh_peer_devcap_entries(dev_context);
	}

	/* Did PATH_EN change? */
	if (MHL_STATUS_PATH_ENABLED & status_change_bit_mask_1) {
		MHL_TX_DBG_INFO(dev_context, "PATH_EN changed\n");
		if (MHL_STATUS_PATH_ENABLED & status_1)
			si_mhl_tx_set_path_en(dev_context);
		else
			si_mhl_tx_clr_path_en(dev_context);
	}
}
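
/*
 * Illustration only, not called anywhere in the driver: a minimal sketch
 * of the XOR change-detection idiom used above. XOR-ing the new status
 * byte with the cached copy produces a mask with a 1 in every bit that
 * toggled; AND-ing that mask with a flag tells whether that particular
 * flag changed in this update. Deliberately guarded out of the build.
 */
#if 0
static bool example_flag_toggled(uint8_t cached, uint8_t incoming, uint8_t flag)
{
	uint8_t changed = (uint8_t)(cached ^ incoming);

	/* true only when this particular flag differs from the cached copy */
	return (changed & flag) ? true : false;
}
#endif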

/*
 * si_mhl_tx_rcp_send
 *
 * This function checks that the peer device supports RCP and, if so,
 * sends rcpKeyCode. It returns true if the RCP sub-command and key code
 * were successfully queued for transmission, false otherwise.
 */
bool si_mhl_tx_rcp_send(struct mhl_dev_context *dev_context, uint8_t rcpKeyCode)
{
	bool status;

	MHL_TX_DBG_INFO(dev_context, "called\n");

	/* Make sure the peer supports RCP */
	if ((dev_context->dev_cap_cache.mdc.featureFlag & MHL_FEATURE_RCP_SUPPORT) &&
		(dev_context->misc_flags.flags.rcp_ready)) {
		status = si_mhl_tx_send_msc_msg(dev_context, MHL_MSC_MSG_RCP, rcpKeyCode);
		if (status)
			si_mhl_tx_drive_states(dev_context);
	} else {
		MHL_TX_DBG_ERR(dev_context, "failed\n");
		status = false;
	}
	return status;
}
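
/*
 * Illustration only, guarded out of the build: a minimal sketch of how a
 * host-facing layer might forward a remote-control key to the sink via
 * si_mhl_tx_rcp_send(). The raw key code is an assumption taken from the
 * MHL RCP key table (0x44 is commonly "Play"); verify it against the
 * spec revision in use. dev_context is assumed to be a valid, connected
 * device context obtained elsewhere.
 */
#if 0
static void example_send_rcp_play(struct mhl_dev_context *dev_context)
{
	uint8_t key_code = 0x44;	/* assumed RCP "Play" key code */

	if (!si_mhl_tx_rcp_send(dev_context, key_code))
		MHL_TX_DBG_ERR(dev_context,
			"peer does not support RCP or is not ready\n");
}
#endif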

/*
 * si_ucp_msg_send
 *
 * This function sends the requested UCP message if UCP reception is
 * supported by the downstream device.
 *
 * The function returns true if the message can be sent, false otherwise.
 */
bool si_ucp_msg_send(struct mhl_dev_context *dev_context,
	uint8_t ucp_msg_sub_cmd, uint8_t ucp_msg_data)
{
	bool status;

	MHL_TX_DBG_INFO(dev_context, "called\n");

	/*
	 * Make sure the peer supports UCP and that the connection is in a
	 * state where a UCP message can be sent (the rcp_ready flag also
	 * gates UCP and RAP traffic in this driver).
	 */
	if ((dev_context->dev_cap_cache.mdc.featureFlag & MHL_FEATURE_UCP_RECV_SUPPORT) &&
		(dev_context->misc_flags.flags.rcp_ready)) {
		status = si_mhl_tx_send_msc_msg(dev_context, ucp_msg_sub_cmd, ucp_msg_data);
		if (status) {
			si_mhl_tx_drive_states(dev_context);
		}
	} else {
		MHL_TX_DBG_ERR(dev_context, "failed\n");
		status = false;
	}
	return status;
}

/*
 * si_mhl_tx_ucp_send
 *
 * This function is (indirectly) called by a host application to send
 * a UCP key code to the downstream device.
 *
 * Returns true if the key code can be sent, false otherwise.
 */
bool si_mhl_tx_ucp_send(struct mhl_dev_context *dev_context,
	uint8_t ucp_key_code)
{
	MHL_TX_DBG_INFO(dev_context, "called key code: 0x%02x\n", ucp_key_code);
	return si_ucp_msg_send(dev_context, MHL_MSC_MSG_UCP, ucp_key_code);
}

/*
 * si_mhl_tx_ucpk_send
 *
 * This function is (indirectly) called by a host application to send
 * a UCP acknowledge message for a received UCP key code message.
 *
 * Returns true if the message can be sent, false otherwise.
 */
bool si_mhl_tx_ucpk_send(struct mhl_dev_context *dev_context,
	uint8_t ucp_key_code)
{
	MHL_TX_DBG_INFO(dev_context, "called key code: 0x%02x\n", ucp_key_code);
	return si_ucp_msg_send(dev_context, MHL_MSC_MSG_UCPK, ucp_key_code);
}

/*
 * si_mhl_tx_ucpe_send
 *
 * This function is (indirectly) called by a host application to send a
 * UCP negative acknowledgment message for a received UCP key code message.
 *
 * Returns true if the message can be sent, false otherwise.
 *
 * When successful, mhl_tx internally sends UCPK with the original
 * (last known) UCP key code.
 */
bool si_mhl_tx_ucpe_send(struct mhl_dev_context *dev_context,
	uint8_t ucpe_error_code)
{
	MHL_TX_DBG_INFO(dev_context, "called\n");
	return si_ucp_msg_send(dev_context, MHL_MSC_MSG_UCPE, ucpe_error_code);
}
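
/*
 * Illustration only, guarded out of the build: a minimal sketch of the
 * UCP send/acknowledge flow built on the helpers above. The character
 * value and the UCPE error code below are assumptions for illustration;
 * in the MHL spec an error code of 0x01 conventionally means
 * "ineffective key code", but verify against the spec revision in use.
 */
#if 0
static void example_ucp_round_trip(struct mhl_dev_context *dev_context,
	uint8_t received_ucp_code, bool can_handle_it)
{
	/* Send a character to the downstream device */
	si_mhl_tx_ucp_send(dev_context, 'A');

	/* Respond to a character received from the peer */
	if (can_handle_it)
		si_mhl_tx_ucpk_send(dev_context, received_ucp_code);
	else
		si_mhl_tx_ucpe_send(dev_context, 0x01);	/* assumed error code */
}
#endif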

/*
 * si_mhl_tx_rap_send
 *
 * This function sends the requested RAP action code message if RAP
 * is supported by the downstream device.
 *
 * The function returns true if the message can be sent, false otherwise.
 */
bool si_mhl_tx_rap_send(struct mhl_dev_context *dev_context,
	uint8_t rap_action_code)
{
	bool status;

	MHL_TX_DBG_INFO(dev_context, "called\n");

	/*
	 * Make sure the peer supports RAP and that the connection is
	 * in a state where a RAP message can be sent.
	 */
	if ((dev_context->dev_cap_cache.mdc.featureFlag & MHL_FEATURE_RAP_SUPPORT) &&
		(dev_context->misc_flags.flags.rcp_ready)) {
		status = si_mhl_tx_send_msc_msg(dev_context, MHL_MSC_MSG_RAP, rap_action_code);
		if (status) {
			si_mhl_tx_drive_states(dev_context);
		}
	} else {
		MHL_TX_DBG_ERR(dev_context, "failed\n");
		status = false;
	}
	return status;
}
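
/*
 * Illustration only, guarded out of the build: a minimal sketch of using
 * si_mhl_tx_rap_send() to ask the sink to turn content on or off. The
 * raw action codes are assumptions taken from the MHL RAP table
 * (0x10 CONTENT_ON, 0x11 CONTENT_OFF); verify them against the spec
 * revision in use, and prefer the driver's own macros if it defines them
 * elsewhere.
 */
#if 0
static void example_rap_content(struct mhl_dev_context *dev_context, bool on)
{
	uint8_t action_code = on ? 0x10 : 0x11;	/* assumed RAP action codes */

	if (!si_mhl_tx_rap_send(dev_context, action_code))
		MHL_TX_DBG_ERR(dev_context,
			"peer does not support RAP or is not ready\n");
}
#endif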

/*
 * si_mhl_tx_notify_downstream_hpd_change
 *
 * Handle the arrival of SET_HPD or CLEAR_HPD messages.
 *
 * Turn the content off or on based on which message arrived.
 */
void si_mhl_tx_notify_downstream_hpd_change(struct mhl_dev_context *dev_context,
	uint8_t downstream_hpd)
{
	MHL_TX_DBG_INFO(dev_context, "HPD = %s\n",
		downstream_hpd ? "HIGH" : "LOW");

	if (0 == downstream_hpd) {
		struct cbus_req *req = dev_context->current_cbus_req;

		dev_context->misc_flags.flags.mhl_hpd = false;
		mhl_tx_stop_timer(dev_context, dev_context->cbus_dpi_timer);
		mhl_event_notify(dev_context, MHL_TX_EVENT_HPD_CLEAR, 0, NULL);
		if (req) {
			if (MHL_READ_EDID_BLOCK == req->command) {
				return_cbus_queue_entry(dev_context, req);
				dev_context->current_cbus_req = NULL;
				/*dev_context->misc_flags.flags.edid_loop_active = 0;
				MHL_TX_DBG_INFO(dev_context, "tag: EDID active: %d\n",
					dev_context->misc_flags.flags.edid_loop_active);*/
			}
		}
		dev_context->misc_flags.flags.edid_loop_active = 0;
		MHL_TX_DBG_INFO(dev_context, "tag: EDID active: %d\n",
			dev_context->misc_flags.flags.edid_loop_active);
		si_edid_reset(dev_context->edid_parser_context);
	} else {
		dev_context->misc_flags.flags.mhl_hpd = true;
		process_dpi(dev_context);
		/*
		 * A possible EDID read is complete here;
		 * see MHL spec section 5.9.1.
		 */
		if (dev_context->misc_flags.flags.have_complete_devcap) {
			/* DEVCAP refresh is complete */
			MHL_TX_DBG_INFO(dev_context, "tag:\n");
			si_mhl_tx_initiate_edid_sequence(dev_context->edid_parser_context);
		} else {
			si_mhl_tx_refresh_peer_devcap_entries(dev_context);
		}
	}
}

/*
 * si_mhl_tx_get_peer_dev_cap_entry
 *
 * index -- the devcap index to get
 * *data -- pointer to the location to which to write the data
 *
 * returns:
 *	0 -- success
 *	1 -- busy (a DEVCAP refresh is still in progress)
 */
uint8_t si_mhl_tx_get_peer_dev_cap_entry(struct mhl_dev_context *dev_context,
	uint8_t index, uint8_t *data)
{
	if (!dev_context->misc_flags.flags.have_complete_devcap) {
		/* Update is in progress */
		return 1;
	} else {
		*data = dev_context->dev_cap_cache.devcap_cache[index];
		return 0;
	}
}
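
/*
 * Illustration only, guarded out of the build: a minimal sketch of
 * reading one cached DEVCAP register through
 * si_mhl_tx_get_peer_dev_cap_entry(). A non-zero return means the cache
 * is still being refreshed, so the caller should retry later rather
 * than trust the output value.
 */
#if 0
static bool example_read_devcap_entry(struct mhl_dev_context *dev_context,
	uint8_t index, uint8_t *value_out)
{
	if (si_mhl_tx_get_peer_dev_cap_entry(dev_context, index, value_out)) {
		/* DEVCAP refresh still in progress; try again later */
		return false;
	}
	return true;
}
#endif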

/*
 * si_get_scratch_pad_vector
 *
 * offset -- the beginning offset into the scratch pad from which to fetch entries
 * length -- the number of entries to fetch
 * *data  -- pointer to an array of bytes where the data should be placed
 *
 * returns:
 *	scratch_pad_status -- see si_mhl_tx_api.h for details
 */
enum scratch_pad_status si_get_scratch_pad_vector(
	struct mhl_dev_context *dev_context,
	uint8_t offset, uint8_t length,
	uint8_t *data)
{
	if (!(dev_context->dev_cap_cache.mdc.featureFlag
		& MHL_FEATURE_SP_SUPPORT)) {
		MHL_TX_DBG_INFO(dev_context, "failed SCRATCHPAD_NOT_SUPPORTED\n");
		return SCRATCHPAD_NOT_SUPPORTED;
	} else if (dev_context->misc_flags.flags.scratchpad_busy) {
		return SCRATCHPAD_BUSY;
	} else if ((offset >= sizeof(dev_context->incoming_scratch_pad)) ||
		(length > (sizeof(dev_context->incoming_scratch_pad) - offset))) {
		return SCRATCHPAD_BAD_PARAM;
	} else {
		uint8_t *scratch_pad = dev_context->incoming_scratch_pad.asBytes;

		scratch_pad += offset;
		memcpy(data, scratch_pad, length);
		/* Set flag to 'read' after the data is retrieved by the APP level */
		dev_context->scratch_pad_read_done = true;
	}
	return SCRATCHPAD_SUCCESS;
}
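
/*
 * Illustration only, guarded out of the build: a minimal sketch of how an
 * upper layer might copy the incoming scratch pad out through
 * si_get_scratch_pad_vector() and branch on the status codes returned
 * above. The 16-byte buffer size is an assumption for illustration; the
 * real limit is sizeof(dev_context->incoming_scratch_pad).
 */
#if 0
static void example_read_scratch_pad(struct mhl_dev_context *dev_context)
{
	uint8_t buffer[16];	/* assumed size for illustration */

	switch (si_get_scratch_pad_vector(dev_context, 0,
					  sizeof(buffer), buffer)) {
	case SCRATCHPAD_SUCCESS:
		MHL_TX_DBG_INFO(dev_context, "scratch pad copied\n");
		break;
	case SCRATCHPAD_BUSY:
		MHL_TX_DBG_INFO(dev_context, "scratch pad busy, retry later\n");
		break;
	default:
		MHL_TX_DBG_ERR(dev_context, "scratch pad read failed\n");
		break;
	}
}
#endif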

#ifdef ENABLE_DUMP_INFOFRAME //(
#define AT_ROW_END(i, length) (((i) & ((length) - 1)) == ((length) - 1))

void DumpIncomingInfoFrameImpl(char *pszId, char *pszFile, int iLine,
	info_frame_t *pInfoFrame, uint8_t length)
{
	uint8_t j;
	uint8_t *pData = (uint8_t *)pInfoFrame;

	pr_debug("mhl_tx infoframe: %s: length:0x%02x -- ", pszId, length);
	for (j = 0; j < length; j++) {
		pr_debug("%02X ", pData[j]);
		if (AT_ROW_END(j, 32))
			pr_debug("\n");
	}
	pr_debug("\n");
}
#endif //)

void *si_mhl_tx_get_drv_context(void *context)
{
	struct mhl_dev_context *dev_context = context;

	if (dev_context->signature == MHL_DEV_CONTEXT_SIGNATURE) {
		return &dev_context->drv_context;
	} else {
		return context;
	}
}

uint8_t si_get_peer_mhl_version(void *dev_context)
{
	struct mhl_dev_context *dev_context_ptr = (struct mhl_dev_context *)dev_context;
	uint8_t ret_val = dev_context_ptr->dev_cap_cache.mdc.mhl_version;

	MHL_TX_DBG_INFO(dev_context_ptr, "0x%02x\n", ret_val);
	return ret_val;
}

int si_peer_supports_packed_pixel(void *dev_context)
{
	struct mhl_dev_context *dev_context_ptr =
		(struct mhl_dev_context *)dev_context;

	return PACKED_PIXEL_AVAILABLE(dev_context_ptr);
}
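
/*
 * Illustration only, guarded out of the build: a minimal sketch showing
 * how the two query helpers above might be combined when choosing an
 * output mode. The assumption that the major MHL version occupies the
 * upper nibble of the DEVCAP MHL_VERSION byte matches the usual DEVCAP
 * layout, but verify against the spec revision in use.
 */
#if 0
static bool example_can_use_packed_pixel(void *dev_context)
{
	uint8_t peer_version = si_get_peer_mhl_version(dev_context);

	/* Packed pixel modes require an MHL 2.x (or later) peer */
	return ((peer_version >> 4) >= 2) &&
		si_peer_supports_packed_pixel(dev_context);
}
#endif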

int si_mhl_tx_shutdown(struct mhl_dev_context *dev_context)
{
	MHL_TX_DBG_ERR(dev_context,
		"SiI8348 may continue to output video. Driver features and APIs will not work.\n");
	si_mhl_tx_drv_shutdown((struct drv_hw_context *)&dev_context->drv_context);
	return 0;
}