/* xhci-ssusb-mtk.c */
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Chiachun.wang <chiachun.wang@mediatek.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
  15. #include <linux/kernel.h>
  16. #include <linux/slab.h>
  17. #include "xhci.h"
  18. #include "xhci-ssusb-mtk.h"
  19. #define SCH_SUCCESS 1
  20. #define SCH_FAIL 0
  21. #define MAX_PORT_NUM 4
  22. #define SS_BW_BOUND 51000
  23. #define HS_BW_BOUND 6144
  24. /* mtk scheduler bitmasks */
  25. #define EP_BPKTS(p) ((p) & 0x3f)
  26. #define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
  27. #define EP_BBM(p) ((p) << 11)
  28. #define EP_BOFFSET(p) ((p) & 0x3fff)
  29. #define EP_BREPEAT(p) (((p) & 0x7fff) << 16)
  30. static int add_sch_ep(struct sch_ep *sep, struct sch_port *sport)
  31. {
  32. struct sch_ep **ep_array;
  33. int speed = sep->speed;
  34. int ep_type = sep->ep_type;
  35. int i;
  36. if (sep->is_in && speed == USB_SPEED_SUPER)
  37. ep_array = sport->ss_in_eps;
  38. else if (speed == USB_SPEED_SUPER)
  39. ep_array = sport->ss_out_eps;
  40. else if (speed == USB_SPEED_HIGH ||
  41. (sep->is_tt && ep_type == USB_ENDPOINT_XFER_ISOC))
  42. ep_array = sport->hs_eps;
  43. else
  44. ep_array = sport->tt_intr_eps;
  45. for (i = 0; i < MAX_EP_NUM; i++) {
  46. if (ep_array[i] == NULL) {
  47. ep_array[i] = sep;
  48. return SCH_SUCCESS;
  49. }
  50. }
  51. return SCH_FAIL;
  52. }
  53. static int need_more_bw_cost(int old_interval, int old_offset,
  54. int new_interval, int new_offset)
  55. {
  56. int tmp_offset;
  57. int tmp_interval;
  58. int ret = 0;
  59. if (old_interval >= new_interval) {
  60. tmp_offset = old_offset + old_interval - new_offset;
  61. tmp_interval = new_interval;
  62. } else {
  63. tmp_offset = new_offset + new_interval - old_offset;
  64. tmp_interval = old_interval;
  65. }
  66. if (tmp_offset % tmp_interval == 0)
  67. ret = 1;
  68. return ret;
  69. }
  70. static int get_bw_cost(struct sch_ep *sep, int interval, int offset)
  71. {
  72. int ep_offset;
  73. int ep_interval;
  74. int ep_repeat;
  75. int ep_mult;
  76. int m_offset;
  77. int k;
  78. int bw_cost = 0;
  79. ep_interval = sep->interval;
  80. ep_offset = sep->offset;
  81. if (sep->repeat == 0) {
  82. if (need_more_bw_cost(ep_interval, ep_offset, interval, offset))
  83. bw_cost = sep->bw_cost;
  84. } else {
  85. ep_repeat = sep->repeat;
  86. ep_mult = sep->mult;
  87. for (k = 0; k <= ep_mult; k++) {
  88. m_offset = ep_offset + (k * ep_repeat);
  89. if (need_more_bw_cost(ep_interval, m_offset,
  90. interval, offset)) {
  91. bw_cost = sep->bw_cost;
  92. break;
  93. }
  94. }
  95. }
  96. return bw_cost;
  97. }
  98. static int count_ss_bw_single(struct sch_ep **ep_array, int interval,
  99. int offset, int td_size)
  100. {
  101. struct sch_ep *cur_sep;
  102. int bw_required;
  103. int i;
  104. bw_required = 0;
  105. for (i = 0; i < MAX_EP_NUM; i++) {
  106. cur_sep = ep_array[i];
  107. if (cur_sep == NULL)
  108. continue;
  109. bw_required += get_bw_cost(cur_sep, interval, offset);
  110. }
  111. bw_required += td_size;
  112. return bw_required;
  113. }
  114. static int count_ss_bw_repeat(struct sch_ep **ep_array, int maxp,
  115. int interval, int burst, int mult, int offset, int repeat)
  116. {
  117. int bw_required_per_repeat;
  118. int final_bw_required;
  119. int tmp_bw_required;
  120. int bw_required[3] = {0, 0, 0};
  121. struct sch_ep *cur_sep;
  122. int cur_offset;
  123. int i, j;
  124. bw_required_per_repeat = maxp * (burst + 1);
  125. for (j = 0; j <= mult; j++) {
  126. tmp_bw_required = 0;
  127. cur_offset = offset + (j * repeat);
  128. for (i = 0; i < MAX_EP_NUM; i++) {
  129. cur_sep = ep_array[i];
  130. if (cur_sep == NULL)
  131. continue;
  132. tmp_bw_required +=
  133. get_bw_cost(cur_sep, interval, cur_offset);
  134. }
  135. bw_required[j] = tmp_bw_required;
  136. }
  137. final_bw_required = SS_BW_BOUND;
  138. for (j = 0; j <= mult; j++) {
  139. if (bw_required[j] < final_bw_required)
  140. final_bw_required = bw_required[j];
  141. }
  142. final_bw_required += bw_required_per_repeat;
  143. return final_bw_required;
  144. }
  145. static int count_ss_bw(struct sch_port *sport, struct sch_ep *sep,
  146. int offset, int repeat, int td_size)
  147. {
  148. struct sch_ep **ep_array;
  149. int interval = sep->interval;
  150. int bw_cost;
  151. ep_array = sep->is_in ? sport->ss_in_eps : sport->ss_out_eps;
  152. if (repeat) {
  153. bw_cost = count_ss_bw_repeat(ep_array, sep->maxp, interval,
  154. sep->burst, sep->mult, offset, repeat);
  155. } else
  156. bw_cost = count_ss_bw_single(ep_array, interval,
  157. offset, td_size);
  158. return bw_cost;
  159. }
  160. static int is_isoc_tt_mframe_overlap(struct sch_ep *sep,
  161. int interval, int offset)
  162. {
  163. int ep_offset = sep->offset;
  164. int ep_interval = sep->interval << 3;
  165. int tmp_offset;
  166. int tmp_interval;
  167. int is_overlap = 0;
  168. if (ep_interval >= interval) {
  169. tmp_offset = ep_offset + ep_interval - offset;
  170. tmp_interval = interval;
  171. } else {
  172. tmp_offset = offset + interval - ep_offset;
  173. tmp_interval = ep_interval;
  174. }
  175. if (sep->is_in) {
  176. if ((tmp_offset % tmp_interval >= 2)
  177. && (tmp_offset % tmp_interval <= sep->cs_count)) {
  178. is_overlap = 1;
  179. }
  180. } else {
  181. if (tmp_offset % tmp_interval <= sep->cs_count)
  182. is_overlap = 1;
  183. }
  184. return is_overlap;
  185. }
  186. static int count_hs_bw(struct sch_port *sport, int ep_type, int maxp,
  187. int interval, int offset, int td_size)
  188. {
  189. int i;
  190. int bw_required;
  191. struct sch_ep *cur_sep;
  192. int ep_offset;
  193. int ep_interval;
  194. bw_required = 0;
  195. for (i = 0; i < MAX_EP_NUM; i++) {
  196. cur_sep = sport->hs_eps[i];
  197. if (cur_sep == NULL)
  198. continue;
  199. ep_offset = cur_sep->offset;
  200. ep_interval = cur_sep->interval;
  201. if (cur_sep->is_tt &&
  202. (cur_sep->ep_type == USB_ENDPOINT_XFER_ISOC)) {
  203. if (is_isoc_tt_mframe_overlap(cur_sep,
  204. interval, offset))
  205. bw_required += 188;
  206. } else {
  207. if (need_more_bw_cost(ep_interval, ep_offset,
  208. interval, offset))
  209. bw_required += cur_sep->bw_cost;
  210. }
  211. }
  212. bw_required += td_size;
  213. return bw_required;
  214. }
  215. static int count_tt_isoc_bw(int is_in, int maxp, int interval,
  216. int offset, int td_size, struct sch_port *sport)
  217. {
  218. char is_cs;
  219. int s_frame, s_mframe, cur_mframe;
  220. int bw_required, max_bw;
  221. int ss_cs_count;
  222. int cs_mframe;
  223. int i, j;
  224. struct sch_ep *cur_sep;
  225. int ep_offset;
  226. int ep_interval;
  227. int tt_isoc_interval;
  228. tt_isoc_interval = interval << 3; /* frame to mframe */
  229. is_cs = is_in ? 1 : 0;
  230. s_frame = offset / 8;
  231. s_mframe = offset % 8;
  232. ss_cs_count = (maxp + (188 - 1)) / 188;
  233. if (is_cs) {
  234. cs_mframe = offset % 8 + 2 + ss_cs_count;
  235. if (cs_mframe <= 6)
  236. ss_cs_count += 2;
  237. else if (cs_mframe == 7)
  238. ss_cs_count++;
  239. else if (cs_mframe > 8)
  240. return -1;
  241. }
  242. max_bw = 0;
  243. i = is_in ? 2 : 0;
  244. for (cur_mframe = offset + i; i < ss_cs_count; cur_mframe++, i++) {
  245. bw_required = 0;
  246. for (j = 0; j < MAX_EP_NUM; j++) {
  247. cur_sep = sport->hs_eps[j];
  248. if (cur_sep == NULL)
  249. continue;
  250. ep_offset = cur_sep->offset;
  251. ep_interval = cur_sep->interval;
  252. if (cur_sep->is_tt &&
  253. (cur_sep->ep_type == USB_ENDPOINT_XFER_ISOC)) {
  254. /*
  255. * isoc tt
  256. * check if mframe offset overlap
  257. * if overlap, add 188 to the bw
  258. */
  259. if (is_isoc_tt_mframe_overlap(cur_sep,
  260. tt_isoc_interval, cur_mframe))
  261. bw_required += 188;
  262. } else if (cur_sep->ep_type == USB_ENDPOINT_XFER_INT
  263. || cur_sep->ep_type == USB_ENDPOINT_XFER_ISOC) {
  264. /* check if mframe */
  265. if (need_more_bw_cost(ep_interval, ep_offset,
  266. tt_isoc_interval, cur_mframe))
  267. bw_required += cur_sep->bw_cost;
  268. }
  269. }
  270. bw_required += 188;
  271. if (bw_required > max_bw)
  272. max_bw = bw_required;
  273. }
  274. return max_bw;
  275. }
  276. static int count_tt_intr_bw(int interval, int offset,
  277. struct sch_port *u3h_sch_port)
  278. {
  279. struct sch_ep *cur_sep;
  280. int ep_offset;
  281. int ep_interval;
  282. int i;
  283. /* check all eps in tt_intr_eps */
  284. for (i = 0; i < MAX_EP_NUM; i++) {
  285. cur_sep = u3h_sch_port->tt_intr_eps[i];
  286. if (cur_sep == NULL)
  287. continue;
  288. ep_offset = cur_sep->offset;
  289. ep_interval = cur_sep->interval;
  290. if (need_more_bw_cost(ep_interval, ep_offset, interval, offset))
  291. return SCH_FAIL;
  292. }
  293. return SCH_SUCCESS;
  294. }
  295. static int check_tt_intr_bw(struct sch_ep *sep, struct sch_port *sport)
  296. {
  297. int frame_idx;
  298. int frame_interval;
  299. int interval = sep->interval;
  300. frame_interval = interval >> 3;
  301. for (frame_idx = 0; frame_idx < frame_interval; frame_idx++) {
  302. if (count_tt_intr_bw(frame_interval, frame_idx, sport)
  303. == SCH_SUCCESS) {
  304. sep->offset = frame_idx << 3;
  305. sep->pkts = 1;
  306. sep->cs_count = 3;
  307. sep->bw_cost = sep->maxp;
  308. sep->repeat = 0;
  309. return SCH_SUCCESS;
  310. }
  311. }
  312. return SCH_FAIL;
  313. }
  314. static int check_tt_iso_bw(struct sch_ep *sep, struct sch_port *sport)
  315. {
  316. int cs_count = 0;
  317. int td_size;
  318. int mframe_idx, frame_idx;
  319. int cur_bw, best_bw, best_bw_idx;
  320. int cur_offset, cs_mframe;
  321. int interval;
  322. best_bw = HS_BW_BOUND;
  323. best_bw_idx = -1;
  324. cur_bw = 0;
  325. td_size = sep->maxp;
  326. interval = sep->interval >> 3;
  327. for (frame_idx = 0; frame_idx < interval; frame_idx++) {
  328. for (mframe_idx = 0; mframe_idx < 8; mframe_idx++) {
  329. cur_offset = (frame_idx * 8) + mframe_idx;
  330. cur_bw = count_tt_isoc_bw(sep->is_in, sep->maxp,
  331. interval, cur_offset, td_size, sport);
  332. if (cur_bw >= 0 && cur_bw < best_bw) {
  333. best_bw_idx = cur_offset;
  334. best_bw = cur_bw;
  335. if (cur_bw == td_size ||
  336. cur_bw < (HS_BW_BOUND >> 1))
  337. goto found_best_offset;
  338. }
  339. }
  340. }
  341. if (best_bw_idx == -1)
  342. return SCH_FAIL;
  343. found_best_offset:
  344. sep->offset = best_bw_idx;
  345. sep->pkts = 1;
  346. cs_count = (sep->maxp + (188 - 1)) / 188;
  347. if (sep->is_in) {
  348. cs_mframe = (sep->offset >> 3) + 2 + cs_count;
  349. if (cs_mframe <= 6)
  350. cs_count += 2;
  351. else if (cs_mframe == 7)
  352. cs_count++;
  353. }
  354. sep->cs_count = cs_count;
  355. sep->bw_cost = 188;
  356. sep->repeat = 0;
  357. return SCH_SUCCESS;
  358. }
  359. static int check_hs_bw(struct sch_ep *sep, struct sch_port *sport)
  360. {
  361. int td_size;
  362. int cur_bw, best_bw, best_bw_idx;
  363. int cur_offset;
  364. int interval = sep->interval;
  365. best_bw = HS_BW_BOUND;
  366. best_bw_idx = -1;
  367. cur_bw = 0;
  368. td_size = sep->maxp * (sep->burst + 1);
  369. for (cur_offset = 0; cur_offset < interval; cur_offset++) {
  370. cur_bw = count_hs_bw(sport, sep->ep_type, sep->maxp, interval,
  371. cur_offset, td_size);
  372. if (cur_bw >= 0 && cur_bw < best_bw) {
  373. best_bw_idx = cur_offset;
  374. best_bw = cur_bw;
  375. if (cur_bw == td_size || cur_bw < (HS_BW_BOUND >> 1))
  376. break;
  377. }
  378. }
  379. if (best_bw_idx == -1)
  380. return SCH_FAIL;
  381. sep->offset = best_bw_idx;
  382. sep->pkts = sep->burst + 1;
  383. sep->cs_count = 0;
  384. sep->bw_cost = td_size;
  385. sep->repeat = 0;
  386. return SCH_SUCCESS;
  387. }
  388. static int check_ss_bw(struct sch_ep *sep, struct sch_port *sport)
  389. {
  390. int cur_bw, best_bw, best_bw_idx;
  391. int repeat, max_repeat, best_bw_repeat;
  392. int maxp = sep->maxp;
  393. int interval = sep->interval;
  394. int burst = sep->burst;
  395. int mult = sep->mult;
  396. int frame_idx;
  397. int td_size;
  398. best_bw = SS_BW_BOUND;
  399. best_bw_idx = -1;
  400. cur_bw = 0;
  401. td_size = maxp * (mult + 1) * (burst + 1);
  402. if (mult == 0)
  403. max_repeat = 0;
  404. else
  405. max_repeat = (interval - 1) / (mult + 1);
  406. best_bw_repeat = 0;
  407. for (frame_idx = 0; frame_idx < interval; frame_idx++) {
  408. for (repeat = max_repeat; repeat >= 0; repeat--) {
  409. cur_bw = count_ss_bw(sport, sep,
  410. frame_idx, repeat, td_size);
  411. if (cur_bw >= 0 && cur_bw < best_bw) {
  412. best_bw_idx = frame_idx;
  413. best_bw_repeat = repeat;
  414. best_bw = cur_bw;
  415. if (cur_bw <= td_size ||
  416. cur_bw < (SS_BW_BOUND >> 1))
  417. goto found_best_offset;
  418. }
  419. }
  420. }
  421. if (best_bw_idx == -1)
  422. return SCH_FAIL;
  423. found_best_offset:
  424. sep->offset = best_bw_idx;
  425. sep->cs_count = 0;
  426. sep->repeat = best_bw_repeat;
  427. if (sep->repeat == 0) {
  428. sep->bw_cost = (burst + 1) * (mult + 1) * maxp;
  429. sep->pkts = (burst + 1) * (mult + 1);
  430. } else {
  431. sep->bw_cost = (burst + 1) * maxp;
  432. sep->pkts = (burst + 1);
  433. }
  434. return SCH_SUCCESS;
  435. }
  436. static struct sch_port *xhci_to_sch_port(struct xhci_hcd *xhci, int rh_port)
  437. {
  438. struct sch_port *port_array = (struct sch_port *)xhci->sch_ports;
  439. if (rh_port < 1 || rh_port > MAX_PORT_NUM)
  440. return NULL;
  441. return port_array + (rh_port - 1);
  442. }
  443. static struct sch_ep *scheduler_remove_ep(struct xhci_hcd *xhci,
  444. int rh_port, int speed, int is_tt, struct usb_host_endpoint *ep)
  445. {
  446. struct sch_ep **ep_array;
  447. struct sch_ep *cur_ep;
  448. struct sch_port *sport;
  449. int is_in;
  450. int i;
  451. sport = xhci_to_sch_port(xhci, rh_port);
  452. if (sport == NULL) {
  453. xhci_dbg(xhci, "can't get sch_port for roothub-port%d\n", rh_port);
  454. return NULL;
  455. }
  456. is_in = usb_endpoint_dir_in(&ep->desc);
  457. if (is_in && speed == USB_SPEED_SUPER)
  458. ep_array = sport->ss_in_eps;
  459. else if (speed == USB_SPEED_SUPER)
  460. ep_array = sport->ss_out_eps;
  461. else if (speed == USB_SPEED_HIGH ||
  462. (is_tt && usb_endpoint_xfer_isoc(&ep->desc)))
  463. ep_array = sport->hs_eps;
  464. else
  465. ep_array = sport->tt_intr_eps;
  466. for (i = 0; i < MAX_EP_NUM; i++) {
  467. cur_ep = ep_array[i];
  468. if (cur_ep != NULL && cur_ep->ep == ep) {
  469. ep_array[i] = NULL;
  470. xhci_err(xhci, "rm_ep -- ep:0x%p\n", ep);
  471. return cur_ep;
  472. }
  473. }
  474. return NULL;
  475. }
  476. static int scheduler_add_ep(struct xhci_hcd *xhci, struct sch_ep *sep)
  477. {
  478. struct sch_port *sport;
  479. struct xhci_ep_ctx *ep_ctx;
  480. int speed, is_tt, ep_type;
  481. int ret = SCH_SUCCESS;
  482. speed = sep->speed;
  483. is_tt = sep->is_tt;
  484. ep_type = sep->ep_type;
  485. ep_ctx = sep->ep_ctx;
  486. sport = xhci_to_sch_port(xhci, sep->rh_port);
  487. if (sport == NULL) {
  488. xhci_dbg(xhci, "can't get sch_port for roothub-port%d\n", sep->rh_port);
  489. return SCH_FAIL;
  490. }
  491. xhci_err(xhci, "add_ep -- rh_port:%d, speed:%d, in:%d tt:%d ep_type:%d\n",
  492. sep->rh_port, speed, sep->is_in, is_tt, ep_type);
  493. xhci_err(xhci, "\t maxp:%d, interval:%d, burst:%d, mult:%d, ep:0x%p\n",
  494. sep->maxp, sep->interval, sep->burst, sep->mult,
  495. sep->ep);
  496. /* only process special cases */
  497. if (is_tt && ep_type == USB_ENDPOINT_XFER_INT &&
  498. ((speed == USB_SPEED_LOW) || (speed == USB_SPEED_FULL)))
  499. ret = check_tt_intr_bw(sep, sport);
  500. else if (is_tt && ep_type == USB_ENDPOINT_XFER_ISOC)
  501. ret = check_tt_iso_bw(sep, sport);
  502. else if (speed == USB_SPEED_HIGH &&
  503. (ep_type == USB_ENDPOINT_XFER_INT ||
  504. ep_type == USB_ENDPOINT_XFER_ISOC))
  505. ret = check_hs_bw(sep, sport);
  506. else if (speed == USB_SPEED_SUPER &&
  507. (ep_type == USB_ENDPOINT_XFER_INT ||
  508. ep_type == USB_ENDPOINT_XFER_ISOC))
  509. ret = check_ss_bw(sep, sport);
  510. else
  511. sep->pkts = 1;
  512. if (ret == SCH_FAIL)
  513. return ret;
  514. /* all transfers are fixed as burst mode-1 */
  515. sep->burst_mode = 1;
  516. if (add_sch_ep(sep, sport) == SCH_FAIL) {
  517. xhci_err(xhci, "%s: no space to save sch_ep\n", __func__);
  518. return SCH_FAIL;
  519. }
  520. ep_ctx->reserved[0] |= (EP_BPKTS(sep->pkts) |
  521. EP_BCSCOUNT(sep->cs_count) |
  522. EP_BBM(sep->burst_mode));
  523. ep_ctx->reserved[1] |= (EP_BOFFSET(sep->offset) |
  524. EP_BREPEAT(sep->repeat));
  525. xhci_dbg(xhci, "\tBPKTS:%x, BCSCOUNT:%x, BBM:%x, BOFFSET:%x, BREPEAT:%x\n",
  526. sep->pkts, sep->cs_count, sep->burst_mode, sep->offset,
  527. sep->repeat);
  528. return SCH_SUCCESS;
  529. }
  530. int xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
  531. struct usb_host_endpoint *ep)
  532. {
  533. struct xhci_hcd *xhci;
  534. struct xhci_slot_ctx *slot_ctx;
  535. struct xhci_virt_device *vdev;
  536. struct sch_ep *sch_ep = NULL;
  537. int is_tt;
  538. int rh_port;
  539. xhci = hcd_to_xhci(hcd);
  540. vdev = xhci->devs[udev->slot_id];
  541. slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
  542. is_tt = !!(slot_ctx->tt_info & TT_SLOT);
  543. rh_port = DEVINFO_TO_ROOT_HUB_PORT(slot_ctx->dev_info2);
  544. sch_ep = scheduler_remove_ep(xhci, rh_port, udev->speed, is_tt, ep);
  545. if (sch_ep != NULL) {
  546. kfree(sch_ep);
  547. xhci_dbg(xhci, "remove ep:0x%p\n", ep);
  548. } else
  549. xhci_warn(xhci, "don't find sch_ep when drop ep(0x%p)\n", ep);
  550. return 0;
  551. }
  552. int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
  553. struct usb_host_endpoint *ep)
  554. {
  555. struct xhci_hcd *xhci;
  556. struct xhci_virt_device *virt_dev;
  557. struct xhci_container_ctx *in_ctx;
  558. struct xhci_slot_ctx *slot_ctx;
  559. struct xhci_ep_ctx *in_ep_ctx;
  560. struct sch_ep *sch_ep;
  561. unsigned int ep_index;
  562. xhci = hcd_to_xhci(hcd);
  563. /* sch_ep struct should init as zero */
  564. sch_ep = kzalloc(sizeof(struct sch_ep), GFP_KERNEL);
  565. if (sch_ep == NULL)
  566. return -ENOMEM;
  567. virt_dev = xhci->devs[udev->slot_id];
  568. in_ctx = virt_dev->in_ctx;
  569. ep_index = xhci_get_endpoint_index(&ep->desc);
  570. in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
  571. slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
  572. sch_ep->is_tt = !!(slot_ctx->tt_info & TT_SLOT);
  573. if (usb_endpoint_xfer_int(&ep->desc))
  574. sch_ep->ep_type = USB_ENDPOINT_XFER_INT;
  575. else if (usb_endpoint_xfer_isoc(&ep->desc))
  576. sch_ep->ep_type = USB_ENDPOINT_XFER_ISOC;
  577. else if (usb_endpoint_xfer_bulk(&ep->desc))
  578. sch_ep->ep_type = USB_ENDPOINT_XFER_BULK;
  579. if (udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH
  580. || udev->speed == USB_SPEED_LOW) {
  581. sch_ep->burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
  582. sch_ep->mult = 0;
  583. } else if (udev->speed == USB_SPEED_SUPER) {
  584. sch_ep->burst = ep->ss_ep_comp.bMaxBurst;
  585. sch_ep->mult = ep->ss_ep_comp.bmAttributes & 0x3;
  586. }
  587. sch_ep->maxp = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
  588. sch_ep->speed = udev->speed;
  589. sch_ep->interval = EP_INTERVAL_TO_UFRAMES(in_ep_ctx->ep_info);
  590. sch_ep->is_in = usb_endpoint_dir_in(&ep->desc);
  591. sch_ep->ep = ep;
  592. sch_ep->rh_port = DEVINFO_TO_ROOT_HUB_PORT(slot_ctx->dev_info2);
  593. sch_ep->ep_ctx = in_ep_ctx;
  594. if (scheduler_add_ep(xhci, sch_ep) != SCH_SUCCESS) {
  595. kfree(sch_ep);
  596. xhci_err(xhci, "there is not enough bandwidth for mtk xhci\n");
  597. return -ENOSPC;
  598. }
  599. return 0;
  600. }
  601. /* @dev : struct device pointer of xhci platform_device */
  602. int xhci_mtk_init_quirk(struct xhci_hcd *xhci)
  603. {
  604. xhci->sch_ports = kzalloc(sizeof(struct sch_port) * MAX_PORT_NUM, GFP_KERNEL);
  605. if (xhci->sch_ports == NULL)
  606. return -ENOMEM;
  607. xhci->quirks |= XHCI_MTK_HOST;
  608. return 0;
  609. }
  610. void xhci_mtk_exit_quirk(struct xhci_hcd *xhci)
  611. {
  612. kfree(xhci->sch_ports);
  613. }
  614. /*
  615. * The TD size is the number of bytes remaining in the TD (including this TRB),
  616. * right shifted by 10.
  617. * It must fit in bits 21:17, so it can't be bigger than 31.
  618. */
  619. u32 xhci_mtk_td_remainder_quirk(unsigned int td_running_total,
  620. unsigned trb_buffer_length, struct urb *urb)
  621. {
  622. u32 max = 31;
  623. int remainder, td_packet_count, packet_transferred;
  624. unsigned int td_transfer_size = urb->transfer_buffer_length;
  625. unsigned int maxp;
  626. /* no scatter-gather for control transfer */
  627. if (usb_endpoint_xfer_control(&urb->ep->desc))
  628. return 0;
  629. maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
  630. /* 0 for the last TRB */
  631. if (td_running_total + trb_buffer_length == td_transfer_size)
  632. return 0;
  633. packet_transferred = td_running_total / maxp;
  634. td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
  635. remainder = td_packet_count - packet_transferred;
  636. if (remainder > max)
  637. return max << 17;
  638. else
  639. return remainder << 17;
  640. }