
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static const struct fence_ops android_fence_ops;
static const struct file_operations sync_fence_fops;
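
/**
 * sync_timeline_create() - creates a sync object
 * @ops:  specialized sync_timeline_ops for this instance
 * @size: size to allocate for this object; must be at least
 *        sizeof(struct sync_timeline) so implementations can append
 *        private data after the generic struct
 * @name: sync_timeline name
 */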
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	obj->context = fence_context_alloc(1);
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->child_list_lock);

	sync_timeline_debug_add(obj);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	sync_timeline_debug_remove(obj);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}
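
/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj: sync_timeline to destroy
 *
 * Marks @obj as destroyed, signals any remaining active children and drops
 * the creator's reference; the timeline is freed once all references are
 * released.
 */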
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);
	sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
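
/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj: sync_timeline to signal
 *
 * A sync implementation should call this whenever one of its sync_pts has
 * signaled or hit an error condition; every pt whose fence now reports as
 * signaled is dropped from the active list.
 */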
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->child_list_lock, flags);

	list_for_each_entry_safe(pt, next, &obj->active_list_head,
				 active_list) {
		if (fence_is_signaled_locked(&pt->base))
			list_del_init(&pt->active_list);
	}

	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
EXPORT_SYMBOL(sync_timeline_signal);
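
/**
 * sync_pt_create() - creates a sync pt
 * @obj:  parent sync_timeline
 * @size: size to allocate for this pt; must be at least
 *        sizeof(struct sync_pt) so implementations can append private data
 *
 * Takes a reference on @obj and links the new pt onto its child list.
 */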
struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
	unsigned long flags;
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	sync_timeline_get(obj);
	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
		   obj->context, ++obj->value);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	INIT_LIST_HEAD(&pt->active_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);
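
/* Allocate a sync_fence and back it with an anonymous inode file. */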
static struct sync_fence *sync_fence_alloc(int size, const char *name)
{
	struct sync_fence *fence;

	fence = kzalloc(size, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	init_waitqueue_head(&fence->wq);

	return fence;

err:
	kfree(fence);
	return NULL;
}
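
/*
 * Per-pt fence callback: fence->status counts the pts that have not yet
 * signaled, so wake all waiters once the countdown reaches zero.
 */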
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
	struct sync_fence_cb *check;
	struct sync_fence *fence;

	check = container_of(cb, struct sync_fence_cb, cb);
	fence = check->fence;

	if (atomic_dec_and_test(&fence->status))
		wake_up_all(&fence->wq);
}

/* TODO: implement a create which takes more than one sync_pt */
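/**
 * sync_fence_create() - creates a sync fence
 * @name: name of fence to create
 * @pt:   sync_pt to add to the fence
 *
 * Creates a fence containing @pt.  Once this is called, the fence owns @pt:
 * releasing the fence releases the pt.
 */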
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
	if (fence == NULL)
		return NULL;

	fence->num_fences = 1;
	atomic_set(&fence->status, 1);

	fence->cbs[0].sync_pt = &pt->base;
	fence->cbs[0].fence = fence;
	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
			       fence_check_cb_func))
		atomic_dec(&fence->status);

	sync_fence_debug_add(fence);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
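
/**
 * sync_fence_fdget() - get a fence from an fd
 * @fd: fd referencing a fence
 *
 * Ensures @fd references a valid fence and returns it with the underlying
 * file's reference count raised; release it with sync_fence_put().
 */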
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
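
/*
 * Add @pt to the merged fence at slot *i.  The slot is consumed (and an
 * extra reference on @pt taken) only if the callback could be registered,
 * i.e. @pt has not already signaled.
 */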
static void sync_fence_add_pt(struct sync_fence *fence,
			      int *i, struct fence *pt)
{
	fence->cbs[*i].sync_pt = pt;
	fence->cbs[*i].fence = fence;

	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
		fence_get(pt);
		(*i)++;
	}
}
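
/**
 * sync_fence_merge() - merge two fences
 * @name: name of new fence
 * @a:    fence a
 * @b:    fence b
 *
 * Creates a new fence which contains copies of all the sync_pts in both
 * @a and @b.  @a and @b remain valid, independent fences.  Where both
 * fences hold a pt from the same context, only the later one is kept.
 */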
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	int num_fences = a->num_fences + b->num_fences;
	struct sync_fence *fence;
	int i, i_a, i_b;
	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

	fence = sync_fence_alloc(size, name);
	if (fence == NULL)
		return NULL;

	atomic_set(&fence->status, num_fences);

	/*
	 * Assume sync_fence a and b are both ordered and have no
	 * duplicates with the same context.
	 *
	 * If a sync_fence can only be created with sync_fence_merge
	 * and sync_fence_create, this is a reasonable assumption.
	 */
	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
		struct fence *pt_a = a->cbs[i_a].sync_pt;
		struct fence *pt_b = b->cbs[i_b].sync_pt;

		if (pt_a->context < pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_a);

			i_a++;
		} else if (pt_a->context > pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_b);

			i_b++;
		} else {
			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
				sync_fence_add_pt(fence, &i, pt_a);
			else
				sync_fence_add_pt(fence, &i, pt_b);

			i_a++;
			i_b++;
		}
	}

	for (; i_a < a->num_fences; i_a++)
		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

	for (; i_b < b->num_fences; i_b++)
		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

	if (num_fences > i)
		atomic_sub(num_fences - i, &fence->status);
	fence->num_fences = i;

	sync_fence_debug_add(fence);
	return fence;
}
EXPORT_SYMBOL(sync_fence_merge);
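
/* Wait-queue callback: detach the waiter and invoke its async callback. */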
int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
			  int wake_flags, void *key)
{
	struct sync_fence_waiter *wait;

	wait = container_of(curr, struct sync_fence_waiter, work);
	list_del_init(&wait->work.task_list);

	wait->callback(wait->work.private, wait);
	return 1;
}
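
/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:  fence to wait on
 * @waiter: waiter callback struct
 *
 * Returns 1 if @fence has already signaled, 0 if the waiter was queued,
 * or a negative error code if the fence is in an error state.
 */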
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	int err = atomic_read(&fence->status);
	unsigned long flags;

	if (err < 0)
		return err;

	if (!err)
		return 1;

	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
	waiter->work.private = fence;

	spin_lock_irqsave(&fence->wq.lock, flags);
	err = atomic_read(&fence->status);
	if (err > 0)
		__add_wait_queue_tail(&fence->wq, &waiter->work);
	spin_unlock_irqrestore(&fence->wq.lock, flags);

	if (err < 0)
		return err;

	return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
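
/**
 * sync_fence_cancel_async() - cancels an async wait
 * @fence:  fence to wait on
 * @waiter: waiter callback struct
 *
 * Returns 0 if the waiter was removed from the fence's wait queue, or
 * -ENOENT if it was not found (e.g. its callback already ran).
 */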
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fence->wq.lock, flags);
	if (!list_empty(&waiter->work.task_list))
		list_del_init(&waiter->work.task_list);
	else
		ret = -ENOENT;
	spin_unlock_irqrestore(&fence->wq.lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
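
/**
 * sync_fence_wait() - wait on fence
 * @fence:   fence to wait on
 * @timeout: timeout in ms, or a negative value to wait indefinitely
 *
 * Waits for @fence to be signaled or to error out.  Returns 0 on success,
 * -ETIME on timeout, or the fence's negative error status.
 */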
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	long ret;
	int i;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout);

	trace_sync_wait(fence, 1);
	for (i = 0; i < fence->num_fences; ++i)
		trace_sync_pt(fence->cbs[i].sync_pt);
	ret = wait_event_interruptible_timeout(fence->wq,
					       atomic_read(&fence->status) <= 0,
					       timeout);
	trace_sync_wait(fence, 0);

	if (ret < 0) {
		return ret;
	} else if (ret == 0) {
		if (timeout) {
			pr_info("fence timeout on [%pK] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	ret = atomic_read(&fence->status);
	if (ret) {
		pr_info("fence error %ld on [%pK]\n", ret, fence);
		sync_dump();
	}
	return ret;
}
EXPORT_SYMBOL(sync_fence_wait);

static const char *android_fence_get_driver_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->ops->driver_name;
}

static const char *android_fence_get_timeline_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->name;
}

static void android_fence_release(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	list_del(&pt->child_list);
	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(fence->lock, flags);

	if (parent->ops->free_pt)
		parent->ops->free_pt(pt);

	sync_timeline_put(parent);
	fence_free(&pt->base);
}

static bool android_fence_signaled(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	int ret;

	ret = parent->ops->has_signaled(pt);
	if (ret < 0)
		fence->status = ret;
	return ret;
}

/* MTK Add Google Patch 54a9f8e */
static bool android_fence_enable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (android_fence_signaled(fence))
		return false;

	list_add_tail(&pt->active_list, &parent->active_list_head);
	return true;
}
/* End Patch */

static void android_fence_disable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);

	list_del_init(&pt->active_list);
}

static int android_fence_fill_driver_data(struct fence *fence,
					  void *data, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->fill_driver_data)
		return 0;
	return parent->ops->fill_driver_data(pt, data, size);
}

static void android_fence_value_str(struct fence *fence,
				    char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->pt_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->pt_value_str(pt, str, size);
}

static void android_fence_timeline_value_str(struct fence *fence,
					     char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->timeline_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->timeline_value_str(parent, str, size);
}

static const struct fence_ops android_fence_ops = {
	.get_driver_name = android_fence_get_driver_name,
	.get_timeline_name = android_fence_get_timeline_name,
	.enable_signaling = android_fence_enable_signaling,
	/* MTK Add Google Patch 54a9f8e */
	.disable_signaling = android_fence_disable_signaling,
	/* End Patch */
	.signaled = android_fence_signaled,
	.wait = fence_default_wait,
	.release = android_fence_release,
	.fill_driver_data = android_fence_fill_driver_data,
	.fence_value_str = android_fence_value_str,
	.timeline_value_str = android_fence_timeline_value_str,
};
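
/*
 * Final kref release: remove the remaining per-pt callbacks before dropping
 * each pt reference, so no callback can fire into freed memory.
 */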
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
	int i; /* Google Patch 4f4f7cc */

	for (i = 0; i < fence->num_fences; ++i) {
		/* Google Patch 4f4f7cc */
		fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
		fence_put(fence->cbs[i].sync_pt);
	}

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;

	sync_fence_debug_remove(fence);

	kref_put(&fence->kref, sync_fence_free);
	return 0;
}
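
/* Report POLLIN once the status countdown hits zero, POLLERR on error. */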
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;
	int status;

	poll_wait(file, &fence->wq, wait);

	status = atomic_read(&fence->status);

	if (!status)
		return POLLIN;
	else if (status < 0)
		return POLLERR;
	return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}
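
/*
 * SYNC_IOC_MERGE: merge @fence with the fence referenced by
 * sync_merge_data.fd2 and return the result to userspace as a new fd.
 */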
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
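
/*
 * Serialize one sync_pt into a sync_pt_info record, including any
 * driver-specific payload.  Returns the record length in bytes, or a
 * negative error code.
 */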
static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (fence->ops->fill_driver_data) {
		ret = fence->ops->fill_driver_data(fence, info->driver_data,
						   size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));
	if (fence_is_signaled(fence))
		info->status = fence->status >= 0 ? 1 : fence->status;
	else
		info->status = 0;
	info->timestamp_ns = ktime_to_ns(fence->timestamp);

	return info->len;
}
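
/*
 * SYNC_IOC_FENCE_INFO: copy the fence name and status plus one
 * sync_pt_info record per pt into the user buffer (capped at 4 KB).
 * Note the userspace status convention is inverted here: 1 means
 * signaled, 0 means active, negative means error.
 */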
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	__u32 size;
	__u32 len = 0;
	int ret, i;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = atomic_read(&fence->status);
	if (data->status >= 0)
		data->status = !data->status;

	len = sizeof(struct sync_fence_info_data);

	for (i = 0; i < fence->num_fences; ++i) {
		struct fence *pt = fence->cbs[i].sync_pt;

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};