cookie.c

/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object);
/*
 * initialise a cookie jar slab element prior to any use
 */
void fscache_cookie_init_once(void *_cookie)
{
        struct fscache_cookie *cookie = _cookie;

        memset(cookie, 0, sizeof(*cookie));
        spin_lock_init(&cookie->lock);
        spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
}
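
/*
 * Illustrative sketch (not part of this file): the cookie jar slab is
 * created during fscache start-up (in main.c) with the constructor above,
 * roughly along these lines; the exact flags used there are an assumption:
 *
 *      fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
 *                                             sizeof(struct fscache_cookie),
 *                                             0, 0,
 *                                             fscache_cookie_init_once);
 *      if (!fscache_cookie_jar)
 *              return -ENOMEM;
 */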
/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 *   - the top level index cookie for each netfs is stored in the fscache_netfs
 *     struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
        void *netfs_data,
        bool enable)
{
        struct fscache_cookie *cookie;

        BUG_ON(!def);

        _enter("{%s},{%s},%p,%u",
               parent ? (char *) parent->def->name : "<no-parent>",
               def->name, netfs_data, enable);

        fscache_stat(&fscache_n_acquires);

        /* if there's no parent cookie, then we don't create one here either */
        if (!parent) {
                fscache_stat(&fscache_n_acquires_null);
                _leave(" [no parent]");
                return NULL;
        }

        /* validate the definition */
        BUG_ON(!def->get_key);
        BUG_ON(!def->name[0]);

        BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
               parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

        /* allocate and initialise a cookie */
        cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
        if (!cookie) {
                fscache_stat(&fscache_n_acquires_oom);
                _leave(" [ENOMEM]");
                return NULL;
        }

        atomic_set(&cookie->usage, 1);
        atomic_set(&cookie->n_children, 0);

        /* We keep the active count elevated until relinquishment to prevent an
         * attempt to wake up every time the object operations queue quiesces.
         */
        atomic_set(&cookie->n_active, 1);

        atomic_inc(&parent->usage);
        atomic_inc(&parent->n_children);

        cookie->def             = def;
        cookie->parent          = parent;
        cookie->netfs_data      = netfs_data;
        cookie->flags           = (1 << FSCACHE_COOKIE_NO_DATA_YET);

        /* radix tree insertion won't use the preallocation pool unless it's
         * told it may not wait */
        INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);

        switch (cookie->def->type) {
        case FSCACHE_COOKIE_TYPE_INDEX:
                fscache_stat(&fscache_n_cookie_index);
                break;
        case FSCACHE_COOKIE_TYPE_DATAFILE:
                fscache_stat(&fscache_n_cookie_data);
                break;
        default:
                fscache_stat(&fscache_n_cookie_special);
                break;
        }

        if (enable) {
                /* if the object is an index then we need do nothing more here
                 * - we create indices on disk when we need them as an index
                 *   may exist in multiple caches */
                if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
                        if (fscache_acquire_non_index_cookie(cookie) == 0) {
                                set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
                        } else {
                                atomic_dec(&parent->n_children);
                                __fscache_cookie_put(cookie);
                                fscache_stat(&fscache_n_acquires_nobufs);
                                _leave(" = NULL");
                                return NULL;
                        }
                } else {
                        set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
                }
        }

        fscache_stat(&fscache_n_acquires_ok);
        _leave(" = %p", cookie);
        return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
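
/*
 * Illustrative sketch (not part of this file): a netfs reaches this through
 * the fscache_acquire_cookie() wrapper in <linux/fscache.h>.  The cookie
 * definition and the inode/parent names below are hypothetical:
 *
 *      static const struct fscache_cookie_def myfs_file_cache_def = {
 *              .name     = "myfs.file",
 *              .type     = FSCACHE_COOKIE_TYPE_DATAFILE,
 *              .get_key  = myfs_cache_get_key,
 *              .get_attr = myfs_cache_get_attr,
 *              .get_aux  = myfs_cache_get_aux,
 *      };
 *
 *      inode->cache = fscache_acquire_cookie(parent_index_cookie,
 *                                            &myfs_file_cache_def,
 *                                            inode, true);
 *
 * where parent_index_cookie is an index cookie ultimately rooted at the
 * netfs's primary index.  A NULL return is not reported to the netfs as an
 * error; caching is simply not performed for that object.
 */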
/*
 * Enable a cookie to permit it to accept new operations.
 */
void __fscache_enable_cookie(struct fscache_cookie *cookie,
                             bool (*can_enable)(void *data),
                             void *data)
{
        _enter("%p", cookie);

        wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
                         TASK_UNINTERRUPTIBLE);

        if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
                goto out_unlock;

        if (can_enable && !can_enable(data)) {
                /* The netfs decided it didn't want to enable after all */
        } else if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
                /* Wait for outstanding disablement to complete */
                __fscache_wait_on_invalidate(cookie);

                if (fscache_acquire_non_index_cookie(cookie) == 0)
                        set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
        } else {
                set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
        }

out_unlock:
        clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
        wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
}
EXPORT_SYMBOL(__fscache_enable_cookie);
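
/*
 * Illustrative sketch (not part of this file): re-enabling a previously
 * disabled cookie through the fscache_enable_cookie() wrapper.  The
 * callback and the myfs_inode() accessor are hypothetical netfs code:
 *
 *      static bool myfs_can_enable_cache(void *data)
 *      {
 *              struct inode *inode = data;
 *
 *              return !myfs_inode(inode)->cache_disabled;
 *      }
 *
 *      fscache_enable_cookie(inode->cache, myfs_can_enable_cache, inode);
 *
 * If can_enable() returns false the cookie is left disabled; passing NULL
 * for the callback enables it unconditionally (see the test above).
 */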
/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        struct fscache_cache *cache;
        uint64_t i_size;
        int ret;

        _enter("");

        set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

        /* now we need to see whether the backing objects for this cookie yet
         * exist, if not there'll be nothing to search */
        down_read(&fscache_addremove_sem);

        if (list_empty(&fscache_cache_list)) {
                up_read(&fscache_addremove_sem);
                _leave(" = 0 [no caches]");
                return 0;
        }

        /* select a cache in which to store the object */
        cache = fscache_select_cache_for_object(cookie->parent);
        if (!cache) {
                up_read(&fscache_addremove_sem);
                fscache_stat(&fscache_n_acquires_no_cache);
                _leave(" = -ENOMEDIUM [no cache]");
                return -ENOMEDIUM;
        }

        _debug("cache %s", cache->tag->name);

        set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);

        /* ask the cache to allocate objects for this cookie and its parent
         * chain */
        ret = fscache_alloc_object(cache, cookie);
        if (ret < 0) {
                up_read(&fscache_addremove_sem);
                _leave(" = %d", ret);
                return ret;
        }

        /* pass on how big the object we're caching is supposed to be */
        cookie->def->get_attr(cookie->netfs_data, &i_size);

        spin_lock(&cookie->lock);
        if (hlist_empty(&cookie->backing_objects)) {
                spin_unlock(&cookie->lock);
                goto unavailable;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        fscache_set_store_limit(object, i_size);

        /* initiate the process of looking up all the objects in the chain
         * (done by fscache_initialise_object()) */
        fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

        spin_unlock(&cookie->lock);

        /* we may be required to wait for lookup to complete at this point */
        if (!fscache_defer_lookup) {
                _debug("non-deferred lookup %p", &cookie->flags);
                wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                            TASK_UNINTERRUPTIBLE);
                _debug("complete");
                if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
                        goto unavailable;
        }

        up_read(&fscache_addremove_sem);
        _leave(" = 0 [deferred]");
        return 0;

unavailable:
        up_read(&fscache_addremove_sem);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 */
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        int ret;

        _enter("%p,%p{%s}", cache, cookie, cookie->def->name);

        spin_lock(&cookie->lock);
        hlist_for_each_entry(object, &cookie->backing_objects,
                             cookie_link) {
                if (object->cache == cache)
                        goto object_already_extant;
        }
        spin_unlock(&cookie->lock);

        /* ask the cache to allocate an object (we may end up with duplicate
         * objects at this stage, but we sort that out later) */
        fscache_stat(&fscache_n_cop_alloc_object);
        object = cache->ops->alloc_object(cache, cookie);
        fscache_stat_d(&fscache_n_cop_alloc_object);
        if (IS_ERR(object)) {
                fscache_stat(&fscache_n_object_no_alloc);
                ret = PTR_ERR(object);
                goto error;
        }

        fscache_stat(&fscache_n_object_alloc);

        object->debug_id = atomic_inc_return(&fscache_object_debug_id);

        _debug("ALLOC OBJ%x: %s {%lx}",
               object->debug_id, cookie->def->name, object->events);

        ret = fscache_alloc_object(cache, cookie->parent);
        if (ret < 0)
                goto error_put;

        /* only attach if we managed to allocate all we needed, otherwise
         * discard the object we just allocated and instead use the one
         * attached to the cookie */
        if (fscache_attach_object(cookie, object) < 0) {
                fscache_stat(&fscache_n_cop_put_object);
                cache->ops->put_object(object);
                fscache_stat_d(&fscache_n_cop_put_object);
        }

        _leave(" = 0");
        return 0;

object_already_extant:
        ret = -ENOBUFS;
        if (fscache_object_is_dead(object)) {
                spin_unlock(&cookie->lock);
                goto error;
        }
        spin_unlock(&cookie->lock);
        _leave(" = 0 [found]");
        return 0;

error_put:
        fscache_stat(&fscache_n_cop_put_object);
        cache->ops->put_object(object);
        fscache_stat_d(&fscache_n_cop_put_object);
error:
        _leave(" = %d", ret);
        return ret;
}
/*
 * attach a cache object to a cookie
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object)
{
        struct fscache_object *p;
        struct fscache_cache *cache = object->cache;
        int ret;

        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

        spin_lock(&cookie->lock);

        /* there may be multiple initial creations of this object, but we only
         * want one */
        ret = -EEXIST;
        hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
                if (p->cache == object->cache) {
                        if (fscache_object_is_dying(p))
                                ret = -ENOBUFS;
                        goto cant_attach_object;
                }
        }

        /* pin the parent object */
        spin_lock_nested(&cookie->parent->lock, 1);
        hlist_for_each_entry(p, &cookie->parent->backing_objects,
                             cookie_link) {
                if (p->cache == object->cache) {
                        if (fscache_object_is_dying(p)) {
                                ret = -ENOBUFS;
                                spin_unlock(&cookie->parent->lock);
                                goto cant_attach_object;
                        }
                        object->parent = p;
                        spin_lock(&p->lock);
                        p->n_children++;
                        spin_unlock(&p->lock);
                        break;
                }
        }
        spin_unlock(&cookie->parent->lock);

        /* attach to the cache's object list */
        if (list_empty(&object->cache_link)) {
                spin_lock(&cache->object_list_lock);
                list_add(&object->cache_link, &cache->object_list);
                spin_unlock(&cache->object_list_lock);
        }

        /* attach to the cookie */
        object->cookie = cookie;
        atomic_inc(&cookie->usage);
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);

        fscache_objlist_add(object);
        ret = 0;

cant_attach_object:
        spin_unlock(&cookie->lock);
        _leave(" = %d", ret);
        return ret;
}
/*
 * Invalidate an object.  Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        _enter("{%s}", cookie->def->name);

        fscache_stat(&fscache_n_invalidates);

        /* Only permit invalidation of data files.  Invalidating an index will
         * require the caller to release all its attachments to the tree rooted
         * there, and if it's doing that, it may as well just retire the
         * cookie.
         */
        ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

        /* We will be updating the cookie too. */
        BUG_ON(!cookie->def->get_aux);

        /* If there's an object, we tell the object state machine to handle the
         * invalidation on our behalf, otherwise there's nothing to do.
         */
        if (!hlist_empty(&cookie->backing_objects)) {
                spin_lock(&cookie->lock);

                if (fscache_cookie_enabled(cookie) &&
                    !hlist_empty(&cookie->backing_objects) &&
                    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
                                      &cookie->flags)) {
                        object = hlist_entry(cookie->backing_objects.first,
                                             struct fscache_object,
                                             cookie_link);
                        if (fscache_object_is_live(object))
                                fscache_raise_event(
                                        object, FSCACHE_OBJECT_EV_INVALIDATE);
                }

                spin_unlock(&cookie->lock);
        }

        _leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);

/*
 * Wait for object invalidation to complete.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
        _enter("%p", cookie);

        wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
                    TASK_UNINTERRUPTIBLE);

        _leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);
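
/*
 * Illustrative sketch (not part of this file): on noticing a third-party
 * change to a data file, a netfs would normally use the
 * fscache_invalidate() / fscache_wait_on_invalidate() wrappers:
 *
 *      fscache_invalidate(inode->cache);
 *      ...
 *      fscache_wait_on_invalidate(inode->cache);  // only where it must block
 *
 * inode->cache is a hypothetical netfs field holding the data-file cookie.
 */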
/*
 * update the index entries backing a cookie
 */
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        fscache_stat(&fscache_n_updates);

        if (!cookie) {
                fscache_stat(&fscache_n_updates_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("{%s}", cookie->def->name);

        BUG_ON(!cookie->def->get_aux);

        spin_lock(&cookie->lock);

        if (fscache_cookie_enabled(cookie)) {
                /* update the index entry on disk in each cache backing this
                 * cookie.
                 */
                hlist_for_each_entry(object,
                                     &cookie->backing_objects, cookie_link) {
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
                }
        }

        spin_unlock(&cookie->lock);
        _leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
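
/*
 * Illustrative sketch (not part of this file): a netfs calls the
 * fscache_update_cookie() wrapper after changing whatever its get_aux()/
 * get_attr() callbacks report, e.g. when its coherency datum moves on:
 *
 *      myfs_inode->data_version++;          // hypothetical auxiliary datum
 *      fscache_update_cookie(myfs_inode->cache);
 */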
/*
 * Disable a cookie to stop it from accepting new requests from the netfs.
 */
void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
{
        struct fscache_object *object;
        bool awaken = false;

        _enter("%p,%u", cookie, invalidate);

        ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

        if (atomic_read(&cookie->n_children) != 0) {
                pr_err("Cookie '%s' still has children\n",
                       cookie->def->name);
                BUG();
        }

        wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
                         TASK_UNINTERRUPTIBLE);
        if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
                goto out_unlock_enable;

        /* If the cookie is being invalidated, wait for that to complete first
         * so that we can reuse the flag.
         */
        __fscache_wait_on_invalidate(cookie);

        /* Dispose of the backing objects */
        set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);

        spin_lock(&cookie->lock);
        if (!hlist_empty(&cookie->backing_objects)) {
                hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
                        if (invalidate)
                                set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
                }
        } else {
                if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                        awaken = true;
        }
        spin_unlock(&cookie->lock);
        if (awaken)
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

        /* Wait for cessation of activity requiring access to the netfs (when
         * n_active reaches 0).  This makes sure outstanding reads and writes
         * have completed.
         */
        if (!atomic_dec_and_test(&cookie->n_active))
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);

        /* Reset the cookie state if it wasn't relinquished */
        if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
                atomic_inc(&cookie->n_active);
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
        }

out_unlock_enable:
        clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
        wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
        _leave("");
}
EXPORT_SYMBOL(__fscache_disable_cookie);
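
/*
 * Illustrative sketch (not part of this file): a netfs that wants to stop
 * caching an object without relinquishing the cookie (say, while the file
 * is open for writing) would use the fscache_disable_cookie() wrapper:
 *
 *      fscache_disable_cookie(inode->cache, false);    // keep stored data
 *      ...
 *      fscache_enable_cookie(inode->cache, NULL, NULL);
 *
 * Passing true instead marks the backing objects retired so their data is
 * discarded; inode->cache is a hypothetical netfs field.
 */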
/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
        fscache_stat(&fscache_n_relinquishes);
        if (retire)
                fscache_stat(&fscache_n_relinquishes_retire);

        if (!cookie) {
                fscache_stat(&fscache_n_relinquishes_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("%p{%s,%p,%d},%d",
               cookie, cookie->def->name, cookie->netfs_data,
               atomic_read(&cookie->n_active), retire);

        /* No further netfs-accessing operations on this cookie permitted */
        set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);

        __fscache_disable_cookie(cookie, retire);

        /* Clear pointers back to the netfs */
        cookie->netfs_data      = NULL;
        cookie->def             = NULL;
        BUG_ON(cookie->stores.rnode);

        if (cookie->parent) {
                ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
                ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
                atomic_dec(&cookie->parent->n_children);
        }

        /* Dispose of the netfs's link to the cookie */
        ASSERTCMP(atomic_read(&cookie->usage), >, 0);
        fscache_cookie_put(cookie);

        _leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
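
/*
 * Illustrative sketch (not part of this file): when an object goes away for
 * good (e.g. on inode eviction), the netfs drops the cookie through the
 * fscache_relinquish_cookie() wrapper; passing true also retires the
 * on-disk data:
 *
 *      fscache_relinquish_cookie(inode->cache, is_unlinked);
 *      inode->cache = NULL;
 *
 * inode->cache and is_unlinked are hypothetical netfs-side names.
 */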
/*
 * destroy a cookie
 */
void __fscache_cookie_put(struct fscache_cookie *cookie)
{
        struct fscache_cookie *parent;

        _enter("%p", cookie);

        for (;;) {
                _debug("FREE COOKIE %p", cookie);
                parent = cookie->parent;
                BUG_ON(!hlist_empty(&cookie->backing_objects));
                kmem_cache_free(fscache_cookie_jar, cookie);

                if (!parent)
                        break;

                cookie = parent;
                BUG_ON(atomic_read(&cookie->usage) <= 0);
                if (!atomic_dec_and_test(&cookie->usage))
                        break;
        }

        _leave("");
}
/*
 * check the consistency between the netfs inode and the backing cache
 *
 * NOTE: it only serves non-index (data file) cookies
 */
int __fscache_check_consistency(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,", cookie);

        ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        if (hlist_empty(&cookie->backing_objects))
                return 0;

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                return -ENOMEM;

        fscache_operation_init(op, NULL, NULL);
        op->flags = FSCACHE_OP_MYTHREAD |
                (1 << FSCACHE_OP_WAITING) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto inconsistent;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto inconsistent;

        op->debug_id = atomic_inc_return(&fscache_op_debug_id);

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, op) < 0)
                goto submit_failed;

        /* the work queue now carries its own ref on the object */
        spin_unlock(&cookie->lock);

        ret = fscache_wait_for_operation_activation(object, op,
                                                    NULL, NULL, NULL);
        if (ret == 0) {
                /* ask the cache to honour the operation */
                ret = object->cache->ops->check_consistency(op);
                fscache_op_complete(op, false);
        } else if (ret == -ENOBUFS) {
                ret = 0;
        }

        fscache_put_operation(op);
        _leave(" = %d", ret);
        return ret;

submit_failed:
        wake_cookie = __fscache_unuse_cookie(cookie);
inconsistent:
        spin_unlock(&cookie->lock);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        kfree(op);
        _leave(" = -ESTALE");
        return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
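
/*
 * Illustrative sketch (not part of this file): a netfs typically calls the
 * fscache_check_consistency() wrapper when reopening a file, and discards
 * the cached copy if it cannot be shown to match the server.  Treating any
 * negative return as grounds for invalidation is a simplification:
 *
 *      if (fscache_check_consistency(inode->cache) < 0)
 *              fscache_invalidate(inode->cache);
 *
 * inode->cache is the same hypothetical netfs field used in the sketches
 * above.
 */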