alloc.c

/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
                   struct mlx5_buf *buf)
{
        dma_addr_t t;

        buf->size = size;
        if (size <= max_direct) {
                buf->nbufs      = 1;
                buf->npages     = 1;
                buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
                buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
                                                      size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;
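
                /* If the DMA address is not aligned to the page size we
                 * just reported, shrink page_shift (doubling npages each
                 * step) until it is.
                 */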
                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }
        } else {
                int i;

                buf->direct.buf = NULL;
                buf->nbufs      = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                buf->npages     = buf->nbufs;
                buf->page_shift = PAGE_SHIFT;
                buf->page_list  = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                          GFP_KERNEL);
                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; i++) {
                        buf->page_list[i].buf =
                                dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                    &t, GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;
                }
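
                /* On 64-bit, also vmap() the scattered pages into one
                 * contiguous kernel virtual range so the buffer can be
                 * addressed linearly through direct.buf.
                 */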
                if (BITS_PER_LONG == 64) {
                        struct page **pages;

                        pages = kmalloc(sizeof(*pages) * buf->nbufs,
                                        GFP_KERNEL);
                        if (!pages)
                                goto err_free;
                        for (i = 0; i < buf->nbufs; i++)
                                pages[i] = virt_to_page(buf->page_list[i].buf);
                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
                                               PAGE_KERNEL);
                        kfree(pages);
                        if (!buf->direct.buf)
                                goto err_free;
                }
        }

        return 0;

err_free:
        mlx5_buf_free(dev, buf);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
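
/*
 * Example (illustrative sketch, not part of the driver): a caller would
 * typically allocate a queue buffer, hand its pages to firmware via
 * mlx5_fill_page_array(), and free it on teardown.  The size and
 * max_direct values below are arbitrary, and "pas" stands in for the
 * physical-address array inside the caller's command mailbox.
 *
 *        struct mlx5_buf buf;
 *        int err;
 *
 *        err = mlx5_buf_alloc(dev, 4096, 2 * PAGE_SIZE, &buf);
 *        if (err)
 *                return err;
 *        mlx5_fill_page_array(&buf, pas);
 *        ...
 *        mlx5_buf_free(dev, &buf);
 */

/* Free a buffer allocated with mlx5_buf_alloc(), covering both the
 * contiguous and the per-page case.
 */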
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
        int i;

        if (buf->nbufs == 1)
                dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
                                  buf->direct.map);
        else {
                if (BITS_PER_LONG == 64 && buf->direct.buf)
                        vunmap(buf->direct.buf);

                for (i = 0; i < buf->nbufs; i++)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
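
/* Doorbell records are carved out of DMA-coherent pages.  Each pgdir
 * owns one such page and tracks its free cache-line-sized slots in a
 * bitmap.
 */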
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
{
        struct mlx5_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
        if (!pgdir)
                return NULL;

        bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
        pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
                                            &pgdir->db_dma, GFP_KERNEL);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}
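
/* Claim the first free slot in a pgdir, if any, and point the caller's
 * mlx5_db at both its kernel address and its bus address.
 */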
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
                                    struct mlx5_db *db)
{
        int offset;
        int i;

        i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
        if (i >= MLX5_DB_PER_PAGE)
                return -ENOMEM;

        __clear_bit(i, pgdir->bitmap);

        db->u.pgdir = pgdir;
        db->index = i;
        offset = db->index * L1_CACHE_BYTES;
        db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma = pgdir->db_dma + offset;

        return 0;
}
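
/* Allocate a doorbell record: try every existing pgdir first and only
 * allocate a fresh page when all of them are full.  Serialized by
 * pgdir_mutex.
 */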
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        struct mlx5_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&dev->priv.pgdir_mutex);

        list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
                if (!mlx5_alloc_db_from_pgdir(pgdir, db))
                        goto out;

        pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &dev->priv.pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
        mutex_unlock(&dev->priv.pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
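
/* Return a doorbell slot to its pgdir; once every slot in the page is
 * free again, the page itself is released.
 */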
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        mutex_lock(&dev->priv.pgdir_mutex);

        __set_bit(db->index, db->u.pgdir->bitmap);

        if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
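
/* Write the big-endian DMA address of every page of a buffer into the
 * physical-address (PAS) array handed to firmware.  For a direct
 * allocation the addresses are derived from the single base mapping.
 */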
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
        u64 addr;
        int i;

        for (i = 0; i < buf->npages; i++) {
                if (buf->nbufs == 1)
                        addr = buf->direct.map + (i << buf->page_shift);
                else
                        addr = buf->page_list[i].map;

                pas[i] = cpu_to_be64(addr);
        }
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);