/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct iomap_ops;
struct iomap;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* zero_page_range: required operation. Zero a page range */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
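
/*
 * Illustrative sketch, not compiled and not part of this header: a
 * hypothetical "foo" block driver would typically fill in a dax_operations
 * table and hand it to alloc_dax() (declared below).  The foo_* callbacks
 * are assumed driver helpers, not real kernel symbols; generic_fsdax_supported
 * is the library helper declared later in this header.
 */
#if 0	/* usage example only */
static const struct dax_operations foo_dax_ops = {
	.direct_access		= foo_dax_direct_access,
	.dax_supported		= generic_fsdax_supported,
	.copy_from_iter		= foo_copy_from_iter,
	.copy_to_iter		= foo_copy_to_iter,
	.zero_page_range	= foo_dax_zero_page_range,
};

static struct dax_device *foo_attach_dax(void *foo_private, const char *host)
{
	/*
	 * DAXDEV_F_SYNC marks the device as synchronous so that MAP_SYNC
	 * mappings can be honored (see daxdev_mapping_supported() below).
	 */
	return alloc_dax(foo_private, host, &foo_dax_ops, DAXDEV_F_SYNC);
}
#endif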

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool dax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t len)
{
	return false;
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif
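
/*
 * Illustrative sketch, not compiled: a filesystem's ->mmap handler is the
 * typical caller of daxdev_mapping_supported(), rejecting MAP_SYNC mappings
 * that the backing device cannot honor.  foo_inode_to_daxdev() and
 * foo_file_vm_ops are assumed filesystem-private pieces, not real kernel
 * symbols.
 */
#if 0	/* usage example only */
static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct dax_device *dax_dev = foo_inode_to_daxdev(inode);

	/* MAP_SYNC (VM_SYNC) is only valid on a synchronous dax device */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &foo_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#endif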

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}

static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}
static inline struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
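
/*
 * Illustrative sketch, not compiled: dirty DAX entries are flushed through a
 * filesystem's ->writepages, which for a DAX mapping reduces to a single call
 * to dax_writeback_mapping_range().  foo_sb_to_daxdev() is an assumed helper,
 * not a real kernel symbol.
 */
#if 0	/* usage example only */
static int foo_dax_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct dax_device *dax_dev = foo_sb_to_daxdev(mapping->host->i_sb);

	return dax_writeback_mapping_range(mapping, dax_dev, wbc);
}
#endif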

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
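
/*
 * Illustrative sketch, not compiled: dax_direct_access() must run under
 * dax_read_lock()/dax_read_unlock() so the dax_device cannot be torn down
 * underneath the caller.  foo_read_page() is an assumed helper, not a real
 * kernel symbol; a NULL pfn argument is allowed when only the kernel mapping
 * is needed.
 */
#if 0	/* usage example only */
static int foo_read_page(struct block_device *bdev, struct dax_device *dax_dev,
			 sector_t sector, void *dst)
{
	pgoff_t pgoff;
	void *kaddr;
	long nr;
	int err, id;

	/* Translate a 512-byte sector into a page offset into the dax device */
	err = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (err)
		return err;

	id = dax_read_lock();
	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	if (nr < 0) {
		dax_read_unlock(id);
		return nr;
	}

	memcpy(dst, kaddr, PAGE_SIZE);
	dax_read_unlock(id);
	return 0;
}
#endif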

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}
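
/*
 * Illustrative sketch, not compiled: a filesystem funnels DAX page faults
 * through dax_iomap_fault() with its own iomap_ops (the same ops also serve
 * the read/write iter paths via dax_iomap_rw()).  For a MAP_SYNC write fault
 * the helper returns VM_FAULT_NEEDDSYNC instead of installing the PTE;
 * dax_finish_sync_fault() then syncs the faulted range and inserts the entry.
 * foo_dax_huge_fault() and foo_iomap_ops are assumed filesystem pieces, not
 * real kernel symbols.
 */
#if 0	/* usage example only */
static vm_fault_t foo_dax_huge_fault(struct vm_fault *vmf,
				     enum page_entry_size pe_size)
{
	vm_fault_t ret;
	pfn_t pfn;

	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	return ret;
}
#endif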

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif

#endif