/* libfuse — fuse_lowlevel.c */
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4
5 Implementation of (most of) the low-level FUSE API. The session loop
6 functions are implemented in separate files.
7
8 This program can be distributed under the terms of the GNU LGPLv2.
9 See the file LGPL2.txt
10*/
11
12#define _GNU_SOURCE
13
14#include "fuse_config.h"
15#include "fuse_i.h"
16#include "fuse_kernel.h"
17#include "fuse_opt.h"
18#include "fuse_misc.h"
19#include "mount_util.h"
20#include "util.h"
21#include "fuse_uring_i.h"
22
23#include <pthread.h>
24#include <stdatomic.h>
25#include <stdint.h>
26#include <inttypes.h>
27#include <stdbool.h>
28#include <stdio.h>
29#include <stdlib.h>
30#include <stddef.h>
31#include <stdalign.h>
32#include <string.h>
33#include <unistd.h>
34#include <limits.h>
35#include <errno.h>
36#include <assert.h>
37#include <sys/file.h>
38#include <sys/ioctl.h>
39#include <stdalign.h>
40
41#ifdef USDT_ENABLED
42#include "usdt.h"
43#endif
44
45#ifndef F_LINUX_SPECIFIC_BASE
46#define F_LINUX_SPECIFIC_BASE 1024
47#endif
48#ifndef F_SETPIPE_SZ
49#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
50#endif
51
52#define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
53#define OFFSET_MAX 0x7fffffffffffffffLL
54
/* Opaque handle given to the filesystem's poll() handler; pairs the
 * kernel's poll handle id (kh) with the session used to notify it. */
struct fuse_pollhandle {
	uint64_t kh;
	struct fuse_session *se;
};

/* System page size, cached once at library load time; used by the
 * splice/pipe sizing heuristics below. */
static size_t pagesize;

static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
{
	pagesize = getpagesize();
}
66
#ifdef USDT_ENABLED
/* tracepoints: USDT probes marking request lifetime events (receive,
 * dispatch, reply); the #else branch provides no-op stubs so call
 * sites need no conditional compilation */
static void trace_request_receive(int err)
{
	USDT(libfuse, request_receive, err);
}

static void trace_request_process(unsigned int opcode, unsigned int unique)
{
	USDT(libfuse, request_process, opcode, unique);
}

static void trace_request_reply(uint64_t unique, unsigned int len,
				int error, int reply_err)
{
	USDT(libfuse, request_reply, unique, len, error, reply_err);
}
#else
/* USDT disabled: empty stubs, arguments deliberately ignored */
static void trace_request_receive(int err)
{
	(void)err;
}

static void trace_request_process(unsigned int opcode, unsigned int unique)
{
	(void)opcode;
	(void)unique;
}

static void trace_request_reply(uint64_t unique, unsigned int len,
				int error, int reply_err)
{
	(void)unique;
	(void)len;
	(void)error;
	(void)reply_err;
}
#endif
105
/* Translate a struct stat into the wire-format fuse_attr sent to the
 * kernel.  Nanosecond timestamps are read through the ST_*_NSEC
 * portability macros. */
static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
{
	attr->ino = stbuf->st_ino;
	attr->mode = stbuf->st_mode;
	attr->nlink = stbuf->st_nlink;
	attr->uid = stbuf->st_uid;
	attr->gid = stbuf->st_gid;
	attr->rdev = stbuf->st_rdev;
	attr->size = stbuf->st_size;
	attr->blksize = stbuf->st_blksize;
	attr->blocks = stbuf->st_blocks;
	attr->atime = stbuf->st_atime;
	attr->mtime = stbuf->st_mtime;
	attr->ctime = stbuf->st_ctime;
	attr->atimensec = ST_ATIM_NSEC(stbuf);
	attr->mtimensec = ST_MTIM_NSEC(stbuf);
	attr->ctimensec = ST_CTIM_NSEC(stbuf);
}

/* Inverse direction: fill a struct stat from a FUSE_SETATTR request.
 * Only fields that setattr can carry are written; the rest of *stbuf
 * is left untouched by this helper. */
static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
{
	stbuf->st_mode = attr->mode;
	stbuf->st_uid = attr->uid;
	stbuf->st_gid = attr->gid;
	stbuf->st_size = attr->size;
	stbuf->st_atime = attr->atime;
	stbuf->st_mtime = attr->mtime;
	stbuf->st_ctime = attr->ctime;
	ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
	ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
	ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
}
138
/* Total number of bytes covered by the first `count` iovec segments. */
static size_t iov_length(const struct iovec *iov, size_t count)
{
	size_t total = 0;
	const struct iovec *end = iov + count;

	while (iov < end)
		total += (iov++)->iov_len;
	return total;
}
148
149void list_init_req(struct fuse_req *req)
150{
151 req->next = req;
152 req->prev = req;
153}
154
155static void list_del_req(struct fuse_req *req)
156{
157 struct fuse_req *prev = req->prev;
158 struct fuse_req *next = req->next;
159 prev->next = next;
160 next->prev = prev;
161}
162
163static void list_add_req(struct fuse_req *req, struct fuse_req *next)
164{
165 struct fuse_req *prev = next->prev;
166 req->next = next;
167 req->prev = prev;
168 prev->next = req;
169 next->prev = req;
170}
171
/* Final teardown of a request object; only reached when the refcount
 * in fuse_free_req() drops to zero.  Requests owned by an io-uring
 * ring must never be freed here. */
static void destroy_req(fuse_req_t req)
{
	if (req->flags.is_uring) {
		fuse_log(FUSE_LOG_ERR, "Refusing to destruct uring req\n");
		return;
	}
	/* the channel reference must already have been dropped */
	assert(req->ch == NULL);
	pthread_mutex_destroy(&req->lock);
	free(req);
}
182
/* Drop one reference on `req` and release its channel reference.
 * When interrupt support is active, the interrupt callback is cleared
 * and the request unlinked from the session's list under se->lock.
 * The request itself is destroyed once the refcount reaches zero. */
void fuse_free_req(fuse_req_t req)
{
	int ctr;
	struct fuse_session *se = req->se;

	/* XXX: for now no support for interrupts with io-uring
	 * It actually might work already, though. But then would add
	 * a lock across ring queues.
	 */
	if (se->conn.no_interrupt || req->flags.is_uring) {
		ctr = --req->ref_cnt;
		fuse_chan_put(req->ch);
		req->ch = NULL;
	} else {
		/* interrupt bookkeeping must happen under the session lock */
		pthread_mutex_lock(&se->lock);
		req->u.ni.func = NULL;
		req->u.ni.data = NULL;
		list_del_req(req);
		ctr = --req->ref_cnt;
		fuse_chan_put(req->ch);
		req->ch = NULL;
		pthread_mutex_unlock(&se->lock);
	}
	if (!ctr)
		destroy_req(req);
}
209
210static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
211{
212 struct fuse_req *req;
213
214 req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
215 if (req == NULL) {
216 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
217 } else {
218 req->se = se;
219 req->ref_cnt = 1;
220 list_init_req(req);
221 pthread_mutex_init(&req->lock, NULL);
222 }
223
224 return req;
225}
226
227/*
228 * Send data to fuse-kernel using an fd of the fuse device.
229 */
230static int fuse_write_msg_dev(struct fuse_session *se, struct fuse_chan *ch,
231 struct iovec *iov, int count)
232{
233 ssize_t res;
234 int err;
235
236 if (se->io != NULL)
237
238 /* se->io->writev is never NULL if se->io is not NULL as
239 * specified by fuse_session_custom_io()
240 */
241 res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
242 se->userdata);
243 else
244 res = writev(ch ? ch->fd : se->fd, iov, count);
245
246 if (res == -1) {
247 /* ENOENT means the operation was interrupted */
248 err = errno;
249 if (!fuse_session_exited(se) && err != ENOENT)
250 perror("fuse: writing device");
251 return -err;
252 }
253
254 return 0;
255}
256
/* Send a fully assembled reply to the kernel.  iov[0] must hold the
 * fuse_out_header; out->len is filled in from the iovec total before
 * sending, either over io-uring or the device fd. */
static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
			 struct iovec *iov, int count, fuse_req_t req)
{
	struct fuse_out_header *out = iov[0].iov_base;
	int err;
	bool is_uring = req && req->flags.is_uring ? true : false;

	/* NOTE(review): se is dereferenced below even on the uring path;
	 * presumably uring requests always carry a session — confirm */
	if (!is_uring)
		assert(se != NULL);
	out->len = iov_length(iov, count);

	if (se->debug) {
		/* unique == 0 marks an unsolicited notification */
		if (out->unique == 0) {
			fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
				 out->error, out->len);
		} else if (out->error) {
			fuse_log(FUSE_LOG_DEBUG,
				 " unique: %llu, error: %i (%s), outsize: %i\n",
				 (unsigned long long) out->unique, out->error,
				 strerror(-out->error), out->len);
		} else {
			fuse_log(FUSE_LOG_DEBUG,
				 " unique: %llu, success, outsize: %i\n",
				 (unsigned long long) out->unique, out->len);
		}
	}

	if (is_uring)
		err = fuse_send_msg_uring(req, iov, count);
	else
		err = fuse_write_msg_dev(se, ch, iov, count);

	trace_request_reply(out->unique, out->len, out->error, err);
	return err;
}
292
/* Fill the fuse_out_header into iov[0] and send the reply, without
 * freeing the request.  `error` must be 0 or a negated errno known to
 * the system; out-of-range values are clamped to -ERANGE. */
int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
			       int count)
{
	struct fuse_out_header out;

#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
	/* glibc >= 2.32: ask strerrordesc_np() whether -error is a known
	 * errno instead of using the fixed -1000 bound */
	const char *str = strerrordesc_np(error * -1);
	if ((str == NULL && error != 0) || error > 0) {
#else
	if (error <= -1000 || error > 0) {
#endif
		fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
		error = -ERANGE;
	}

	out.unique = req->unique;
	out.error = error;

	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(struct fuse_out_header);

	return fuse_send_msg(req->se, req->ch, iov, count, req);
}
316
317static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
318 int count)
319{
320 int res;
321
322 res = fuse_send_reply_iov_nofree(req, error, iov, count);
323 fuse_free_req(req);
324 return res;
325}
326
327static int send_reply(fuse_req_t req, int error, const void *arg,
328 size_t argsize)
329{
330 if (req->flags.is_uring)
331 return send_reply_uring(req, error, arg, argsize);
332
333 struct iovec iov[2];
334 int count = 1;
335 if (argsize) {
336 iov[1].iov_base = (void *) arg;
337 iov[1].iov_len = argsize;
338 count++;
339 }
340 return send_reply_iov(req, error, iov, count);
341}
342
343int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
344{
345 int res;
346 struct iovec *padded_iov;
347
348 padded_iov = malloc((count + 1) * sizeof(struct iovec));
349 if (padded_iov == NULL)
350 return fuse_reply_err(req, ENOMEM);
351
352 memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
353 count++;
354
355 res = send_reply_iov(req, 0, padded_iov, count);
356 free(padded_iov);
357
358 return res;
359}
360
361
362/* `buf` is allowed to be empty so that the proper size may be
363 allocated by the caller */
364size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
365 const char *name, const struct stat *stbuf, off_t off)
366{
367 (void)req;
368 size_t namelen;
369 size_t entlen;
370 size_t entlen_padded;
371 struct fuse_dirent *dirent;
372
373 namelen = strlen(name);
374 entlen = FUSE_NAME_OFFSET + namelen;
375 entlen_padded = FUSE_DIRENT_ALIGN(entlen);
376
377 if ((buf == NULL) || (entlen_padded > bufsize))
378 return entlen_padded;
379
380 dirent = (struct fuse_dirent*) buf;
381 dirent->ino = stbuf->st_ino;
382 dirent->off = off;
383 dirent->namelen = namelen;
384 dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
385 memcpy(dirent->name, name, namelen);
386 memset(dirent->name + namelen, 0, entlen_padded - entlen);
387
388 return entlen_padded;
389}
390
/* Translate statvfs(3) results into the wire-format fuse_kstatfs. */
static void convert_statfs(const struct statvfs *stbuf,
			   struct fuse_kstatfs *kstatfs)
{
	kstatfs->bsize = stbuf->f_bsize;
	kstatfs->frsize = stbuf->f_frsize;
	kstatfs->blocks = stbuf->f_blocks;
	kstatfs->bfree = stbuf->f_bfree;
	kstatfs->bavail = stbuf->f_bavail;
	kstatfs->files = stbuf->f_files;
	kstatfs->ffree = stbuf->f_ffree;
	kstatfs->namelen = stbuf->f_namemax;
}
403
/* Success reply carrying an opaque payload. */
static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
{
	return send_reply(req, 0, arg, argsize);
}

/* Error reply; `err` is a positive errno value, negated on the wire. */
int fuse_reply_err(fuse_req_t req, int err)
{
	return send_reply(req, -err, NULL, 0);
}
413
415{
416 fuse_free_req(req);
417}
418
/* Whole-second part of a timeout, clamped to [0, ULONG_MAX]. */
static unsigned long calc_timeout_sec(double t)
{
	if (t < 0.0)
		return 0;
	if (t > (double) ULONG_MAX)
		return ULONG_MAX;
	return (unsigned long) t;
}

/* Fractional part of a timeout in nanoseconds, clamped to
 * [0, 999999999]. */
static unsigned int calc_timeout_nsec(double t)
{
	double frac = t - (double) calc_timeout_sec(t);

	if (frac < 0.0)
		return 0;
	if (frac >= 0.999999999)
		return 999999999;
	return (unsigned int) (frac * 1.0e9);
}
439
/* Populate a wire-format fuse_entry_out from the filesystem-supplied
 * entry parameters, splitting the double timeouts into sec/nsec. */
static void fill_entry(struct fuse_entry_out *arg,
		       const struct fuse_entry_param *e)
{
	arg->nodeid = e->ino;
	arg->generation = e->generation;
	arg->entry_valid = calc_timeout_sec(e->entry_timeout);
	arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
	arg->attr_valid = calc_timeout_sec(e->attr_timeout);
	arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
	convert_stat(&e->attr, &arg->attr);
}
451
/* `buf` is allowed to be empty so that the proper size may be
   allocated by the caller */
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
			      const char *name,
			      const struct fuse_entry_param *e, off_t off)
{
	(void)req;
	size_t namelen;
	size_t entlen;
	size_t entlen_padded;

	namelen = strlen(name);
	entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);
	/* size probe, or not enough room: report the required size only */
	if ((buf == NULL) || (entlen_padded > bufsize))
		return entlen_padded;

	/* READDIRPLUS entry: entry_out (attrs/lookup) followed by dirent */
	struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
	memset(&dp->entry_out, 0, sizeof(dp->entry_out));
	fill_entry(&dp->entry_out, e);

	struct fuse_dirent *dirent = &dp->dirent;
	dirent->ino = e->attr.st_ino;
	dirent->off = off;
	dirent->namelen = namelen;
	/* d_type encoding: the file-type bits of st_mode, shifted down */
	dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
	memcpy(dirent->name, name, namelen);
	/* zero the alignment padding that follows the name */
	memset(dirent->name + namelen, 0, entlen_padded - entlen);

	return entlen_padded;
}
483
484static void fill_open(struct fuse_open_out *arg,
485 const struct fuse_file_info *f)
486{
487 arg->fh = f->fh;
488 if (f->backing_id > 0) {
489 arg->backing_id = f->backing_id;
490 arg->open_flags |= FOPEN_PASSTHROUGH;
491 }
492 if (f->direct_io)
493 arg->open_flags |= FOPEN_DIRECT_IO;
494 if (f->keep_cache)
495 arg->open_flags |= FOPEN_KEEP_CACHE;
496 if (f->cache_readdir)
497 arg->open_flags |= FOPEN_CACHE_DIR;
498 if (f->nonseekable)
499 arg->open_flags |= FOPEN_NONSEEKABLE;
500 if (f->noflush)
501 arg->open_flags |= FOPEN_NOFLUSH;
503 arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
504}
505
/* Reply to a lookup-style request with a fuse_entry_out. */
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
{
	struct fuse_entry_out arg;
	/* protocol < 7.9 used the shorter compat entry_out layout */
	size_t size = req->se->conn.proto_minor < 9 ?
		FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);

	/* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
	   negative entry */
	if (!e->ino && req->se->conn.proto_minor < 4)
		return fuse_reply_err(req, ENOENT);

	memset(&arg, 0, sizeof(arg));
	fill_entry(&arg, e);
	return send_reply_ok(req, &arg, size);
}
521
/* Reply to FUSE_CREATE: a fuse_entry_out immediately followed by a
 * fuse_open_out in one contiguous buffer. */
int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
		      const struct fuse_file_info *f)
{
	/* aligned so both structs may be accessed through the casts below */
	alignas(uint64_t) char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
	/* protocol < 7.9: open_out starts right after the compat-sized
	 * entry_out */
	size_t entrysize = req->se->conn.proto_minor < 9 ?
		FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
	struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
	struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);

	memset(buf, 0, sizeof(buf));
	fill_entry(earg, e);
	fill_open(oarg, f);
	return send_reply_ok(req, buf,
			     entrysize + sizeof(struct fuse_open_out));
}
537
538int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
539 double attr_timeout)
540{
541 struct fuse_attr_out arg;
542 size_t size = req->se->conn.proto_minor < 9 ?
543 FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
544
545 memset(&arg, 0, sizeof(arg));
546 arg.attr_valid = calc_timeout_sec(attr_timeout);
547 arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
548 convert_stat(attr, &arg.attr);
549
550 return send_reply_ok(req, &arg, size);
551}
552
553int fuse_reply_readlink(fuse_req_t req, const char *linkname)
554{
555 return send_reply_ok(req, linkname, strlen(linkname));
556}
557
558int fuse_passthrough_open(fuse_req_t req, int fd)
559{
560 struct fuse_backing_map map = { .fd = fd };
561 int ret;
562
563 ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
564 if (ret <= 0) {
565 fuse_log(FUSE_LOG_ERR, "fuse: passthrough_open: %s\n", strerror(errno));
566 return 0;
567 }
568
569 return ret;
570}
571
572int fuse_passthrough_close(fuse_req_t req, int backing_id)
573{
574 int ret;
575
576 ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
577 if (ret < 0)
578 fuse_log(FUSE_LOG_ERR, "fuse: passthrough_close: %s\n", strerror(errno));
579
580 return ret;
581}
582
583int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
584{
585 struct fuse_open_out arg;
586
587 memset(&arg, 0, sizeof(arg));
588 fill_open(&arg, f);
589 return send_reply_ok(req, &arg, sizeof(arg));
590}
591
592static int do_fuse_reply_write(fuse_req_t req, size_t count)
593{
594 struct fuse_write_out arg;
595
596 memset(&arg, 0, sizeof(arg));
597 arg.size = count;
598
599 return send_reply_ok(req, &arg, sizeof(arg));
600}
601
602static int do_fuse_reply_copy(fuse_req_t req, size_t count)
603{
604 struct fuse_copy_file_range_out arg;
605
606 memset(&arg, 0, sizeof(arg));
607 arg.bytes_copied = count;
608
609 return send_reply_ok(req, &arg, sizeof(arg));
610}
611
612int fuse_reply_write(fuse_req_t req, size_t count)
613{
614 /*
615 * This function is also used by FUSE_COPY_FILE_RANGE and its 64-bit
616 * variant.
617 */
618 if (req->flags.is_copy_file_range_64)
619 return do_fuse_reply_copy(req, count);
620 else
621 return do_fuse_reply_write(req, count);
622}
623
624int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
625{
626 return send_reply_ok(req, buf, size);
627}
628
/* Non-splice data reply: a single in-memory source buffer is attached
 * to the iovec directly; anything else is flattened into a freshly
 * allocated page-aligned buffer first and freed after sending. */
static int fuse_send_data_iov_fallback(struct fuse_session *se,
				       struct fuse_chan *ch,
				       struct iovec *iov, int iov_count,
				       struct fuse_bufvec *buf,
				       size_t len, fuse_req_t req)
{
	struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
	void *mbuf;
	int res;

	/* Optimize common case */
	if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
	    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
		/* FIXME: also avoid memory copy if there are multiple buffers
		   but none of them contain an fd */

		iov[iov_count].iov_base = buf->buf[0].mem;
		iov[iov_count].iov_len = len;
		iov_count++;
		return fuse_send_msg(se, ch, iov, iov_count, req);
	}

	res = posix_memalign(&mbuf, pagesize, len);
	if (res != 0)
		return res;

	mem_buf.buf[0].mem = mbuf;
	res = fuse_buf_copy(&mem_buf, buf, 0);
	if (res < 0) {
		free(mbuf);
		return -res;
	}
	/* fuse_buf_copy() returns the number of bytes actually copied */
	len = res;

	iov[iov_count].iov_base = mbuf;
	iov[iov_count].iov_len = len;
	iov_count++;
	res = fuse_send_msg(se, ch, iov, iov_count, req);
	free(mbuf);

	return res;
}
671
/* Per-thread splice pipe: current size in bytes, whether growing via
 * F_SETPIPE_SZ may still be attempted, and the [read, write] fds. */
struct fuse_ll_pipe {
	size_t size;
	int can_grow;
	int pipe[2];
};

/* Close both pipe ends and free the bookkeeping structure. */
static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
{
	close(llp->pipe[0]);
	close(llp->pipe[1]);
	free(llp);
}
684
#ifdef HAVE_SPLICE
#if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
/* Fallback for systems without pipe2(): emulate O_CLOEXEC|O_NONBLOCK
 * with fcntl() after the fact (small race window around fork+exec). */
static int fuse_pipe(int fds[2])
{
	int rv = pipe(fds);

	if (rv == -1)
		return rv;

	if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
	    fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
		close(fds[0]);
		close(fds[1]);
		rv = -1;
	}
	return rv;
}
#else
/* Create a non-blocking, close-on-exec pipe in a single syscall. */
static int fuse_pipe(int fds[2])
{
	return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
}
#endif

/* Return this thread's splice pipe, creating it on first use and
 * caching it in the session's TLS key.  NULL on failure. */
static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
{
	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
	if (llp == NULL) {
		int res;

		llp = malloc(sizeof(struct fuse_ll_pipe));
		if (llp == NULL)
			return NULL;

		res = fuse_pipe(llp->pipe);
		if (res == -1) {
			free(llp);
			return NULL;
		}

		/*
		 *the default size is 16 pages on linux
		 */
		llp->size = pagesize * 16;
		llp->can_grow = 1;

		pthread_setspecific(se->pipe_key, llp);
	}

	return llp;
}
#endif
739
740static void fuse_ll_clear_pipe(struct fuse_session *se)
741{
742 struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
743 if (llp) {
744 pthread_setspecific(se->pipe_key, NULL);
745 fuse_ll_pipe_free(llp);
746 }
747}
748
749#if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
750static int read_back(int fd, char *buf, size_t len)
751{
752 int res;
753
754 res = read(fd, buf, len);
755 if (res == -1) {
756 fuse_log(FUSE_LOG_ERR,
757 "fuse: internal error: failed to read back from pipe: %s\n",
758 strerror(errno));
759 return -EIO;
760 }
761 if (res != len) {
762 fuse_log(FUSE_LOG_ERR,
763 "fuse: internal error: short read back from pipe: %i from %zd\n",
764 res, len);
765 return -EIO;
766 }
767 return 0;
768}
769
770static int grow_pipe_to_max(int pipefd)
771{
772 int res;
773 long max;
774 long maxfd;
775 char buf[32];
776
777 maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
778 if (maxfd < 0)
779 return -errno;
780
781 res = read(maxfd, buf, sizeof(buf) - 1);
782 if (res < 0) {
783 int saved_errno;
784
785 saved_errno = errno;
786 close(maxfd);
787 return -saved_errno;
788 }
789 close(maxfd);
790 buf[res] = '\0';
791
792 res = libfuse_strtol(buf, &max);
793 if (res)
794 return res;
795 res = fcntl(pipefd, F_SETPIPE_SZ, max);
796 if (res < 0)
797 return -errno;
798 return max;
799}
800
801static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
802 struct iovec *iov, int iov_count,
803 struct fuse_bufvec *buf, unsigned int flags,
804 fuse_req_t req)
805{
806 int res;
807 size_t len = fuse_buf_size(buf);
808 struct fuse_out_header *out = iov[0].iov_base;
809 struct fuse_ll_pipe *llp;
810 int splice_flags;
811 size_t pipesize;
812 size_t total_buf_size;
813 size_t idx;
814 size_t headerlen;
815 struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
816
817 if (se->broken_splice_nonblock)
818 goto fallback;
819
820 if (flags & FUSE_BUF_NO_SPLICE)
821 goto fallback;
822
823 total_buf_size = 0;
824 for (idx = buf->idx; idx < buf->count; idx++) {
825 total_buf_size += buf->buf[idx].size;
826 if (idx == buf->idx)
827 total_buf_size -= buf->off;
828 }
829 if (total_buf_size < 2 * pagesize)
830 goto fallback;
831
832 if (se->conn.proto_minor < 14 ||
833 !(se->conn.want_ext & FUSE_CAP_SPLICE_WRITE))
834 goto fallback;
835
836 llp = fuse_ll_get_pipe(se);
837 if (llp == NULL)
838 goto fallback;
839
840
841 headerlen = iov_length(iov, iov_count);
842
843 out->len = headerlen + len;
844
845 /*
846 * Heuristic for the required pipe size, does not work if the
847 * source contains less than page size fragments
848 */
849 pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
850
851 if (llp->size < pipesize) {
852 if (llp->can_grow) {
853 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
854 if (res == -1) {
855 res = grow_pipe_to_max(llp->pipe[0]);
856 if (res > 0)
857 llp->size = res;
858 llp->can_grow = 0;
859 goto fallback;
860 }
861 llp->size = res;
862 }
863 if (llp->size < pipesize)
864 goto fallback;
865 }
866
867
868 res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
869 if (res == -1)
870 goto fallback;
871
872 if (res != headerlen) {
873 res = -EIO;
874 fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
875 headerlen);
876 goto clear_pipe;
877 }
878
879 pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
880 pipe_buf.buf[0].fd = llp->pipe[1];
881
882 res = fuse_buf_copy(&pipe_buf, buf,
884 if (res < 0) {
885 if (res == -EAGAIN || res == -EINVAL) {
886 /*
887 * Should only get EAGAIN on kernels with
888 * broken SPLICE_F_NONBLOCK support (<=
889 * 2.6.35) where this error or a short read is
890 * returned even if the pipe itself is not
891 * full
892 *
893 * EINVAL might mean that splice can't handle
894 * this combination of input and output.
895 */
896 if (res == -EAGAIN)
897 se->broken_splice_nonblock = 1;
898
899 pthread_setspecific(se->pipe_key, NULL);
900 fuse_ll_pipe_free(llp);
901 goto fallback;
902 }
903 res = -res;
904 goto clear_pipe;
905 }
906
907 if (res != 0 && res < len) {
908 struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
909 void *mbuf;
910 size_t now_len = res;
911 /*
912 * For regular files a short count is either
913 * 1) due to EOF, or
914 * 2) because of broken SPLICE_F_NONBLOCK (see above)
915 *
916 * For other inputs it's possible that we overflowed
917 * the pipe because of small buffer fragments.
918 */
919
920 res = posix_memalign(&mbuf, pagesize, len);
921 if (res != 0)
922 goto clear_pipe;
923
924 mem_buf.buf[0].mem = mbuf;
925 mem_buf.off = now_len;
926 res = fuse_buf_copy(&mem_buf, buf, 0);
927 if (res > 0) {
928 char *tmpbuf;
929 size_t extra_len = res;
930 /*
931 * Trickiest case: got more data. Need to get
932 * back the data from the pipe and then fall
933 * back to regular write.
934 */
935 tmpbuf = malloc(headerlen);
936 if (tmpbuf == NULL) {
937 free(mbuf);
938 res = ENOMEM;
939 goto clear_pipe;
940 }
941 res = read_back(llp->pipe[0], tmpbuf, headerlen);
942 free(tmpbuf);
943 if (res != 0) {
944 free(mbuf);
945 goto clear_pipe;
946 }
947 res = read_back(llp->pipe[0], mbuf, now_len);
948 if (res != 0) {
949 free(mbuf);
950 goto clear_pipe;
951 }
952 len = now_len + extra_len;
953 iov[iov_count].iov_base = mbuf;
954 iov[iov_count].iov_len = len;
955 iov_count++;
956 res = fuse_send_msg(se, ch, iov, iov_count, req);
957 free(mbuf);
958 return res;
959 }
960 free(mbuf);
961 res = now_len;
962 }
963 len = res;
964 out->len = headerlen + len;
965
966 if (se->debug) {
967 fuse_log(FUSE_LOG_DEBUG,
968 " unique: %llu, success, outsize: %i (splice)\n",
969 (unsigned long long) out->unique, out->len);
970 }
971
972 splice_flags = 0;
973 if ((flags & FUSE_BUF_SPLICE_MOVE) &&
974 (se->conn.want_ext & FUSE_CAP_SPLICE_MOVE))
975 splice_flags |= SPLICE_F_MOVE;
976
977 if (se->io != NULL && se->io->splice_send != NULL) {
978 res = se->io->splice_send(llp->pipe[0], NULL,
979 ch ? ch->fd : se->fd, NULL, out->len,
980 splice_flags, se->userdata);
981 } else {
982 res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
983 out->len, splice_flags);
984 }
985 if (res == -1) {
986 res = -errno;
987 perror("fuse: splice from pipe");
988 goto clear_pipe;
989 }
990 if (res != out->len) {
991 res = -EIO;
992 fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
993 res, out->len);
994 goto clear_pipe;
995 }
996 return 0;
997
998clear_pipe:
999 fuse_ll_clear_pipe(se);
1000 return res;
1001
1002fallback:
1003 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len, req);
1004}
1005#else
/* Splice-less build: always use the plain writev() fallback path. */
static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
			       struct iovec *iov, int iov_count,
			       struct fuse_bufvec *req_data, unsigned int flags,
			       fuse_req_t req)
{
	size_t len = fuse_buf_size(req_data);
	(void) flags;

	return fuse_send_data_iov_fallback(se, ch, iov, iov_count, req_data, len, req);
}
1016#endif
1017
/* Reply with the data described by `bufv`, using splice if permitted
 * by `flags` and supported, otherwise a plain copy. */
int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
		    enum fuse_buf_copy_flags flags)
{
	struct iovec iov[2];
	struct fuse_out_header out;
	int res;

	/* io-uring requests have their own data-reply path */
	if (req->flags.is_uring)
		return fuse_reply_data_uring(req, bufv, flags);

	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(struct fuse_out_header);

	out.unique = req->unique;
	out.error = 0;

	res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags, req);
	if (res <= 0) {
		/* sent (0) or hard send failure (negative): req is done */
		fuse_free_req(req);
		return res;
	} else {
		/* a positive result is an errno to report to the kernel */
		return fuse_reply_err(req, res);
	}
}
1042
1043int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
1044{
1045 struct fuse_statfs_out arg;
1046 size_t size = req->se->conn.proto_minor < 4 ?
1047 FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
1048
1049 memset(&arg, 0, sizeof(arg));
1050 convert_statfs(stbuf, &arg.st);
1051
1052 return send_reply_ok(req, &arg, size);
1053}
1054
1055int fuse_reply_xattr(fuse_req_t req, size_t count)
1056{
1057 struct fuse_getxattr_out arg;
1058
1059 memset(&arg, 0, sizeof(arg));
1060 arg.size = count;
1061
1062 return send_reply_ok(req, &arg, sizeof(arg));
1063}
1064
/* Reply to FUSE_GETLK with the conflicting lock (or F_UNLCK). */
int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
{
	struct fuse_lk_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.lk.type = lock->l_type;
	if (lock->l_type != F_UNLCK) {
		arg.lk.start = lock->l_start;
		/* l_len == 0 means "to end of file" in struct flock; the
		 * kernel expects an explicit inclusive end offset */
		if (lock->l_len == 0)
			arg.lk.end = OFFSET_MAX;
		else
			arg.lk.end = lock->l_start + lock->l_len - 1;
	}
	arg.lk.pid = lock->l_pid;
	return send_reply_ok(req, &arg, sizeof(arg));
}

/* Reply to FUSE_BMAP with the mapped block index. */
int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
{
	struct fuse_bmap_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.block = idx;

	return send_reply_ok(req, &arg, sizeof(arg));
}
1091
1092static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
1093 size_t count)
1094{
1095 struct fuse_ioctl_iovec *fiov;
1096 size_t i;
1097
1098 fiov = malloc(sizeof(fiov[0]) * count);
1099 if (!fiov)
1100 return NULL;
1101
1102 for (i = 0; i < count; i++) {
1103 fiov[i].base = (uintptr_t) iov[i].iov_base;
1104 fiov[i].len = iov[i].iov_len;
1105 }
1106
1107 return fiov;
1108}
1109
1111 const struct iovec *in_iov, size_t in_count,
1112 const struct iovec *out_iov, size_t out_count)
1113{
1114 struct fuse_ioctl_out arg;
1115 struct fuse_ioctl_iovec *in_fiov = NULL;
1116 struct fuse_ioctl_iovec *out_fiov = NULL;
1117 struct iovec iov[4];
1118 size_t count = 1;
1119 int res;
1120
1121 memset(&arg, 0, sizeof(arg));
1122 arg.flags |= FUSE_IOCTL_RETRY;
1123 arg.in_iovs = in_count;
1124 arg.out_iovs = out_count;
1125 iov[count].iov_base = &arg;
1126 iov[count].iov_len = sizeof(arg);
1127 count++;
1128
1129 if (req->se->conn.proto_minor < 16) {
1130 if (in_count) {
1131 iov[count].iov_base = (void *)in_iov;
1132 iov[count].iov_len = sizeof(in_iov[0]) * in_count;
1133 count++;
1134 }
1135
1136 if (out_count) {
1137 iov[count].iov_base = (void *)out_iov;
1138 iov[count].iov_len = sizeof(out_iov[0]) * out_count;
1139 count++;
1140 }
1141 } else {
1142 /* Can't handle non-compat 64bit ioctls on 32bit */
1143 if (sizeof(void *) == 4 && req->flags.ioctl_64bit) {
1144 res = fuse_reply_err(req, EINVAL);
1145 goto out;
1146 }
1147
1148 if (in_count) {
1149 in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
1150 if (!in_fiov)
1151 goto enomem;
1152
1153 iov[count].iov_base = (void *)in_fiov;
1154 iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1155 count++;
1156 }
1157 if (out_count) {
1158 out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1159 if (!out_fiov)
1160 goto enomem;
1161
1162 iov[count].iov_base = (void *)out_fiov;
1163 iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1164 count++;
1165 }
1166 }
1167
1168 res = send_reply_iov(req, 0, iov, count);
1169out:
1170 free(in_fiov);
1171 free(out_fiov);
1172
1173 return res;
1174
1175enomem:
1176 res = fuse_reply_err(req, ENOMEM);
1177 goto out;
1178}
1179
1180int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1181{
1182 struct fuse_ioctl_out arg;
1183 struct iovec iov[3];
1184 size_t count = 1;
1185
1186 memset(&arg, 0, sizeof(arg));
1187 arg.result = result;
1188 iov[count].iov_base = &arg;
1189 iov[count].iov_len = sizeof(arg);
1190 count++;
1191
1192 if (size) {
1193 iov[count].iov_base = (char *) buf;
1194 iov[count].iov_len = size;
1195 count++;
1196 }
1197
1198 return send_reply_iov(req, 0, iov, count);
1199}
1200
1201int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1202 int count)
1203{
1204 struct iovec *padded_iov;
1205 struct fuse_ioctl_out arg;
1206 int res;
1207
1208 padded_iov = malloc((count + 2) * sizeof(struct iovec));
1209 if (padded_iov == NULL)
1210 return fuse_reply_err(req, ENOMEM);
1211
1212 memset(&arg, 0, sizeof(arg));
1213 arg.result = result;
1214 padded_iov[1].iov_base = &arg;
1215 padded_iov[1].iov_len = sizeof(arg);
1216
1217 memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1218
1219 res = send_reply_iov(req, 0, padded_iov, count + 2);
1220 free(padded_iov);
1221
1222 return res;
1223}
1224
1225int fuse_reply_poll(fuse_req_t req, unsigned revents)
1226{
1227 struct fuse_poll_out arg;
1228
1229 memset(&arg, 0, sizeof(arg));
1230 arg.revents = revents;
1231
1232 return send_reply_ok(req, &arg, sizeof(arg));
1233}
1234
1235int fuse_reply_lseek(fuse_req_t req, off_t off)
1236{
1237 struct fuse_lseek_out arg;
1238
1239 memset(&arg, 0, sizeof(arg));
1240 arg.offset = off;
1241
1242 return send_reply_ok(req, &arg, sizeof(arg));
1243}
1244
#ifdef HAVE_STATX
/* Reply to FUSE_STATX with extended attributes and cache timeout. */
int fuse_reply_statx(fuse_req_t req, int flags, struct statx *statx,
		     double attr_timeout)
{
	struct fuse_statx_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.flags = flags;
	arg.attr_valid = calc_timeout_sec(attr_timeout);
	arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
	/* fuse_statx mirrors the kernel struct statx layout, so a raw
	 * copy is sufficient */
	memcpy(&arg.stat, statx, sizeof(arg.stat));

	return send_reply_ok(req, &arg, sizeof(arg));
}
#else
/* statx support not available at build time */
int fuse_reply_statx(fuse_req_t req, int flags, struct statx *statx,
		     double attr_timeout)
{
	(void)req;
	(void)flags;
	(void)statx;
	(void)attr_timeout;

	return -ENOSYS;
}
#endif
1271
1272static void _do_lookup(fuse_req_t req, const fuse_ino_t nodeid,
1273 const void *op_in, const void *in_payload)
1274{
1275 (void)op_in;
1276
1277 char *name = (char *)in_payload;
1278
1279 if (req->se->op.lookup)
1280 req->se->op.lookup(req, nodeid, name);
1281 else
1282 fuse_reply_err(req, ENOSYS);
1283}
1284
/* FUSE_LOOKUP dispatcher: no fixed-size argument, whole payload is the name. */
static void do_lookup(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *inarg)
{
	_do_lookup(req, nodeid, NULL, inarg);
}
1290
1291static void _do_forget(fuse_req_t req, const fuse_ino_t nodeid,
1292 const void *op_in, const void *in_payload)
1293{
1294 (void)in_payload;
1295
1296 struct fuse_forget_in *arg = (struct fuse_forget_in *)op_in;
1297
1298 if (req->se->op.forget)
1299 req->se->op.forget(req, nodeid, arg->nlookup);
1300 else
1301 fuse_reply_none(req);
1302}
1303
/* FUSE_FORGET dispatcher: fuse_forget_in only, no payload. */
static void do_forget(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *inarg)
{
	_do_forget(req, nodeid, inarg, NULL);
}
1309
/*
 * Handle FUSE_BATCH_FORGET: payload is an array of arg->count
 * fuse_forget_one entries.  Preferred path is forget_multi(); otherwise
 * fall back to per-entry forget() calls, each on its own dummy request
 * so that fuse_reply_none() on the filesystem side is well-defined.
 */
static void _do_batch_forget(fuse_req_t req, const fuse_ino_t nodeid,
			     const void *op_in, const void *in_payload)
{
	(void)nodeid;
	unsigned int i;

	const struct fuse_batch_forget_in *arg = op_in;
	const struct fuse_forget_one *forgets = in_payload;

	if (req->se->op.forget_multi) {
		/* NOTE(review): cast relies on fuse_forget_one and
		 * fuse_forget_data having identical layout — confirm */
		req->se->op.forget_multi(req, arg->count,
				     (struct fuse_forget_data *)in_payload);
	} else if (req->se->op.forget) {
		for (i = 0; i < arg->count; i++) {
			const struct fuse_forget_one *forget = &forgets[i];
			struct fuse_req *dummy_req;

			/* On allocation failure the remaining entries are
			 * silently dropped (FORGET is best-effort) */
			dummy_req = fuse_ll_alloc_req(req->se);
			if (dummy_req == NULL)
				break;

			dummy_req->unique = req->unique;
			dummy_req->ctx = req->ctx;
			dummy_req->ch = NULL;

			req->se->op.forget(dummy_req, forget->nodeid,
					   forget->nlookup);
		}
		/* The original request itself never gets a reply payload */
		fuse_reply_none(req);
	} else {
		fuse_reply_none(req);
	}
}
1343
/* FUSE_BATCH_FORGET dispatcher: header struct followed by the entry array. */
static void do_batch_forget(fuse_req_t req, const fuse_ino_t nodeid,
			    const void *inarg)
{
	struct fuse_batch_forget_in *arg = (void *)inarg;
	struct fuse_forget_one *param = (void *)PARAM(arg);

	_do_batch_forget(req, nodeid, inarg, param);
}
1352
1353static void _do_getattr(fuse_req_t req, const fuse_ino_t nodeid,
1354 const void *op_in, const void *in_payload)
1355{
1356 struct fuse_getattr_in *arg = (struct fuse_getattr_in *)op_in;
1357 (void)in_payload;
1358
1359 struct fuse_file_info *fip = NULL;
1360 struct fuse_file_info fi;
1361
1362 if (req->se->conn.proto_minor >= 9) {
1363 if (arg->getattr_flags & FUSE_GETATTR_FH) {
1364 memset(&fi, 0, sizeof(fi));
1365 fi.fh = arg->fh;
1366 fip = &fi;
1367 }
1368 }
1369
1370 if (req->se->op.getattr)
1371 req->se->op.getattr(req, nodeid, fip);
1372 else
1373 fuse_reply_err(req, ENOSYS);
1374}
1375
/* FUSE_GETATTR dispatcher: fuse_getattr_in only, no payload. */
static void do_getattr(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *inarg)
{
	_do_getattr(req, nodeid, inarg, NULL);
}
1381
/*
 * Handle FUSE_SETATTR.  The kernel's `valid' bitmask is sanitized in two
 * steps: FATTR_FH is consumed here (it selects the file handle rather
 * than an attribute), then the rest is masked down to the documented
 * FUSE_SET_ATTR_* bits so unknown kernel flags never reach user code.
 */
static void _do_setattr(fuse_req_t req, const fuse_ino_t nodeid,
			const void *op_in, const void *in_payload)
{
	(void)in_payload;
	const struct fuse_setattr_in *arg = op_in;
	uint32_t valid = arg->valid;

	if (req->se->op.setattr) {
		struct fuse_file_info *fi = NULL;
		struct fuse_file_info fi_store;
		struct stat stbuf;
		memset(&stbuf, 0, sizeof(stbuf));
		/* Fill stbuf with the attribute values from the request */
		convert_attr(arg, &stbuf);
		if (arg->valid & FATTR_FH) {
			valid &= ~FATTR_FH;
			memset(&fi_store, 0, sizeof(fi_store));
			fi = &fi_store;
			fi->fh = arg->fh;
		}
		/* Expose only the publicly documented attribute bits */
		valid &= FUSE_SET_ATTR_MODE | FUSE_SET_ATTR_UID |
			FUSE_SET_ATTR_GID | FUSE_SET_ATTR_SIZE |
			FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME |
			FUSE_SET_ATTR_KILL_SUID | FUSE_SET_ATTR_KILL_SGID |
			FUSE_SET_ATTR_ATIME_NOW | FUSE_SET_ATTR_MTIME_NOW |
			FUSE_SET_ATTR_CTIME;

		req->se->op.setattr(req, nodeid, &stbuf, valid, fi);
	} else
		fuse_reply_err(req, ENOSYS);
}
1412
/* FUSE_SETATTR dispatcher: fuse_setattr_in only, no payload. */
static void do_setattr(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *inarg)
{
	_do_setattr(req, nodeid, inarg, NULL);
}
1418
1419static void _do_access(fuse_req_t req, const fuse_ino_t nodeid,
1420 const void *op_in, const void *in_payload)
1421{
1422 (void)in_payload;
1423 const struct fuse_access_in *arg = op_in;
1424
1425 if (req->se->op.access)
1426 req->se->op.access(req, nodeid, arg->mask);
1427 else
1428 fuse_reply_err(req, ENOSYS);
1429}
1430
/* FUSE_ACCESS dispatcher: fuse_access_in only, no payload. */
static void do_access(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *inarg)
{
	_do_access(req, nodeid, inarg, NULL);
}
1436
1437static void _do_readlink(fuse_req_t req, const fuse_ino_t nodeid,
1438 const void *op_in, const void *in_payload)
1439{
1440 (void)op_in;
1441 (void)in_payload;
1442
1443 if (req->se->op.readlink)
1444 req->se->op.readlink(req, nodeid);
1445 else
1446 fuse_reply_err(req, ENOSYS);
1447}
1448
/* FUSE_READLINK dispatcher: nothing to unpack. */
static void do_readlink(fuse_req_t req, const fuse_ino_t nodeid,
			const void *inarg)
{
	_do_readlink(req, nodeid, inarg, NULL);
}
1454
1455static void _do_mknod(fuse_req_t req, const fuse_ino_t nodeid,
1456 const void *op_in, const void *in_payload)
1457{
1458 const struct fuse_mknod_in *arg = (struct fuse_mknod_in *)op_in;
1459 const char *name = in_payload;
1460
1461 if (req->se->conn.proto_minor >= 12)
1462 req->ctx.umask = arg->umask;
1463
1464 if (req->se->op.mknod)
1465 req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1466 else
1467 fuse_reply_err(req, ENOSYS);
1468}
1469
/* FUSE_MKNOD dispatcher: before protocol 7.12 the kernel sent a shorter
 * fuse_mknod_in, so the name starts at the compat offset instead. */
static void do_mknod(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_mknod_in *arg = (struct fuse_mknod_in *)inarg;
	char *name = PARAM(arg);

	if (req->se->conn.proto_minor < 12)
		name = (char *)inarg + FUSE_COMPAT_MKNOD_IN_SIZE;

	_do_mknod(req, nodeid, inarg, name);
}
1480
1481static void _do_mkdir(fuse_req_t req, const fuse_ino_t nodeid,
1482 const void *op_in, const void *in_payload)
1483{
1484 const char *name = in_payload;
1485 const struct fuse_mkdir_in *arg = op_in;
1486
1487 if (req->se->conn.proto_minor >= 12)
1488 req->ctx.umask = arg->umask;
1489
1490 if (req->se->op.mkdir)
1491 req->se->op.mkdir(req, nodeid, name, arg->mode);
1492 else
1493 fuse_reply_err(req, ENOSYS);
1494}
1495
/* FUSE_MKDIR dispatcher: name follows the fixed-size argument. */
static void do_mkdir(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	const struct fuse_mkdir_in *arg = inarg;
	const char *name = PARAM(arg);

	_do_mkdir(req, nodeid, inarg, name);
}
1503
1504static void _do_unlink(fuse_req_t req, const fuse_ino_t nodeid,
1505 const void *op_in, const void *in_payload)
1506{
1507 (void)op_in;
1508 const char *name = in_payload;
1509
1510 if (req->se->op.unlink)
1511 req->se->op.unlink(req, nodeid, name);
1512 else
1513 fuse_reply_err(req, ENOSYS);
1514}
1515
/* FUSE_UNLINK dispatcher: whole payload is the name. */
static void do_unlink(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *inarg)
{
	_do_unlink(req, nodeid, NULL, inarg);
}
1521
1522static void _do_rmdir(fuse_req_t req, const fuse_ino_t nodeid,
1523 const void *op_in, const void *in_payload)
1524{
1525 (void)op_in;
1526 const char *name = in_payload;
1527
1528 if (req->se->op.rmdir)
1529 req->se->op.rmdir(req, nodeid, name);
1530 else
1531 fuse_reply_err(req, ENOSYS);
1532}
1533
/* FUSE_RMDIR dispatcher: whole payload is the name. */
static void do_rmdir(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	_do_rmdir(req, nodeid, NULL, inarg);
}
1538
1539static void _do_symlink(fuse_req_t req, const fuse_ino_t nodeid,
1540 const void *op_in, const void *in_payload)
1541{
1542 (void)op_in;
1543 const char *name = (char *)in_payload;
1544 const char *linkname = name + strlen(name) + 1;
1545
1546 if (req->se->op.symlink)
1547 req->se->op.symlink(req, linkname, nodeid, name);
1548 else
1549 fuse_reply_err(req, ENOSYS);
1550}
1551
/* FUSE_SYMLINK dispatcher: no fixed-size arg, payload is name + target. */
static void do_symlink(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *inarg)
{
	_do_symlink(req, nodeid, NULL, inarg);
}
1557
1558static void _do_rename(fuse_req_t req, const fuse_ino_t nodeid,
1559 const void *op_in, const void *in_payload)
1560{
1561 const struct fuse_rename_in *arg = (struct fuse_rename_in *)op_in;
1562 const char *oldname = in_payload;
1563 const char *newname = oldname + strlen(oldname) + 1;
1564
1565 if (req->se->op.rename)
1566 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1567 0);
1568 else
1569 fuse_reply_err(req, ENOSYS);
1570}
1571
/* FUSE_RENAME dispatcher: names follow the fixed-size argument. */
static void do_rename(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *inarg)
{
	const struct fuse_rename_in *arg = inarg;
	const void *payload = PARAM(arg);

	_do_rename(req, nodeid, arg, payload);
}
1580
1581static void _do_rename2(fuse_req_t req, const fuse_ino_t nodeid,
1582 const void *op_in, const void *in_payload)
1583{
1584 const struct fuse_rename2_in *arg = op_in;
1585 const char *oldname = in_payload;
1586 const char *newname = oldname + strlen(oldname) + 1;
1587
1588 if (req->se->op.rename)
1589 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1590 arg->flags);
1591 else
1592 fuse_reply_err(req, ENOSYS);
1593}
1594
/* FUSE_RENAME2 dispatcher: names follow the fixed-size argument. */
static void do_rename2(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *inarg)
{
	const struct fuse_rename2_in *arg = inarg;
	const void *payload = PARAM(arg);

	_do_rename2(req, nodeid, arg, payload);
}
1603
1604static void _do_tmpfile(fuse_req_t req, fuse_ino_t nodeid, const void *op_in,
1605 const void *in_payload)
1606{
1607 (void)in_payload;
1608 const struct fuse_create_in *arg = op_in;
1609
1610 if (req->se->op.tmpfile) {
1611 struct fuse_file_info fi;
1612
1613 memset(&fi, 0, sizeof(fi));
1614 fi.flags = arg->flags;
1615
1616 if (req->se->conn.proto_minor >= 12)
1617 req->ctx.umask = arg->umask;
1618
1619 req->se->op.tmpfile(req, nodeid, arg->mode, &fi);
1620 } else
1621 fuse_reply_err(req, ENOSYS);
1622}
1623
/* FUSE_TMPFILE dispatcher: reuses fuse_create_in, no payload consumed. */
static void do_tmpfile(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_create_in *arg = (struct fuse_create_in *) inarg;

	_do_tmpfile(req, nodeid, arg, NULL);
}
1630
1631static void _do_link(fuse_req_t req, const fuse_ino_t nodeid, const void *op_in,
1632 const void *in_payload)
1633{
1634 struct fuse_link_in *arg = (struct fuse_link_in *)op_in;
1635
1636 if (req->se->op.link)
1637 req->se->op.link(req, arg->oldnodeid, nodeid, in_payload);
1638 else
1639 fuse_reply_err(req, ENOSYS);
1640}
1641
/* FUSE_LINK dispatcher: name follows the fixed-size argument. */
static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	const struct fuse_link_in *arg = inarg;
	const void *name = PARAM(arg);

	_do_link(req, nodeid, inarg, name);
}
1649
1650static void _do_create(fuse_req_t req, const fuse_ino_t nodeid,
1651 const void *op_in, const void *in_payload)
1652{
1653 const struct fuse_create_in *arg = op_in;
1654 const char *name = in_payload;
1655
1656 if (req->se->op.create) {
1657 struct fuse_file_info fi;
1658
1659 memset(&fi, 0, sizeof(fi));
1660 fi.flags = arg->flags;
1661
1662 if (req->se->conn.proto_minor >= 12)
1663 req->ctx.umask = arg->umask;
1664
1665 /* XXX: fuse_create_in::open_flags */
1666
1667 req->se->op.create(req, nodeid, name, arg->mode, &fi);
1668 } else {
1669 fuse_reply_err(req, ENOSYS);
1670 }
1671}
1672
/* FUSE_CREATE dispatcher: before 7.12 the kernel sent only a
 * fuse_open_in, so the name starts right after it. */
static void do_create(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *inarg)
{
	const struct fuse_create_in *arg = (struct fuse_create_in *)inarg;
	void *payload = PARAM(arg);

	if (req->se->conn.proto_minor < 12)
		payload = (char *)inarg + sizeof(struct fuse_open_in);

	_do_create(req, nodeid, arg, payload);
}
1684
1685static void _do_open(fuse_req_t req, const fuse_ino_t nodeid, const void *op_in,
1686 const void *in_payload)
1687{
1688 (void)in_payload;
1689 struct fuse_open_in *arg = (struct fuse_open_in *)op_in;
1690 struct fuse_file_info fi;
1691
1692 memset(&fi, 0, sizeof(fi));
1693 fi.flags = arg->flags;
1694
1695 /* XXX: fuse_open_in::open_flags */
1696
1697 if (req->se->op.open)
1698 req->se->op.open(req, nodeid, &fi);
1699 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPEN_SUPPORT)
1700 fuse_reply_err(req, ENOSYS);
1701 else
1702 fuse_reply_open(req, &fi);
1703}
1704
/* FUSE_OPEN dispatcher: fuse_open_in only, no payload. */
static void do_open(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	_do_open(req, nodeid, inarg, NULL);
}
1709
1710static void _do_read(fuse_req_t req, const fuse_ino_t nodeid, const void *op_in,
1711 const void *in_payload)
1712{
1713 (void)in_payload;
1714 struct fuse_read_in *arg = (struct fuse_read_in *)op_in;
1715
1716 if (req->se->op.read) {
1717 struct fuse_file_info fi;
1718
1719 memset(&fi, 0, sizeof(fi));
1720 fi.fh = arg->fh;
1721 if (req->se->conn.proto_minor >= 9) {
1722 fi.lock_owner = arg->lock_owner;
1723 fi.flags = arg->flags;
1724 }
1725 req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1726 } else
1727 fuse_reply_err(req, ENOSYS);
1728}
1729
/* FUSE_READ dispatcher: fuse_read_in only, no payload. */
static void do_read(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	_do_read(req, nodeid, inarg, NULL);
}
1734
1735static void _do_write(fuse_req_t req, const fuse_ino_t nodeid,
1736 const void *op_in, const void *in_payload)
1737{
1738 struct fuse_write_in *arg = (struct fuse_write_in *)op_in;
1739 const char *buf = in_payload;
1740 struct fuse_file_info fi;
1741
1742 memset(&fi, 0, sizeof(fi));
1743 fi.fh = arg->fh;
1744 fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1745
1746 if (req->se->conn.proto_minor >= 9) {
1747 fi.lock_owner = arg->lock_owner;
1748 fi.flags = arg->flags;
1749 }
1750
1751 if (req->se->op.write)
1752 req->se->op.write(req, nodeid, buf, arg->size, arg->offset,
1753 &fi);
1754 else
1755 fuse_reply_err(req, ENOSYS);
1756}
1757
/* FUSE_WRITE dispatcher: before 7.9 the kernel sent a shorter
 * fuse_write_in, so the data starts at the compat offset. */
static void do_write(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_write_in *arg = (struct fuse_write_in *)inarg;
	const void *payload;

	if (req->se->conn.proto_minor < 9)
		payload = ((char *)arg) + FUSE_COMPAT_WRITE_IN_SIZE;
	else
		payload = PARAM(arg);

	_do_write(req, nodeid, arg, payload);
}
1770
1771static void _do_write_buf(fuse_req_t req, const fuse_ino_t nodeid,
1772 const void *op_in, struct fuse_bufvec *bufv)
1773{
1774 struct fuse_session *se = req->se;
1775 struct fuse_write_in *arg = (struct fuse_write_in *)op_in;
1776 struct fuse_file_info fi;
1777
1778 memset(&fi, 0, sizeof(fi));
1779 fi.fh = arg->fh;
1780 fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1781
1782 if (se->conn.proto_minor >= 9) {
1783 fi.lock_owner = arg->lock_owner;
1784 fi.flags = arg->flags;
1785 }
1786
1787 se->op.write_buf(req, nodeid, bufv, arg->offset, &fi);
1788}
1789
/*
 * FUSE_WRITE dispatcher for the write_buf() path.  Builds a single-buffer
 * fuse_bufvec around the raw input buffer, strips the header (and, pre-7.9,
 * the shorter compat fuse_write_in) from it, and clamps it to the size the
 * kernel claims to have sent.
 */
static void do_write_buf(fuse_req_t req, const fuse_ino_t nodeid,
			 const void *inarg, const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};
	struct fuse_write_in *arg = (struct fuse_write_in *)inarg;

	if (se->conn.proto_minor < 9) {
		/* Compat layout cannot arrive through a pipe fd */
		bufv.buf[0].mem = ((char *)arg) + FUSE_COMPAT_WRITE_IN_SIZE;
		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			FUSE_COMPAT_WRITE_IN_SIZE;
		assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
	} else {
		/* For fd-backed buffers only the size is adjusted; the data
		 * still lives in the pipe */
		if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
			bufv.buf[0].mem = PARAM(arg);

		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			sizeof(struct fuse_write_in);
	}
	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR,
			 "fuse: %s: buffer size too small\n", __func__);
		fuse_reply_err(req, EIO);
		goto out;
	}
	bufv.buf[0].size = arg->size;

	_do_write_buf(req, nodeid, inarg, &bufv);

out:
	/* Need to reset the pipe if ->write_buf() didn't consume all data */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
1827
1828static void _do_flush(fuse_req_t req, const fuse_ino_t nodeid,
1829 const void *op_in, const void *in_payload)
1830{
1831 (void)in_payload;
1832 struct fuse_flush_in *arg = (struct fuse_flush_in *)op_in;
1833 struct fuse_file_info fi;
1834
1835 memset(&fi, 0, sizeof(fi));
1836 fi.fh = arg->fh;
1837 fi.flush = 1;
1838 if (req->se->conn.proto_minor >= 7)
1839 fi.lock_owner = arg->lock_owner;
1840
1841 if (req->se->op.flush)
1842 req->se->op.flush(req, nodeid, &fi);
1843 else
1844 fuse_reply_err(req, ENOSYS);
1845}
1846
/* FUSE_FLUSH dispatcher: fuse_flush_in only, no payload. */
static void do_flush(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	_do_flush(req, nodeid, inarg, NULL);
}
1851
1852static void _do_release(fuse_req_t req, const fuse_ino_t nodeid,
1853 const void *op_in, const void *in_payload)
1854{
1855 (void)in_payload;
1856 const struct fuse_release_in *arg = op_in;
1857 struct fuse_file_info fi;
1858
1859 memset(&fi, 0, sizeof(fi));
1860 fi.flags = arg->flags;
1861 fi.fh = arg->fh;
1862 if (req->se->conn.proto_minor >= 8) {
1863 fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1864 fi.lock_owner = arg->lock_owner;
1865 }
1866 if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1867 fi.flock_release = 1;
1868 fi.lock_owner = arg->lock_owner;
1869 }
1870
1871 if (req->se->op.release)
1872 req->se->op.release(req, nodeid, &fi);
1873 else
1874 fuse_reply_err(req, 0);
1875}
1876
/* FUSE_RELEASE dispatcher: fuse_release_in only, no payload. */
static void do_release(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *inarg)
{
	_do_release(req, nodeid, inarg, NULL);
}
1882
1883static void _do_fsync(fuse_req_t req, const fuse_ino_t nodeid,
1884 const void *op_in, const void *in_payload)
1885{
1886 (void)in_payload;
1887 const struct fuse_fsync_in *arg = op_in;
1888 struct fuse_file_info fi;
1889 int datasync = arg->fsync_flags & 1;
1890
1891 memset(&fi, 0, sizeof(fi));
1892 fi.fh = arg->fh;
1893
1894 if (req->se->op.fsync)
1895 req->se->op.fsync(req, nodeid, datasync, &fi);
1896 else
1897 fuse_reply_err(req, ENOSYS);
1898}
1899
/* FUSE_FSYNC dispatcher: fuse_fsync_in only, no payload. */
static void do_fsync(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	_do_fsync(req, nodeid, inarg, NULL);
}
1904
1905static void _do_opendir(fuse_req_t req, const fuse_ino_t nodeid,
1906 const void *op_in, const void *in_payload)
1907{
1908 (void)in_payload;
1909 const struct fuse_open_in *arg = op_in;
1910 struct fuse_file_info fi;
1911
1912 memset(&fi, 0, sizeof(fi));
1913 fi.flags = arg->flags;
1914 /* XXX: fuse_open_in::open_flags */
1915
1916 if (req->se->op.opendir)
1917 req->se->op.opendir(req, nodeid, &fi);
1918 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPENDIR_SUPPORT)
1919 fuse_reply_err(req, ENOSYS);
1920 else
1921 fuse_reply_open(req, &fi);
1922}
1923
/* FUSE_OPENDIR dispatcher: fuse_open_in only, no payload. */
static void do_opendir(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *inarg)
{
	_do_opendir(req, nodeid, inarg, NULL);
}
1929
1930static void _do_readdir(fuse_req_t req, const fuse_ino_t nodeid,
1931 const void *op_in, const void *in_payload)
1932{
1933 (void)in_payload;
1934 struct fuse_read_in *arg = (struct fuse_read_in *)op_in;
1935 struct fuse_file_info fi;
1936
1937 memset(&fi, 0, sizeof(fi));
1938 fi.fh = arg->fh;
1939
1940 if (req->se->op.readdir)
1941 req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1942 else
1943 fuse_reply_err(req, ENOSYS);
1944}
1945
/* FUSE_READDIR dispatcher: fuse_read_in only, no payload. */
static void do_readdir(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *inarg)
{
	_do_readdir(req, nodeid, inarg, NULL);
}
1951
1952static void _do_readdirplus(fuse_req_t req, const fuse_ino_t nodeid,
1953 const void *op_in, const void *in_payload)
1954{
1955 (void)in_payload;
1956 struct fuse_read_in *arg = (struct fuse_read_in *)op_in;
1957 struct fuse_file_info fi;
1958
1959 memset(&fi, 0, sizeof(fi));
1960 fi.fh = arg->fh;
1961
1962 if (req->se->op.readdirplus)
1963 req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1964 else
1965 fuse_reply_err(req, ENOSYS);
1966}
1967
/* FUSE_READDIRPLUS dispatcher: fuse_read_in only, no payload. */
static void do_readdirplus(fuse_req_t req, const fuse_ino_t nodeid,
			   const void *inarg)
{
	_do_readdirplus(req, nodeid, inarg, NULL);
}
1973
1974static void _do_releasedir(fuse_req_t req, const fuse_ino_t nodeid,
1975 const void *op_in, const void *in_payload)
1976{
1977 (void)in_payload;
1978 struct fuse_release_in *arg = (struct fuse_release_in *)op_in;
1979 struct fuse_file_info fi;
1980
1981 memset(&fi, 0, sizeof(fi));
1982 fi.flags = arg->flags;
1983 fi.fh = arg->fh;
1984
1985 if (req->se->op.releasedir)
1986 req->se->op.releasedir(req, nodeid, &fi);
1987 else
1988 fuse_reply_err(req, 0);
1989}
1990
/* FUSE_RELEASEDIR dispatcher: fuse_release_in only, no payload. */
static void do_releasedir(fuse_req_t req, const fuse_ino_t nodeid,
			  const void *inarg)
{
	_do_releasedir(req, nodeid, inarg, NULL);
}
1996
1997static void _do_fsyncdir(fuse_req_t req, const fuse_ino_t nodeid,
1998 const void *op_in, const void *in_payload)
1999{
2000 (void)in_payload;
2001 struct fuse_fsync_in *arg = (struct fuse_fsync_in *)op_in;
2002 struct fuse_file_info fi;
2003 int datasync = arg->fsync_flags & 1;
2004
2005 memset(&fi, 0, sizeof(fi));
2006 fi.fh = arg->fh;
2007
2008 if (req->se->op.fsyncdir)
2009 req->se->op.fsyncdir(req, nodeid, datasync, &fi);
2010 else
2011 fuse_reply_err(req, ENOSYS);
2012}
/* FUSE_FSYNCDIR dispatcher: fuse_fsync_in only, no payload. */
static void do_fsyncdir(fuse_req_t req, const fuse_ino_t nodeid,
			const void *inarg)
{
	_do_fsyncdir(req, nodeid, inarg, NULL);
}
2018
2019static void _do_statfs(fuse_req_t req, const fuse_ino_t nodeid,
2020 const void *op_in, const void *in_payload)
2021{
2022 (void) nodeid;
2023 (void)op_in;
2024 (void)in_payload;
2025
2026 if (req->se->op.statfs)
2027 req->se->op.statfs(req, nodeid);
2028 else {
2029 struct statvfs buf = {
2030 .f_namemax = 255,
2031 .f_bsize = 512,
2032 };
2033 fuse_reply_statfs(req, &buf);
2034 }
2035}
/* FUSE_STATFS dispatcher: no argument struct, no payload. */
static void do_statfs(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *inarg)
{
	_do_statfs(req, nodeid, inarg, NULL);
}
2041
2042static void _do_setxattr(fuse_req_t req, const fuse_ino_t nodeid,
2043 const void *op_in, const void *in_payload)
2044{
2045 struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *)op_in;
2046 const char *name = in_payload;
2047 const char *value = name + strlen(name) + 1;
2048
2049 /* XXX:The API should be extended to support extra_flags/setxattr_flags */
2050
2051 if (req->se->op.setxattr)
2052 req->se->op.setxattr(req, nodeid, name, value, arg->size,
2053 arg->flags);
2054 else
2055 fuse_reply_err(req, ENOSYS);
2056}
2057static void do_setxattr(fuse_req_t req, const fuse_ino_t nodeid,
2058 const void *inarg)
2059{
2060 struct fuse_session *se = req->se;
2061 unsigned int xattr_ext = !!(se->conn.want & FUSE_CAP_SETXATTR_EXT);
2062 const struct fuse_setxattr_in *arg = inarg;
2063 char *payload = xattr_ext ? PARAM(arg) :
2064 (char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
2065
2066 _do_setxattr(req, nodeid, arg, payload);
2067}
2068
2069static void _do_getxattr(fuse_req_t req, const fuse_ino_t nodeid,
2070 const void *op_in, const void *in_payload)
2071{
2072 const struct fuse_getxattr_in *arg = op_in;
2073
2074 if (req->se->op.getxattr)
2075 req->se->op.getxattr(req, nodeid, in_payload, arg->size);
2076 else
2077 fuse_reply_err(req, ENOSYS);
2078}
2079
/* FUSE_GETXATTR dispatcher: attribute name follows the fixed-size arg. */
static void do_getxattr(fuse_req_t req, const fuse_ino_t nodeid,
			const void *inarg)
{
	const struct fuse_getxattr_in *arg = inarg;
	const void *payload = PARAM(arg);

	_do_getxattr(req, nodeid, arg, payload);
}
2088
2089static void _do_listxattr(fuse_req_t req, const fuse_ino_t nodeid,
2090 const void *inarg, const void *in_payload)
2091{
2092 (void)in_payload;
2093 const struct fuse_getxattr_in *arg = inarg;
2094
2095 if (req->se->op.listxattr)
2096 req->se->op.listxattr(req, nodeid, arg->size);
2097 else
2098 fuse_reply_err(req, ENOSYS);
2099}
/* FUSE_LISTXATTR dispatcher: reuses fuse_getxattr_in, no payload. */
static void do_listxattr(fuse_req_t req, const fuse_ino_t nodeid,
			 const void *inarg)
{
	_do_listxattr(req, nodeid, inarg, NULL);
}
2105
2106static void _do_removexattr(fuse_req_t req, const fuse_ino_t nodeid,
2107 const void *inarg, const void *in_payload)
2108{
2109 (void)inarg;
2110 const char *name = in_payload;
2111
2112 if (req->se->op.removexattr)
2113 req->se->op.removexattr(req, nodeid, name);
2114 else
2115 fuse_reply_err(req, ENOSYS);
2116}
/* FUSE_REMOVEXATTR dispatcher: whole payload is the attribute name. */
static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	_do_removexattr(req, nodeid, NULL, inarg);
}
2121
2122static void convert_fuse_file_lock(const struct fuse_file_lock *fl,
2123 struct flock *flock)
2124{
2125 memset(flock, 0, sizeof(struct flock));
2126 flock->l_type = fl->type;
2127 flock->l_whence = SEEK_SET;
2128 flock->l_start = fl->start;
2129 if (fl->end == OFFSET_MAX)
2130 flock->l_len = 0;
2131 else
2132 flock->l_len = fl->end - fl->start + 1;
2133 flock->l_pid = fl->pid;
2134}
2135
2136static void _do_getlk(fuse_req_t req, const fuse_ino_t nodeid,
2137 const void *op_in, const void *in_payload)
2138{
2139 (void)in_payload;
2140 const struct fuse_lk_in *arg = op_in;
2141 struct fuse_file_info fi;
2142 struct flock flock;
2143
2144 memset(&fi, 0, sizeof(fi));
2145 fi.fh = arg->fh;
2146 fi.lock_owner = arg->owner;
2147
2148 convert_fuse_file_lock(&arg->lk, &flock);
2149 if (req->se->op.getlk)
2150 req->se->op.getlk(req, nodeid, &fi, &flock);
2151 else
2152 fuse_reply_err(req, ENOSYS);
2153}
/* FUSE_GETLK dispatcher: fuse_lk_in only, no payload. */
static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	_do_getlk(req, nodeid, inarg, NULL);
}
2158
2159static void do_setlk_common(fuse_req_t req, const fuse_ino_t nodeid,
2160 const void *op_in, int sleep)
2161{
2162 const struct fuse_lk_in *arg = op_in;
2163 struct fuse_file_info fi;
2164 struct flock flock;
2165
2166 memset(&fi, 0, sizeof(fi));
2167 fi.fh = arg->fh;
2168 fi.lock_owner = arg->owner;
2169
2170 if (arg->lk_flags & FUSE_LK_FLOCK) {
2171 int op = 0;
2172
2173 switch (arg->lk.type) {
2174 case F_RDLCK:
2175 op = LOCK_SH;
2176 break;
2177 case F_WRLCK:
2178 op = LOCK_EX;
2179 break;
2180 case F_UNLCK:
2181 op = LOCK_UN;
2182 break;
2183 }
2184 if (!sleep)
2185 op |= LOCK_NB;
2186
2187 if (req->se->op.flock)
2188 req->se->op.flock(req, nodeid, &fi, op);
2189 else
2190 fuse_reply_err(req, ENOSYS);
2191 } else {
2192 convert_fuse_file_lock(&arg->lk, &flock);
2193 if (req->se->op.setlk)
2194 req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
2195 else
2196 fuse_reply_err(req, ENOSYS);
2197 }
2198}
2199
/* Handle FUSE_SETLK (non-blocking): sleep = 0. */
static void _do_setlk(fuse_req_t req, const fuse_ino_t nodeid,
		      const void *op_in, const void *in_payload)
{
	(void)in_payload;
	do_setlk_common(req, nodeid, op_in, 0);
}
2206
/* FUSE_SETLK dispatcher: fuse_lk_in only, no payload. */
static void do_setlk(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	_do_setlk(req, nodeid, inarg, NULL);
}
2211
/* Handle FUSE_SETLKW (blocking): sleep = 1. */
static void _do_setlkw(fuse_req_t req, const fuse_ino_t nodeid,
		       const void *op_in, const void *in_payload)
{
	(void)in_payload;
	do_setlk_common(req, nodeid, op_in, 1);
}
/* FUSE_SETLKW dispatcher: fuse_lk_in only, no payload. */
static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	_do_setlkw(req, nodeid, inarg, NULL);
}
2222
2223static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
2224{
2225 struct fuse_req *curr;
2226
2227 for (curr = se->list.next; curr != &se->list; curr = curr->next) {
2228 if (curr->unique == req->u.i.unique) {
2230 void *data;
2231
2232 curr->ref_cnt++;
2233 pthread_mutex_unlock(&se->lock);
2234
2235 /* Ugh, ugly locking */
2236 pthread_mutex_lock(&curr->lock);
2237 pthread_mutex_lock(&se->lock);
2238 curr->interrupted = 1;
2239 func = curr->u.ni.func;
2240 data = curr->u.ni.data;
2241 pthread_mutex_unlock(&se->lock);
2242 if (func)
2243 func(curr, data);
2244 pthread_mutex_unlock(&curr->lock);
2245
2246 pthread_mutex_lock(&se->lock);
2247 curr->ref_cnt--;
2248 if (!curr->ref_cnt) {
2249 destroy_req(curr);
2250 }
2251
2252 return 1;
2253 }
2254 }
2255 for (curr = se->interrupts.next; curr != &se->interrupts;
2256 curr = curr->next) {
2257 if (curr->u.i.unique == req->u.i.unique)
2258 return 1;
2259 }
2260 return 0;
2261}
2262
/*
 * Handle FUSE_INTERRUPT.  If the target request is still in flight (or
 * an equivalent INTERRUPT is already queued) this request is consumed;
 * otherwise it is parked on se->interrupts so check_interrupt() can
 * match it when the target arrives later.
 */
static void _do_interrupt(fuse_req_t req, const fuse_ino_t nodeid,
			  const void *op_in, const void *in_payload)
{
	(void)in_payload;
	const struct fuse_interrupt_in *arg = op_in;
	struct fuse_session *se = req->se;

	(void) nodeid;
	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
			 (unsigned long long) arg->unique);

	req->u.i.unique = arg->unique;

	pthread_mutex_lock(&se->lock);
	if (find_interrupted(se, req)) {
		/* Target found (or duplicate): this request is done */
		fuse_chan_put(req->ch);
		req->ch = NULL;
		destroy_req(req);
	} else
		list_add_req(req, &se->interrupts);
	pthread_mutex_unlock(&se->lock);
}
/* FUSE_INTERRUPT dispatcher: fuse_interrupt_in only, no payload. */
static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	_do_interrupt(req, nodeid, inarg, NULL);
}
2290
/*
 * Match a newly arrived request against the queue of parked INTERRUPTs.
 * If one targets `req', mark it interrupted, consume the INTERRUPT and
 * return NULL.  Otherwise hand back the oldest parked INTERRUPT (removed
 * from the queue) so the caller can retry it, or NULL if none is queued.
 */
static struct fuse_req *check_interrupt(struct fuse_session *se,
					struct fuse_req *req)
{
	struct fuse_req *curr;

	for (curr = se->interrupts.next; curr != &se->interrupts;
	     curr = curr->next) {
		if (curr->u.i.unique == req->unique) {
			req->interrupted = 1;
			list_del_req(curr);
			fuse_chan_put(curr->ch);
			curr->ch = NULL;
			destroy_req(curr);
			return NULL;
		}
	}
	curr = se->interrupts.next;
	if (curr != &se->interrupts) {
		/* Ownership of curr transfers to the caller */
		list_del_req(curr);
		list_init_req(curr);
		return curr;
	} else
		return NULL;
}
2315
2316static void _do_bmap(fuse_req_t req, const fuse_ino_t nodeid, const void *op_in,
2317 const void *in_payload)
2318{
2319 (void)in_payload;
2320 const struct fuse_bmap_in *arg = op_in;
2321
2322 if (req->se->op.bmap)
2323 req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
2324 else
2325 fuse_reply_err(req, ENOSYS);
2326}
/* FUSE_BMAP dispatcher: fuse_bmap_in only, no payload. */
static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	_do_bmap(req, nodeid, inarg, NULL);
}
2331
2332static void _do_ioctl(fuse_req_t req, const fuse_ino_t nodeid,
2333 const void *op_in, const void *in_payload)
2334{
2335 struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *)op_in;
2336 unsigned int flags = arg->flags;
2337 const void *in_buf = in_payload;
2338 struct fuse_file_info fi;
2339
2340 if (flags & FUSE_IOCTL_DIR &&
2341 !(req->se->conn.want_ext & FUSE_CAP_IOCTL_DIR)) {
2342 fuse_reply_err(req, ENOTTY);
2343 return;
2344 }
2345
2346 memset(&fi, 0, sizeof(fi));
2347 fi.fh = arg->fh;
2348
2349 if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
2350 !(flags & FUSE_IOCTL_32BIT)) {
2351 req->flags.ioctl_64bit = 1;
2352 }
2353
2354 if (req->se->op.ioctl)
2355 req->se->op.ioctl(req, nodeid, arg->cmd,
2356 (void *)(uintptr_t)arg->arg, &fi, flags,
2357 in_buf, arg->in_size, arg->out_size);
2358 else
2359 fuse_reply_err(req, ENOSYS);
2360}
/* FUSE_IOCTL dispatcher: input data (if any) follows the fixed-size arg. */
static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	const struct fuse_ioctl_in *arg = inarg;
	void *in_buf = arg->in_size ? PARAM(arg) : NULL;

	_do_ioctl(req, nodeid, arg, in_buf);
}
2368
/* Release a poll handle allocated by _do_poll(). */
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
{
	free(ph);
}
2373
2374static void _do_poll(fuse_req_t req, const fuse_ino_t nodeid, const void *op_in,
2375 const void *in_payload)
2376{
2377 (void)in_payload;
2378 struct fuse_poll_in *arg = (struct fuse_poll_in *)op_in;
2379 struct fuse_file_info fi;
2380
2381 memset(&fi, 0, sizeof(fi));
2382 fi.fh = arg->fh;
2383 fi.poll_events = arg->events;
2384
2385 if (req->se->op.poll) {
2386 struct fuse_pollhandle *ph = NULL;
2387
2388 if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
2389 ph = malloc(sizeof(struct fuse_pollhandle));
2390 if (ph == NULL) {
2391 fuse_reply_err(req, ENOMEM);
2392 return;
2393 }
2394 ph->kh = arg->kh;
2395 ph->se = req->se;
2396 }
2397
2398 req->se->op.poll(req, nodeid, &fi, ph);
2399 } else {
2400 fuse_reply_err(req, ENOSYS);
2401 }
2402}
2403
/* FUSE_POLL dispatcher: fuse_poll_in only, no payload. */
static void do_poll(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
{
	_do_poll(req, nodeid, inarg, NULL);
}
2408
2409static void _do_fallocate(fuse_req_t req, const fuse_ino_t nodeid,
2410 const void *op_in, const void *in_payload)
2411{
2412 (void)in_payload;
2413 const struct fuse_fallocate_in *arg = op_in;
2414 struct fuse_file_info fi;
2415
2416 memset(&fi, 0, sizeof(fi));
2417 fi.fh = arg->fh;
2418
2419 if (req->se->op.fallocate)
2420 req->se->op.fallocate(req, nodeid, arg->mode, arg->offset,
2421 arg->length, &fi);
2422 else
2423 fuse_reply_err(req, ENOSYS);
2424}
2425
2426static void do_fallocate(fuse_req_t req, const fuse_ino_t nodeid,
2427 const void *inarg)
2428{
2429 _do_fallocate(req, nodeid, inarg, NULL);
2430}
2431
2432static void copy_file_range_common(fuse_req_t req, const fuse_ino_t nodeid_in,
2433 const struct fuse_copy_file_range_in *arg)
2434{
2435 struct fuse_file_info fi_in, fi_out;
2436
2437 memset(&fi_in, 0, sizeof(fi_in));
2438 fi_in.fh = arg->fh_in;
2439
2440 memset(&fi_out, 0, sizeof(fi_out));
2441 fi_out.fh = arg->fh_out;
2442
2443 if (req->se->op.copy_file_range)
2444 req->se->op.copy_file_range(req, nodeid_in, arg->off_in, &fi_in,
2445 arg->nodeid_out, arg->off_out,
2446 &fi_out, arg->len, arg->flags);
2447 else
2448 fuse_reply_err(req, ENOSYS);
2449}
2450
2451static void _do_copy_file_range(fuse_req_t req, const fuse_ino_t nodeid_in,
2452 const void *op_in, const void *in_payload)
2453{
2454 const struct fuse_copy_file_range_in *arg = op_in;
2455 struct fuse_copy_file_range_in arg_tmp;
2456
2457 (void) in_payload;
2458 /* fuse_write_out can only handle 32bit copy size */
2459 if (arg->len > 0xfffff000) {
2460 arg_tmp = *arg;
2461 arg_tmp.len = 0xfffff000;
2462 arg = &arg_tmp;
2463 }
2464 copy_file_range_common(req, nodeid_in, arg);
2465}
2466
2467static void do_copy_file_range(fuse_req_t req, const fuse_ino_t nodeid_in,
2468 const void *inarg)
2469{
2470 _do_copy_file_range(req, nodeid_in, inarg, NULL);
2471}
2472
2473static void _do_copy_file_range_64(fuse_req_t req, const fuse_ino_t nodeid_in,
2474 const void *op_in, const void *in_payload)
2475{
2476 (void) in_payload;
2477 req->flags.is_copy_file_range_64 = 1;
2478 /* Limit size on 32bit userspace to avoid conversion overflow */
2479 if (sizeof(size_t) == 4)
2480 _do_copy_file_range(req, nodeid_in, op_in, NULL);
2481 else
2482 copy_file_range_common(req, nodeid_in, op_in);
2483}
2484
2485static void do_copy_file_range_64(fuse_req_t req, const fuse_ino_t nodeid_in,
2486 const void *inarg)
2487{
2488 _do_copy_file_range_64(req, nodeid_in, inarg, NULL);
2489}
2490
2491/*
2492 * Note that the uint64_t offset in struct fuse_lseek_in is derived from
2493 * linux kernel loff_t and is therefore signed.
2494 */
2495static void _do_lseek(fuse_req_t req, const fuse_ino_t nodeid,
2496 const void *op_in, const void *in_payload)
2497{
2498 (void)in_payload;
2499 const struct fuse_lseek_in *arg = op_in;
2500 struct fuse_file_info fi;
2501
2502 memset(&fi, 0, sizeof(fi));
2503 fi.fh = arg->fh;
2504
2505 if (req->se->op.lseek)
2506 req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
2507 else
2508 fuse_reply_err(req, ENOSYS);
2509}
2510
2511static void do_lseek(fuse_req_t req, const fuse_ino_t nodeid, const void *inarg)
2512{
2513 _do_lseek(req, nodeid, inarg, NULL);
2514}
2515
2516#ifdef HAVE_STATX
2517static void _do_statx(fuse_req_t req, const fuse_ino_t nodeid,
2518 const void *op_in, const void *in_payload)
2519{
2520 (void)in_payload;
2521 const struct fuse_statx_in *arg = op_in;
2522 struct fuse_file_info *fip = NULL;
2523 struct fuse_file_info fi;
2524
2525 if (arg->getattr_flags & FUSE_GETATTR_FH) {
2526 memset(&fi, 0, sizeof(fi));
2527 fi.fh = arg->fh;
2528 fip = &fi;
2529 }
2530
2531 if (req->se->op.statx)
2532 req->se->op.statx(req, nodeid, arg->sx_flags, arg->sx_mask, fip);
2533 else
2534 fuse_reply_err(req, ENOSYS);
2535}
2536#else
2537static void _do_statx(fuse_req_t req, const fuse_ino_t nodeid,
2538 const void *op_in, const void *in_payload)
2539{
2540 (void)in_payload;
2541 (void)req;
2542 (void)nodeid;
2543 (void)op_in;
2544}
2545#endif
2546
2547static void do_statx(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2548{
2549 _do_statx(req, nodeid, inarg, NULL);
2550}
2551
2552static bool want_flags_valid(uint64_t capable, uint64_t want)
2553{
2554 uint64_t unknown_flags = want & (~capable);
2555 if (unknown_flags != 0) {
2556 fuse_log(FUSE_LOG_ERR,
2557 "fuse: unknown connection 'want' flags: 0x%08llx\n",
2558 (unsigned long long)unknown_flags);
2559 return false;
2560 }
2561 return true;
2562}
2563
2568{
2569 struct fuse_session *se = container_of(conn, struct fuse_session, conn);
2570
2571 /*
2572 * Convert want to want_ext if necessary.
2573 * For the high level interface this function might be called
2574 * twice, once from the high level interface and once from the
2575 * low level interface. Both, with different want_ext_default and
2576 * want_default values. In order to suppress a failure for the
2577 * second call, we check if the lower 32 bits of want_ext are
2578 * already set to the value of want.
2579 */
2580 if (conn->want != se->conn_want &&
2581 fuse_lower_32_bits(conn->want_ext) != conn->want) {
2582 if (conn->want_ext != se->conn_want_ext) {
2583 fuse_log(FUSE_LOG_ERR,
2584 "%s: Both conn->want_ext and conn->want are set.\n"
2585 "want=%x want_ext=%llx, se->want=%x se->want_ext=%llx\n",
2586 __func__, conn->want,
2587 (unsigned long long)conn->want_ext,
2588 se->conn_want,
2589 (unsigned long long)se->conn_want_ext);
2590 return -EINVAL;
2591 }
2592
2593 /* high bits from want_ext, low bits from want */
2594 conn->want_ext = fuse_higher_32_bits(conn->want_ext) |
2595 conn->want;
2596 }
2597
2598 /* ensure there won't be a second conversion */
2599 conn->want = fuse_lower_32_bits(conn->want_ext);
2600
2601 return 0;
2602}
2603
2604bool fuse_set_feature_flag(struct fuse_conn_info *conn,
2605 uint64_t flag)
2606{
2607 struct fuse_session *se = container_of(conn, struct fuse_session, conn);
2608
2609 if (conn->capable_ext & flag) {
2610 conn->want_ext |= flag;
2611 se->conn_want_ext |= flag;
2612 conn->want |= flag;
2613 se->conn_want |= flag;
2614 return true;
2615 }
2616 return false;
2617}
2618
2619void fuse_unset_feature_flag(struct fuse_conn_info *conn,
2620 uint64_t flag)
2621{
2622 struct fuse_session *se = container_of(conn, struct fuse_session, conn);
2623
2624 conn->want_ext &= ~flag;
2625 se->conn_want_ext &= ~flag;
2626 conn->want &= ~flag;
2627 se->conn_want &= ~flag;
2628}
2629
2630bool fuse_get_feature_flag(struct fuse_conn_info *conn,
2631 uint64_t flag)
2632{
2633 return conn->capable_ext & flag ? true : false;
2634}
2635
/* Prevent bogus data-race reports (bogus since "init" is called before
 * multi-threading becomes relevant). */
static __attribute__((no_sanitize("thread"))) void
_do_init(fuse_req_t req, const fuse_ino_t nodeid, const void *op_in,
	 const void *in_payload)
{
	(void)in_payload;
	const struct fuse_init_in *arg = op_in;
	struct fuse_init_out outarg;
	struct fuse_session *se = req->se;
	size_t bufsize = se->bufsize;
	size_t outargsize = sizeof(outarg);
	uint64_t inargflags = 0;
	uint64_t outargflags = 0;
	bool buf_reallocable = se->buf_reallocable;
	(void) nodeid;
	bool enable_io_uring = false;

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
		if (arg->major == 7 && arg->minor >= 6) {
			fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
			fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
				 arg->max_readahead);
		}
	}
	se->conn.proto_major = arg->major;
	se->conn.proto_minor = arg->minor;
	se->conn.capable_ext = 0;
	se->conn.want_ext = 0;

	memset(&outarg, 0, sizeof(outarg));
	outarg.major = FUSE_KERNEL_VERSION;
	outarg.minor = FUSE_KERNEL_MINOR_VERSION;

	if (arg->major < 7) {
		fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
			 arg->major, arg->minor);
		fuse_reply_err(req, EPROTO);
		return;
	}

	if (arg->major > 7) {
		/* Wait for a second INIT request with a 7.X version */
		send_reply_ok(req, &outarg, sizeof(outarg));
		return;
	}

	/* Translate the kernel-advertised FUSE_* feature bits into the
	 * library's FUSE_CAP_* capability mask.  With FUSE_INIT_EXT the
	 * kernel supplies a second 32-bit flag word (flags2). */
	if (arg->minor >= 6) {
		if (arg->max_readahead < se->conn.max_readahead)
			se->conn.max_readahead = arg->max_readahead;
		inargflags = arg->flags;
		if (inargflags & FUSE_INIT_EXT)
			inargflags = inargflags | (uint64_t) arg->flags2 << 32;
		if (inargflags & FUSE_ASYNC_READ)
			se->conn.capable_ext |= FUSE_CAP_ASYNC_READ;
		if (inargflags & FUSE_POSIX_LOCKS)
			se->conn.capable_ext |= FUSE_CAP_POSIX_LOCKS;
		if (inargflags & FUSE_ATOMIC_O_TRUNC)
			se->conn.capable_ext |= FUSE_CAP_ATOMIC_O_TRUNC;
		if (inargflags & FUSE_EXPORT_SUPPORT)
			se->conn.capable_ext |= FUSE_CAP_EXPORT_SUPPORT;
		if (inargflags & FUSE_DONT_MASK)
			se->conn.capable_ext |= FUSE_CAP_DONT_MASK;
		if (inargflags & FUSE_FLOCK_LOCKS)
			se->conn.capable_ext |= FUSE_CAP_FLOCK_LOCKS;
		if (inargflags & FUSE_AUTO_INVAL_DATA)
			se->conn.capable_ext |= FUSE_CAP_AUTO_INVAL_DATA;
		if (inargflags & FUSE_DO_READDIRPLUS)
			se->conn.capable_ext |= FUSE_CAP_READDIRPLUS;
		if (inargflags & FUSE_READDIRPLUS_AUTO)
			se->conn.capable_ext |= FUSE_CAP_READDIRPLUS_AUTO;
		if (inargflags & FUSE_ASYNC_DIO)
			se->conn.capable_ext |= FUSE_CAP_ASYNC_DIO;
		if (inargflags & FUSE_WRITEBACK_CACHE)
			se->conn.capable_ext |= FUSE_CAP_WRITEBACK_CACHE;
		if (inargflags & FUSE_NO_OPEN_SUPPORT)
			se->conn.capable_ext |= FUSE_CAP_NO_OPEN_SUPPORT;
		if (inargflags & FUSE_PARALLEL_DIROPS)
			se->conn.capable_ext |= FUSE_CAP_PARALLEL_DIROPS;
		if (inargflags & FUSE_POSIX_ACL)
			se->conn.capable_ext |= FUSE_CAP_POSIX_ACL;
		if (inargflags & FUSE_HANDLE_KILLPRIV)
			se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV;
		if (inargflags & FUSE_HANDLE_KILLPRIV_V2)
			se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV_V2;
		if (inargflags & FUSE_CACHE_SYMLINKS)
			se->conn.capable_ext |= FUSE_CAP_CACHE_SYMLINKS;
		if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
			se->conn.capable_ext |= FUSE_CAP_NO_OPENDIR_SUPPORT;
		if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
			se->conn.capable_ext |= FUSE_CAP_EXPLICIT_INVAL_DATA;
		if (inargflags & FUSE_SETXATTR_EXT)
			se->conn.capable_ext |= FUSE_CAP_SETXATTR_EXT;
		/* Without FUSE_MAX_PAGES the kernel caps requests at the
		 * default page count, so the receive buffer never needs to
		 * grow beyond that. */
		if (!(inargflags & FUSE_MAX_PAGES)) {
			size_t max_bufsize =
				FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
				+ FUSE_BUFFER_HEADER_SIZE;
			if (bufsize > max_bufsize) {
				bufsize = max_bufsize;
			}
			buf_reallocable = false;
		}
		if (inargflags & FUSE_DIRECT_IO_ALLOW_MMAP)
			se->conn.capable_ext |= FUSE_CAP_DIRECT_IO_ALLOW_MMAP;
		if (arg->minor >= 38 || (inargflags & FUSE_HAS_EXPIRE_ONLY))
			se->conn.capable_ext |= FUSE_CAP_EXPIRE_ONLY;
		if (inargflags & FUSE_PASSTHROUGH)
			se->conn.capable_ext |= FUSE_CAP_PASSTHROUGH;
		if (inargflags & FUSE_NO_EXPORT_SUPPORT)
			se->conn.capable_ext |= FUSE_CAP_NO_EXPORT_SUPPORT;
		if (inargflags & FUSE_OVER_IO_URING)
			se->conn.capable_ext |= FUSE_CAP_OVER_IO_URING;

	} else {
		se->conn.max_readahead = 0;
	}

	/* Splice capabilities depend on build-time support and, with a
	 * custom se->io, on the presence of the matching callbacks. */
	if (se->conn.proto_minor >= 14) {
#ifdef HAVE_SPLICE
#ifdef HAVE_VMSPLICE
		if ((se->io == NULL) || (se->io->splice_send != NULL)) {
			se->conn.capable_ext |= FUSE_CAP_SPLICE_WRITE |
		}
#endif
		if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
			se->conn.capable_ext |= FUSE_CAP_SPLICE_READ;
		}
#endif
	}
	if (se->conn.proto_minor >= 18)
		se->conn.capable_ext |= FUSE_CAP_IOCTL_DIR;

	/* Default settings for modern filesystems.
	 *
	 * Most of these capabilities were disabled by default in
	 * libfuse2 for backwards compatibility reasons. In libfuse3,
	 * we can finally enable them by default (as long as they're
	 * supported by the kernel).
	 */
#define LL_SET_DEFAULT(cond, cap) \
	if ((cond)) \
		fuse_set_feature_flag(&se->conn, cap)

	LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
	LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
	LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
	LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
	LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
	LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
	LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
	LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
	LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
	LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
	LL_SET_DEFAULT(1, FUSE_CAP_OVER_IO_URING);

	/* This could safely become default, but libfuse needs an API extension
	 * to support it
	 * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
	 */

	se->conn.time_gran = 1;

	if (se->op.init) {
		// Apply the first 32 bits of capable_ext to capable
		se->conn.capable = fuse_lower_32_bits(se->conn.capable_ext);

		se->op.init(se->userdata, &se->conn);

		/*
		 * se->conn.want is 32-bit value and deprecated in favour of
		 * se->conn.want_ext
		 * Userspace might still use conn.want - we need to convert it
		 */
	}

	if (!want_flags_valid(se->conn.capable_ext, se->conn.want_ext)) {
		fuse_reply_err(req, EPROTO);
		se->error = -EPROTO;
		return;
	}

	unsigned max_read_mo = get_max_read(se->mo);
	if (se->conn.max_read != max_read_mo) {
		fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
			 "requested different maximum read size (%u vs %u)\n",
			 se->conn.max_read, max_read_mo);
		fuse_reply_err(req, EPROTO);
		se->error = -EPROTO;
		return;
	}

	if (bufsize < FUSE_MIN_READ_BUFFER) {
		fuse_log(FUSE_LOG_ERR,
			 "fuse: warning: buffer size too small: %zu\n",
			 bufsize);
		bufsize = FUSE_MIN_READ_BUFFER;
	}

	/* With reallocatable buffers the write size is limited only by the
	 * kernel; otherwise it must fit the fixed receive buffer. */
	if (buf_reallocable)
		bufsize = UINT_MAX;
	se->conn.max_write = MIN(se->conn.max_write, bufsize - FUSE_BUFFER_HEADER_SIZE);
	se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;

	if (arg->flags & FUSE_MAX_PAGES) {
		outarg.flags |= FUSE_MAX_PAGES;
		outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
	}
	outargflags = outarg.flags;
	/* Always enable big writes, this is superseded
	   by the max_write option */
	outargflags |= FUSE_BIG_WRITES;

	/* Translate the negotiated FUSE_CAP_* 'want' mask back into
	 * protocol flag bits for the INIT reply. */
	if (se->conn.want_ext & FUSE_CAP_ASYNC_READ)
		outargflags |= FUSE_ASYNC_READ;
	if (se->conn.want_ext & FUSE_CAP_POSIX_LOCKS)
		outargflags |= FUSE_POSIX_LOCKS;
	if (se->conn.want_ext & FUSE_CAP_ATOMIC_O_TRUNC)
		outargflags |= FUSE_ATOMIC_O_TRUNC;
	if (se->conn.want_ext & FUSE_CAP_EXPORT_SUPPORT)
		outargflags |= FUSE_EXPORT_SUPPORT;
	if (se->conn.want_ext & FUSE_CAP_DONT_MASK)
		outargflags |= FUSE_DONT_MASK;
	if (se->conn.want_ext & FUSE_CAP_FLOCK_LOCKS)
		outargflags |= FUSE_FLOCK_LOCKS;
	if (se->conn.want_ext & FUSE_CAP_AUTO_INVAL_DATA)
		outargflags |= FUSE_AUTO_INVAL_DATA;
	if (se->conn.want_ext & FUSE_CAP_READDIRPLUS)
		outargflags |= FUSE_DO_READDIRPLUS;
	if (se->conn.want_ext & FUSE_CAP_READDIRPLUS_AUTO)
		outargflags |= FUSE_READDIRPLUS_AUTO;
	if (se->conn.want_ext & FUSE_CAP_ASYNC_DIO)
		outargflags |= FUSE_ASYNC_DIO;
	if (se->conn.want_ext & FUSE_CAP_WRITEBACK_CACHE)
		outargflags |= FUSE_WRITEBACK_CACHE;
	if (se->conn.want_ext & FUSE_CAP_PARALLEL_DIROPS)
		outargflags |= FUSE_PARALLEL_DIROPS;
	if (se->conn.want_ext & FUSE_CAP_POSIX_ACL)
		outargflags |= FUSE_POSIX_ACL;
	if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV)
		outargflags |= FUSE_HANDLE_KILLPRIV;
	if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV_V2)
		outargflags |= FUSE_HANDLE_KILLPRIV_V2;
	if (se->conn.want_ext & FUSE_CAP_CACHE_SYMLINKS)
		outargflags |= FUSE_CACHE_SYMLINKS;
	if (se->conn.want_ext & FUSE_CAP_EXPLICIT_INVAL_DATA)
		outargflags |= FUSE_EXPLICIT_INVAL_DATA;
	if (se->conn.want_ext & FUSE_CAP_SETXATTR_EXT)
		outargflags |= FUSE_SETXATTR_EXT;
	if (se->conn.want_ext & FUSE_CAP_DIRECT_IO_ALLOW_MMAP)
		outargflags |= FUSE_DIRECT_IO_ALLOW_MMAP;
	if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH) {
		outargflags |= FUSE_PASSTHROUGH;
		/*
		 * outarg.max_stack_depth includes the fuse stack layer,
		 * so it is one more than max_backing_stack_depth.
		 */
		outarg.max_stack_depth = se->conn.max_backing_stack_depth + 1;
	}
	if (se->conn.want_ext & FUSE_CAP_NO_EXPORT_SUPPORT)
		outargflags |= FUSE_NO_EXPORT_SUPPORT;
	if (se->uring.enable && se->conn.want_ext & FUSE_CAP_OVER_IO_URING) {
		outargflags |= FUSE_OVER_IO_URING;
		enable_io_uring = true;
	}

	if ((inargflags & FUSE_REQUEST_TIMEOUT) && se->conn.request_timeout) {
		outargflags |= FUSE_REQUEST_TIMEOUT;
		outarg.request_timeout = se->conn.request_timeout;
	}

	outarg.max_readahead = se->conn.max_readahead;
	outarg.max_write = se->conn.max_write;
	if (se->conn.proto_minor >= 13) {
		if (se->conn.max_background >= (1 << 16))
			se->conn.max_background = (1 << 16) - 1;
		if (se->conn.congestion_threshold > se->conn.max_background)
			se->conn.congestion_threshold = se->conn.max_background;
		if (!se->conn.congestion_threshold) {
			se->conn.congestion_threshold =
				se->conn.max_background * 3 / 4;
		}

		outarg.max_background = se->conn.max_background;
		outarg.congestion_threshold = se->conn.congestion_threshold;
	}
	if (se->conn.proto_minor >= 23)
		outarg.time_gran = se->conn.time_gran;

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG, "   INIT: %u.%u\n", outarg.major, outarg.minor);
		fuse_log(FUSE_LOG_DEBUG, "   flags=0x%08x\n", outarg.flags);
		fuse_log(FUSE_LOG_DEBUG, "   max_readahead=0x%08x\n",
			 outarg.max_readahead);
		fuse_log(FUSE_LOG_DEBUG, "   max_write=0x%08x\n", outarg.max_write);
		fuse_log(FUSE_LOG_DEBUG, "   max_background=%i\n",
			 outarg.max_background);
		fuse_log(FUSE_LOG_DEBUG, "   congestion_threshold=%i\n",
			 outarg.congestion_threshold);
		fuse_log(FUSE_LOG_DEBUG, "   time_gran=%u\n",
			 outarg.time_gran);
		if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH)
			fuse_log(FUSE_LOG_DEBUG, "   max_stack_depth=%u\n",
				 outarg.max_stack_depth);
	}
	/* Older kernels expect a truncated fuse_init_out */
	if (arg->minor < 5)
		outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
	else if (arg->minor < 23)
		outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;

	/* XXX: Add an option to make non-available io-uring fatal */
	if (enable_io_uring) {
		int ring_rc = fuse_uring_start(se);

		if (ring_rc != 0) {
			fuse_log(FUSE_LOG_INFO,
				 "fuse: failed to start io-uring: %s\n",
				 strerror(ring_rc));
			outargflags &= ~FUSE_OVER_IO_URING;
			enable_io_uring = false;
		}
	}

	if (inargflags & FUSE_INIT_EXT) {
		outargflags |= FUSE_INIT_EXT;
		outarg.flags2 = outargflags >> 32;
	}
	outarg.flags = outargflags;

	/*
	 * Has to be set before replying, as new kernel requests might
	 * immediately arrive and got_init is used for op-code sanity.
	 * Especially with external handlers, where we have no control
	 * over the thread scheduling.
	 */
	se->got_init = 1;
	send_reply_ok(req, &outarg, outargsize);
	if (enable_io_uring)
		fuse_uring_wake_ring_threads(se);
}
2982
2983static __attribute__((no_sanitize("thread"))) void
2984do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2985{
2986 _do_init(req, nodeid, inarg, NULL);
2987}
2988
2989static void _do_destroy(fuse_req_t req, const fuse_ino_t nodeid,
2990 const void *op_in, const void *in_payload)
2991{
2992 struct fuse_session *se = req->se;
2993 char *mountpoint;
2994
2995 (void) nodeid;
2996 (void)op_in;
2997 (void)in_payload;
2998
2999 mountpoint = atomic_exchange(&se->mountpoint, NULL);
3000 free(mountpoint);
3001
3002 se->got_destroy = 1;
3003 se->got_init = 0;
3004 if (se->op.destroy)
3005 se->op.destroy(se->userdata);
3006
3007 send_reply_ok(req, NULL, 0);
3008}
3009
3010static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
3011{
3012 _do_destroy(req, nodeid, inarg, NULL);
3013}
3014
3015static void list_del_nreq(struct fuse_notify_req *nreq)
3016{
3017 struct fuse_notify_req *prev = nreq->prev;
3018 struct fuse_notify_req *next = nreq->next;
3019 prev->next = next;
3020 next->prev = prev;
3021}
3022
3023static void list_add_nreq(struct fuse_notify_req *nreq,
3024 struct fuse_notify_req *next)
3025{
3026 struct fuse_notify_req *prev = next->prev;
3027 nreq->next = next;
3028 nreq->prev = prev;
3029 prev->next = nreq;
3030 next->prev = nreq;
3031}
3032
3033static void list_init_nreq(struct fuse_notify_req *nreq)
3034{
3035 nreq->next = nreq;
3036 nreq->prev = nreq;
3037}
3038
3039static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
3040 const void *inarg, const struct fuse_buf *buf)
3041{
3042 struct fuse_session *se = req->se;
3043 struct fuse_notify_req *nreq;
3044 struct fuse_notify_req *head;
3045
3046 pthread_mutex_lock(&se->lock);
3047 head = &se->notify_list;
3048 for (nreq = head->next; nreq != head; nreq = nreq->next) {
3049 if (nreq->unique == req->unique) {
3050 list_del_nreq(nreq);
3051 break;
3052 }
3053 }
3054 pthread_mutex_unlock(&se->lock);
3055
3056 if (nreq != head)
3057 nreq->reply(nreq, req, nodeid, inarg, buf);
3058}
3059
3060static int send_notify_iov(struct fuse_session *se, int notify_code,
3061 struct iovec *iov, int count)
3062{
3063 struct fuse_out_header out;
3064 struct fuse_req *req = NULL;
3065
3066 if (!se->got_init)
3067 return -ENOTCONN;
3068
3069 out.unique = 0;
3070 out.error = notify_code;
3071 iov[0].iov_base = &out;
3072 iov[0].iov_len = sizeof(struct fuse_out_header);
3073
3074 return fuse_send_msg(se, NULL, iov, count, req);
3075}
3076
3077int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
3078{
3079 if (ph != NULL) {
3080 struct fuse_notify_poll_wakeup_out outarg;
3081 struct iovec iov[2];
3082
3083 outarg.kh = ph->kh;
3084
3085 iov[1].iov_base = &outarg;
3086 iov[1].iov_len = sizeof(outarg);
3087
3088 return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
3089 } else {
3090 return 0;
3091 }
3092}
3093
3094int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
3095 off_t off, off_t len)
3096{
3097 struct fuse_notify_inval_inode_out outarg;
3098 struct iovec iov[2];
3099
3100 if (!se)
3101 return -EINVAL;
3102
3103 if (se->conn.proto_minor < 12)
3104 return -ENOSYS;
3105
3106 outarg.ino = ino;
3107 outarg.off = off;
3108 outarg.len = len;
3109
3110 iov[1].iov_base = &outarg;
3111 iov[1].iov_len = sizeof(outarg);
3112
3113 return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
3114}
3115
3116int fuse_lowlevel_notify_increment_epoch(struct fuse_session *se)
3117{
3118 struct iovec iov[1];
3119
3120 if (!se)
3121 return -EINVAL;
3122
3123 if (se->conn.proto_minor < 44)
3124 return -ENOSYS;
3125
3126 return send_notify_iov(se, FUSE_NOTIFY_INC_EPOCH, iov, 1);
3127}
3128
3148static int fuse_lowlevel_notify_entry(struct fuse_session *se, fuse_ino_t parent,
3149 const char *name, size_t namelen,
3150 enum fuse_notify_entry_flags flags)
3151{
3152 struct fuse_notify_inval_entry_out outarg;
3153 struct iovec iov[3];
3154
3155 if (!se)
3156 return -EINVAL;
3157
3158 if (se->conn.proto_minor < 12)
3159 return -ENOSYS;
3160
3161 outarg.parent = parent;
3162 outarg.namelen = namelen;
3163 outarg.flags = 0;
3164 if (flags & FUSE_LL_EXPIRE_ONLY)
3165 outarg.flags |= FUSE_EXPIRE_ONLY;
3166
3167 iov[1].iov_base = &outarg;
3168 iov[1].iov_len = sizeof(outarg);
3169 iov[2].iov_base = (void *)name;
3170 iov[2].iov_len = namelen + 1;
3171
3172 return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
3173}
3174
3175int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
3176 const char *name, size_t namelen)
3177{
3178 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_INVALIDATE);
3179}
3180
3181int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
3182 const char *name, size_t namelen)
3183{
3184 if (!se)
3185 return -EINVAL;
3186
3187 if (!(se->conn.capable_ext & FUSE_CAP_EXPIRE_ONLY))
3188 return -ENOSYS;
3189
3190 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_EXPIRE_ONLY);
3191}
3192
3193
3194int fuse_lowlevel_notify_delete(struct fuse_session *se,
3195 fuse_ino_t parent, fuse_ino_t child,
3196 const char *name, size_t namelen)
3197{
3198 struct fuse_notify_delete_out outarg;
3199 struct iovec iov[3];
3200
3201 if (!se)
3202 return -EINVAL;
3203
3204 if (se->conn.proto_minor < 18)
3205 return -ENOSYS;
3206
3207 outarg.parent = parent;
3208 outarg.child = child;
3209 outarg.namelen = namelen;
3210 outarg.padding = 0;
3211
3212 iov[1].iov_base = &outarg;
3213 iov[1].iov_len = sizeof(outarg);
3214 iov[2].iov_base = (void *)name;
3215 iov[2].iov_len = namelen + 1;
3216
3217 return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
3218}
3219
3220int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
3221 off_t offset, struct fuse_bufvec *bufv,
3222 enum fuse_buf_copy_flags flags)
3223{
3224 struct fuse_out_header out;
3225 struct fuse_notify_store_out outarg;
3226 struct iovec iov[3];
3227 size_t size = fuse_buf_size(bufv);
3228 int res;
3229 struct fuse_req *req = NULL;
3230
3231 if (!se)
3232 return -EINVAL;
3233
3234 if (se->conn.proto_minor < 15)
3235 return -ENOSYS;
3236
3237 out.unique = 0;
3238 out.error = FUSE_NOTIFY_STORE;
3239
3240 outarg.nodeid = ino;
3241 outarg.offset = offset;
3242 outarg.size = size;
3243 outarg.padding = 0;
3244
3245 iov[0].iov_base = &out;
3246 iov[0].iov_len = sizeof(out);
3247 iov[1].iov_base = &outarg;
3248 iov[1].iov_len = sizeof(outarg);
3249
3250 res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags, req);
3251 if (res > 0)
3252 res = -res;
3253
3254 return res;
3255}
3256
/* Bookkeeping for one outstanding fuse_lowlevel_notify_retrieve() call;
 * linked into se->notify_list via nreq until the kernel replies. */
struct fuse_retrieve_req {
	struct fuse_notify_req nreq;
	void *cookie;	/* caller-supplied, handed back to op.retrieve_reply */
};
3261
/*
 * Completion callback for fuse_lowlevel_notify_retrieve(): invoked when
 * the kernel answers with a FUSE_NOTIFY_REPLY carrying the requested
 * page-cache data.  Trims the buffer down to the payload, forwards it to
 * op.retrieve_reply() (or drops the request), and frees the bookkeeping.
 */
static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
				   fuse_req_t req, fuse_ino_t ino,
				   const void *inarg,
				   const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_retrieve_req *rreq =
		container_of(nreq, struct fuse_retrieve_req, nreq);
	const struct fuse_notify_retrieve_in *arg = inarg;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};

	/* For in-memory buffers the payload follows the headers in-line */
	if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
		bufv.buf[0].mem = PARAM(arg);

	/* Exclude the request header and retrieve header from the size */
	bufv.buf[0].size -= sizeof(struct fuse_in_header) +
		sizeof(struct fuse_notify_retrieve_in);

	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
		fuse_reply_none(req);
		goto out;
	}
	bufv.buf[0].size = arg->size;

	if (se->op.retrieve_reply) {
		se->op.retrieve_reply(req, rreq->cookie, ino,
				      arg->offset, &bufv);
	} else {
		fuse_reply_none(req);
	}
out:
	free(rreq);
	/* If the data arrived through a pipe and was not fully consumed,
	 * drain the pipe so it can be reused. */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
3300
3301int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
3302 size_t size, off_t offset, void *cookie)
3303{
3304 struct fuse_notify_retrieve_out outarg;
3305 struct iovec iov[2];
3306 struct fuse_retrieve_req *rreq;
3307 int err;
3308
3309 if (!se)
3310 return -EINVAL;
3311
3312 if (se->conn.proto_minor < 15)
3313 return -ENOSYS;
3314
3315 rreq = malloc(sizeof(*rreq));
3316 if (rreq == NULL)
3317 return -ENOMEM;
3318
3319 pthread_mutex_lock(&se->lock);
3320 rreq->cookie = cookie;
3321 rreq->nreq.unique = se->notify_ctr++;
3322 rreq->nreq.reply = fuse_ll_retrieve_reply;
3323 list_add_nreq(&rreq->nreq, &se->notify_list);
3324 pthread_mutex_unlock(&se->lock);
3325
3326 outarg.notify_unique = rreq->nreq.unique;
3327 outarg.nodeid = ino;
3328 outarg.offset = offset;
3329 outarg.size = size;
3330 outarg.padding = 0;
3331
3332 iov[1].iov_base = &outarg;
3333 iov[1].iov_len = sizeof(outarg);
3334
3335 err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
3336 if (err) {
3337 pthread_mutex_lock(&se->lock);
3338 list_del_nreq(&rreq->nreq);
3339 pthread_mutex_unlock(&se->lock);
3340 free(rreq);
3341 }
3342
3343 return err;
3344}
3345
3347{
3348 return req->se->userdata;
3349}
3350
/* Return the context (credentials etc.) stored in the request.  The
 * pointer is only valid for the lifetime of 'req'. */
const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
{
	return &req->ctx;
}
3355
3357 void *data)
3358{
3359 pthread_mutex_lock(&req->lock);
3360 pthread_mutex_lock(&req->se->lock);
3361 req->u.ni.func = func;
3362 req->u.ni.data = data;
3363 pthread_mutex_unlock(&req->se->lock);
3364 if (req->interrupted && func)
3365 func(req, data);
3366 pthread_mutex_unlock(&req->lock);
3367}
3368
3370{
3371 int interrupted;
3372
3373 pthread_mutex_lock(&req->se->lock);
3374 interrupted = req->interrupted;
3375 pthread_mutex_unlock(&req->se->lock);
3376
3377 return interrupted;
3378}
3379
3381{
3382 return req->flags.is_uring;
3383}
3384
#ifndef HAVE_URING
/* Stub for builds without io-uring support: payload buffers only exist
 * for io-uring requests, so always report ENOTSUP. */
int fuse_req_get_payload(fuse_req_t req, char **payload, size_t *payload_sz,
			 void **mr)
{
	(void)req;
	(void)payload;
	(void)payload_sz;
	(void)mr;
	return -ENOTSUP;
}
#endif
3396
/*
 * Dispatch table for the classic (non-uring) request path: maps each
 * FUSE opcode to its handler and a printable name used for debug logs.
 * FUSE_NOTIFY_REPLY carries the dummy pointer (void *) 1 because its
 * handler (do_notify_reply) takes an extra fuse_buf argument and is
 * presumably dispatched out-of-line — confirm against the session loop.
 */
static struct {
	void (*func)(fuse_req_t req, const fuse_ino_t node, const void *arg);
	const char *name;
} fuse_ll_ops[] = {
	[FUSE_LOOKUP]	   = { do_lookup,      "LOOKUP"	     },
	[FUSE_FORGET]	   = { do_forget,      "FORGET"	     },
	[FUSE_GETATTR]	   = { do_getattr,     "GETATTR"     },
	[FUSE_SETATTR]	   = { do_setattr,     "SETATTR"     },
	[FUSE_READLINK]	   = { do_readlink,    "READLINK"    },
	[FUSE_SYMLINK]	   = { do_symlink,     "SYMLINK"     },
	[FUSE_MKNOD]	   = { do_mknod,       "MKNOD"	     },
	[FUSE_MKDIR]	   = { do_mkdir,       "MKDIR"	     },
	[FUSE_UNLINK]	   = { do_unlink,      "UNLINK"	     },
	[FUSE_RMDIR]	   = { do_rmdir,       "RMDIR"	     },
	[FUSE_RENAME]	   = { do_rename,      "RENAME"	     },
	[FUSE_LINK]	   = { do_link,	       "LINK"	     },
	[FUSE_OPEN]	   = { do_open,	       "OPEN"	     },
	[FUSE_READ]	   = { do_read,	       "READ"	     },
	[FUSE_WRITE]	   = { do_write,       "WRITE"	     },
	[FUSE_STATFS]	   = { do_statfs,      "STATFS"	     },
	[FUSE_RELEASE]	   = { do_release,     "RELEASE"     },
	[FUSE_FSYNC]	   = { do_fsync,       "FSYNC"	     },
	[FUSE_SETXATTR]	   = { do_setxattr,    "SETXATTR"    },
	[FUSE_GETXATTR]	   = { do_getxattr,    "GETXATTR"    },
	[FUSE_LISTXATTR]   = { do_listxattr,   "LISTXATTR"   },
	[FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
	[FUSE_FLUSH]	   = { do_flush,       "FLUSH"	     },
	[FUSE_INIT]	   = { do_init,	       "INIT"	     },
	[FUSE_OPENDIR]	   = { do_opendir,     "OPENDIR"     },
	[FUSE_READDIR]	   = { do_readdir,     "READDIR"     },
	[FUSE_RELEASEDIR]  = { do_releasedir,  "RELEASEDIR"  },
	[FUSE_FSYNCDIR]	   = { do_fsyncdir,    "FSYNCDIR"    },
	[FUSE_GETLK]	   = { do_getlk,       "GETLK"	     },
	[FUSE_SETLK]	   = { do_setlk,       "SETLK"	     },
	[FUSE_SETLKW]	   = { do_setlkw,      "SETLKW"	     },
	[FUSE_ACCESS]	   = { do_access,      "ACCESS"	     },
	[FUSE_CREATE]	   = { do_create,      "CREATE"	     },
	[FUSE_TMPFILE]	   = { do_tmpfile,     "TMPFILE"     },
	[FUSE_INTERRUPT]   = { do_interrupt,   "INTERRUPT"   },
	[FUSE_BMAP]	   = { do_bmap,	       "BMAP"	     },
	[FUSE_IOCTL]	   = { do_ioctl,       "IOCTL"	     },
	[FUSE_POLL]	   = { do_poll,	       "POLL"	     },
	[FUSE_FALLOCATE]   = { do_fallocate,   "FALLOCATE"   },
	[FUSE_DESTROY]	   = { do_destroy,     "DESTROY"     },
	[FUSE_NOTIFY_REPLY] = { (void *) 1,    "NOTIFY_REPLY" },
	[FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
	[FUSE_READDIRPLUS] = { do_readdirplus,	"READDIRPLUS"},
	[FUSE_RENAME2]     = { do_rename2,      "RENAME2"    },
	[FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
	[FUSE_COPY_FILE_RANGE_64] = { do_copy_file_range_64, "COPY_FILE_RANGE_64" },
	[FUSE_LSEEK]       = { do_lseek,       "LSEEK"	     },
	[FUSE_STATX]	   = { do_statx,       "STATX"	     },
	[CUSE_INIT]	   = { cuse_lowlevel_init, "CUSE_INIT"	 },
};
3451
/*
 * io-uring variant of the dispatch table: unlike fuse_ll_ops, handlers
 * receive the opcode-specific argument (op_in) and the data payload
 * separately instead of one inline buffer. Indexed by FUSE opcode.
 *
 * FUSE_NOTIFY_REPLY is dispatched by hand in
 * fuse_session_process_uring_cqe(), so its entry only needs to be
 * non-NULL (the (void *)1 sentinel) for the availability check.
 */
static struct {
	void (*func)(fuse_req_t req, const fuse_ino_t ino, const void *op_in,
		     const void *op_payload);
	const char *name;
} fuse_ll_ops2[] __attribute__((unused)) = {
	[FUSE_LOOKUP] = { _do_lookup, "LOOKUP" },
	[FUSE_FORGET] = { _do_forget, "FORGET" },
	[FUSE_GETATTR] = { _do_getattr, "GETATTR" },
	[FUSE_SETATTR] = { _do_setattr, "SETATTR" },
	[FUSE_READLINK] = { _do_readlink, "READLINK" },
	[FUSE_SYMLINK] = { _do_symlink, "SYMLINK" },
	[FUSE_MKNOD] = { _do_mknod, "MKNOD" },
	[FUSE_MKDIR] = { _do_mkdir, "MKDIR" },
	[FUSE_UNLINK] = { _do_unlink, "UNLINK" },
	[FUSE_RMDIR] = { _do_rmdir, "RMDIR" },
	[FUSE_RENAME] = { _do_rename, "RENAME" },
	[FUSE_LINK] = { _do_link, "LINK" },
	[FUSE_OPEN] = { _do_open, "OPEN" },
	[FUSE_READ] = { _do_read, "READ" },
	[FUSE_WRITE] = { _do_write, "WRITE" },
	[FUSE_STATFS] = { _do_statfs, "STATFS" },
	[FUSE_RELEASE] = { _do_release, "RELEASE" },
	[FUSE_FSYNC] = { _do_fsync, "FSYNC" },
	[FUSE_SETXATTR] = { _do_setxattr, "SETXATTR" },
	[FUSE_GETXATTR] = { _do_getxattr, "GETXATTR" },
	[FUSE_LISTXATTR] = { _do_listxattr, "LISTXATTR" },
	[FUSE_REMOVEXATTR] = { _do_removexattr, "REMOVEXATTR" },
	[FUSE_FLUSH] = { _do_flush, "FLUSH" },
	[FUSE_INIT] = { _do_init, "INIT" },
	[FUSE_OPENDIR] = { _do_opendir, "OPENDIR" },
	[FUSE_READDIR] = { _do_readdir, "READDIR" },
	[FUSE_RELEASEDIR] = { _do_releasedir, "RELEASEDIR" },
	[FUSE_FSYNCDIR] = { _do_fsyncdir, "FSYNCDIR" },
	[FUSE_GETLK] = { _do_getlk, "GETLK" },
	[FUSE_SETLK] = { _do_setlk, "SETLK" },
	[FUSE_SETLKW] = { _do_setlkw, "SETLKW" },
	[FUSE_ACCESS] = { _do_access, "ACCESS" },
	[FUSE_CREATE] = { _do_create, "CREATE" },
	[FUSE_TMPFILE] = { _do_tmpfile, "TMPFILE" },
	[FUSE_INTERRUPT] = { _do_interrupt, "INTERRUPT" },
	[FUSE_BMAP] = { _do_bmap, "BMAP" },
	[FUSE_IOCTL] = { _do_ioctl, "IOCTL" },
	[FUSE_POLL] = { _do_poll, "POLL" },
	[FUSE_FALLOCATE] = { _do_fallocate, "FALLOCATE" },
	[FUSE_DESTROY] = { _do_destroy, "DESTROY" },
	[FUSE_NOTIFY_REPLY] = { (void *)1, "NOTIFY_REPLY" },
	[FUSE_BATCH_FORGET] = { _do_batch_forget, "BATCH_FORGET" },
	[FUSE_READDIRPLUS] = { _do_readdirplus, "READDIRPLUS" },
	[FUSE_RENAME2] = { _do_rename2, "RENAME2" },
	[FUSE_COPY_FILE_RANGE] = { _do_copy_file_range, "COPY_FILE_RANGE" },
	[FUSE_COPY_FILE_RANGE_64] = { _do_copy_file_range_64, "COPY_FILE_RANGE_64" },
	[FUSE_LSEEK] = { _do_lseek, "LSEEK" },
	[FUSE_STATX] = { _do_statx, "STATX" },
	[CUSE_INIT] = { _cuse_lowlevel_init, "CUSE_INIT" },
};
3507
3508/*
3509 * For ABI compatibility we cannot allow higher values than CUSE_INIT.
3510 * Without ABI compatibility we could use the size of the array.
3511 * #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
3512 */
3513#define FUSE_MAXOP (CUSE_INIT + 1)
3514
3515
3520static inline int
3521fuse_req_opcode_sanity_ok(struct fuse_session *se, enum fuse_opcode in_op)
3522{
3523 int err = EIO;
3524
3525 if (!se->got_init) {
3526 enum fuse_opcode expected;
3527
3528 expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
3529 if (in_op != expected)
3530 return err;
3531 } else if (in_op == FUSE_INIT || in_op == CUSE_INIT)
3532 return err;
3533
3534 return 0;
3535}
3536
3537static inline void
3538fuse_session_in2req(struct fuse_req *req, struct fuse_in_header *in)
3539{
3540 req->unique = in->unique;
3541 req->ctx.uid = in->uid;
3542 req->ctx.gid = in->gid;
3543 req->ctx.pid = in->pid;
3544}
3545
3549static inline int
3550fuse_req_check_allow_root(struct fuse_session *se, enum fuse_opcode in_op,
3551 uid_t in_uid)
3552{
3553 int err = EACCES;
3554
3555 if (se->deny_others && in_uid != se->owner && in_uid != 0 &&
3556 in_op != FUSE_INIT && in_op != FUSE_READ &&
3557 in_op != FUSE_WRITE && in_op != FUSE_FSYNC &&
3558 in_op != FUSE_RELEASE && in_op != FUSE_READDIR &&
3559 in_op != FUSE_FSYNCDIR && in_op != FUSE_RELEASEDIR &&
3560 in_op != FUSE_NOTIFY_REPLY &&
3561 in_op != FUSE_READDIRPLUS)
3562 return err;
3563
3564 return 0;
3565}
3566
3567static const char *opname(enum fuse_opcode opcode)
3568{
3569 if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
3570 return "???";
3571 else
3572 return fuse_ll_ops[opcode].name;
3573}
3574
3575static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
3576 struct fuse_bufvec *src)
3577{
3578 ssize_t res = fuse_buf_copy(dst, src, 0);
3579 if (res < 0) {
3580 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
3581 return res;
3582 }
3583 if ((size_t)res < fuse_buf_size(dst)) {
3584 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
3585 return -1;
3586 }
3587 return 0;
3588}
3589
/*
 * Public entry point: process one raw request buffer on the session's
 * default channel (ch == NULL).
 */
void fuse_session_process_buf(struct fuse_session *se,
			      const struct fuse_buf *buf)
{
	fuse_session_process_buf_internal(se, buf, NULL);
}
3595
/* libfuse internal handler */
/*
 * Parse and dispatch a single kernel request.
 *
 * buf either holds the whole request in memory, or (FUSE_BUF_IS_FD set)
 * the request still sits in a splice pipe; in that case the in-header
 * (and write-header) are copied out first so the request can be inspected,
 * and the remainder is copied later unless a zero-copy path applies.
 */
void fuse_session_process_buf_internal(struct fuse_session *se,
				       const struct fuse_buf *buf, struct fuse_chan *ch)
{
	const size_t write_header_size = sizeof(struct fuse_in_header) +
					 sizeof(struct fuse_write_in);
	struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
	struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
	struct fuse_in_header *in;
	const void *inarg;
	struct fuse_req *req;
	void *mbuf = NULL;
	int err;
	int res;

	if (buf->flags & FUSE_BUF_IS_FD) {
		/* Copy only the headers out of the pipe for now */
		if (buf->size < tmpbuf.buf[0].size)
			tmpbuf.buf[0].size = buf->size;

		mbuf = malloc(tmpbuf.buf[0].size);
		if (mbuf == NULL) {
			fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
			goto clear_pipe;
		}
		tmpbuf.buf[0].mem = mbuf;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		if (res < 0)
			goto clear_pipe;

		in = mbuf;
	} else {
		in = buf->mem;
	}

	trace_request_process(in->opcode, in->unique);

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG,
			"dev unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
			(unsigned long long) in->unique,
			opname((enum fuse_opcode) in->opcode), in->opcode,
			(unsigned long long) in->nodeid, buf->size, in->pid);
	}

	req = fuse_ll_alloc_req(se);
	if (req == NULL) {
		/* No request object available: answer ENOMEM on the wire
		   directly, bypassing the normal reply path */
		struct fuse_out_header out = {
			.unique = in->unique,
			.error = -ENOMEM,
		};
		struct iovec iov = {
			.iov_base = &out,
			.iov_len = sizeof(struct fuse_out_header),
		};

		fuse_send_msg(se, ch, &iov, 1, NULL);
		goto clear_pipe;
	}

	fuse_session_in2req(req, in);
	req->ch = ch ? fuse_chan_get(ch) : NULL;

	/* Only the handshake opcode is valid before INIT, none after */
	err = fuse_req_opcode_sanity_ok(se, in->opcode);
	if (err)
		goto reply_err;

	/* 'allow_root': reject requests from other unprivileged users */
	err = fuse_req_check_allow_root(se, in->opcode, in->uid);
	if (err)
		goto reply_err;

	err = ENOSYS;
	if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
		goto reply_err;
	/* Do not process interrupt request */
	if (se->conn.no_interrupt && in->opcode == FUSE_INTERRUPT) {
		if (se->debug)
			fuse_log(FUSE_LOG_DEBUG, "FUSE_INTERRUPT: reply to kernel to disable interrupt\n");
		goto reply_err;
	}
	if (!se->conn.no_interrupt && in->opcode != FUSE_INTERRUPT) {
		/* Track the request so a later FUSE_INTERRUPT can find it;
		   if an interrupt already arrived, fail it with EAGAIN */
		struct fuse_req *intr;
		pthread_mutex_lock(&se->lock);
		intr = check_interrupt(se, req);
		list_add_req(req, &se->list);
		pthread_mutex_unlock(&se->lock);
		if (intr)
			fuse_reply_err(intr, EAGAIN);
	}

	/* Data still in the pipe must be copied to memory before dispatch,
	   except for zero-copy writes (write_buf) and notify replies which
	   consume the pipe buffer themselves */
	if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
	    (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
	    in->opcode != FUSE_NOTIFY_REPLY) {
		void *newmbuf;

		err = ENOMEM;
		newmbuf = realloc(mbuf, buf->size);
		if (newmbuf == NULL)
			goto reply_err;
		mbuf = newmbuf;

		tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
		tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		err = -res;
		if (res < 0)
			goto reply_err;

		in = mbuf;
	}

	/* The opcode-specific argument follows directly after the header */
	inarg = (void *) &in[1];
	if (in->opcode == FUSE_WRITE && se->op.write_buf)
		do_write_buf(req, in->nodeid, inarg, buf);
	else if (in->opcode == FUSE_NOTIFY_REPLY)
		do_notify_reply(req, in->nodeid, inarg, buf);
	else
		fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);

out_free:
	free(mbuf);
	return;

reply_err:
	fuse_reply_err(req, err);
clear_pipe:
	if (buf->flags & FUSE_BUF_IS_FD)
		fuse_ll_clear_pipe(se);
	goto out_free;
}
3727
/*
 * Dispatch one request delivered through an io-uring CQE.
 *
 * Unlike the /dev/fuse path, header (in), opcode argument (op_in) and data
 * payload arrive as separate pointers, so dispatch goes through the
 * fuse_ll_ops2 table. The availability check still uses fuse_ll_ops so the
 * same opcode range applies to both tables.
 */
void fuse_session_process_uring_cqe(struct fuse_session *se,
				    struct fuse_req *req,
				    struct fuse_in_header *in, void *op_in,
				    void *op_payload, size_t payload_len)
{
	int err;

	fuse_session_in2req(req, in);

	/* Only the handshake opcode is valid before INIT, none after */
	err = fuse_req_opcode_sanity_ok(se, in->opcode);
	if (err)
		goto reply_err;

	/* 'allow_root': reject requests from other unprivileged users */
	err = fuse_req_check_allow_root(se, in->opcode, in->uid);
	if (err)
		goto reply_err;

	err = ENOSYS;
	if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
		goto reply_err;

	if (se->debug) {
		fuse_log(
			FUSE_LOG_DEBUG,
			"cqe unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
			(unsigned long long)in->unique,
			opname((enum fuse_opcode)in->opcode), in->opcode,
			(unsigned long long)in->nodeid, payload_len, in->pid);
	}

	if (in->opcode == FUSE_WRITE && se->op.write_buf) {
		/* Hand the payload to write_buf as a single memory buffer */
		struct fuse_bufvec bufv = {
			.buf[0] = { .size = payload_len,
				    .flags = 0,
				    .mem = op_payload },
			.count = 1,
		};
		_do_write_buf(req, in->nodeid, op_in, &bufv);
	} else if (in->opcode == FUSE_NOTIFY_REPLY) {
		struct fuse_buf buf = { .size = payload_len,
					.mem = op_payload };
		do_notify_reply(req, in->nodeid, op_in, &buf);
	} else {
		fuse_ll_ops2[in->opcode].func(req, in->nodeid, op_in,
					      op_payload);
	}

	return;

reply_err:
	fuse_reply_err(req, err);
}
3780
3781#define LL_OPTION(n,o,v) \
3782 { n, offsetof(struct fuse_session, o), v }
3783
3784static const struct fuse_opt fuse_ll_opts[] = {
3785 LL_OPTION("debug", debug, 1),
3786 LL_OPTION("-d", debug, 1),
3787 LL_OPTION("--debug", debug, 1),
3788 LL_OPTION("allow_root", deny_others, 1),
3789 LL_OPTION("io_uring", uring.enable, 1),
3790 LL_OPTION("io_uring_q_depth=%u", uring.q_depth, -1),
3792};
3793
3794void fuse_lowlevel_version(void)
3795{
3796 printf("using FUSE kernel interface version %i.%i\n",
3797 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
3798 fuse_mount_version();
3799}
3800
/* Print the low-level options relevant to end users (shown by --help). */
void fuse_lowlevel_help(void)
{
	/* These are not all options, but the ones that are
	   potentially of interest to an end-user */
	printf(
" -o allow_other allow access by all users\n"
" -o allow_root allow access by root\n"
" -o auto_unmount auto unmount on process termination\n"
" -o io_uring enable io-uring\n"
" -o io_uring_q_depth=<n> io-uring queue depth\n"
);
}
3813
3814void fuse_session_destroy(struct fuse_session *se)
3815{
3816 struct fuse_ll_pipe *llp;
3817
3818 if (se->got_init && !se->got_destroy) {
3819 if (se->op.destroy)
3820 se->op.destroy(se->userdata);
3821 }
3822 llp = pthread_getspecific(se->pipe_key);
3823 if (llp != NULL)
3824 fuse_ll_pipe_free(llp);
3825 pthread_key_delete(se->pipe_key);
3826 sem_destroy(&se->mt_finish);
3827 pthread_mutex_destroy(&se->mt_lock);
3828 pthread_mutex_destroy(&se->lock);
3829 free(se->cuse_data);
3830 if (se->fd != -1)
3831 close(se->fd);
3832 if (se->io != NULL)
3833 free(se->io);
3834 destroy_mount_opts(se->mo);
3835 free(se);
3836}
3837
3838
/* pthread key destructor: release the exiting thread's splice pipe. */
static void fuse_ll_pipe_destructor(void *data)
{
	fuse_ll_pipe_free((struct fuse_ll_pipe *)data);
}
3844
/*
 * Free a request buffer allocated by buf_alloc(size, true).
 *
 * buf_alloc() returns a pointer advanced by (pagesize - write_header_sz)
 * from the start of the aligned allocation; undo exactly that offset to
 * recover the pointer free() expects.
 *
 * NOTE(review): this assumes buf->mem came from the internal/aligned
 * buf_alloc() path — a buffer from the plain malloc() path would be freed
 * at the wrong address. Confirm callers only pass internal buffers.
 */
void fuse_buf_free(struct fuse_buf *buf)
{
	if (buf->mem == NULL)
		return;

	size_t write_header_sz =
		sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in);

	char *ptr = (char *)buf->mem - pagesize + write_header_sz;
	free(ptr);
	buf->mem = NULL;
}
3857
/*
 * This is used to allocate buffers that hold fuse requests
 */
static void *buf_alloc(size_t size, bool internal)
{
	/*
	 * For libfuse internal caller add in alignment. That cannot be done
	 * for an external caller, as it is not guaranteed that the external
	 * caller frees the raw pointer.
	 */
	if (internal) {
		size_t write_header_sz = sizeof(struct fuse_in_header) +
					 sizeof(struct fuse_write_in);
		/* ROUND_UP also keeps the size a multiple of the alignment,
		   as aligned_alloc() requires */
		size_t new_size = ROUND_UP(size + write_header_sz, pagesize);

		char *buf = aligned_alloc(pagesize, new_size);
		if (buf == NULL)
			return NULL;

		/* Shift the pointer so the payload following the in/write
		   headers lands on a page boundary; fuse_buf_free() undoes
		   exactly this offset */
		buf += pagesize - write_header_sz;

		return buf;
	} else {
		return malloc(size);
	}
}
3884
3885/*
3886 *@param internal true if called from libfuse internal code
3887 */
3888static int _fuse_session_receive_buf(struct fuse_session *se,
3889 struct fuse_buf *buf, struct fuse_chan *ch,
3890 bool internal)
3891{
3892 int err;
3893 ssize_t res;
3894 size_t bufsize;
3895#ifdef HAVE_SPLICE
3896 struct fuse_ll_pipe *llp;
3897 struct fuse_buf tmpbuf;
3898
3899pipe_retry:
3900 bufsize = se->bufsize;
3901
3902 if (se->conn.proto_minor < 14 ||
3903 !(se->conn.want_ext & FUSE_CAP_SPLICE_READ))
3904 goto fallback;
3905
3906 llp = fuse_ll_get_pipe(se);
3907 if (llp == NULL)
3908 goto fallback;
3909
3910 if (llp->size < bufsize) {
3911 if (llp->can_grow) {
3912 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
3913 if (res == -1) {
3914 llp->can_grow = 0;
3915 res = grow_pipe_to_max(llp->pipe[0]);
3916 if (res > 0)
3917 llp->size = res;
3918 goto fallback;
3919 }
3920 llp->size = res;
3921 }
3922 if (llp->size < bufsize)
3923 goto fallback;
3924 }
3925
3926 if (se->io != NULL && se->io->splice_receive != NULL) {
3927 res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
3928 llp->pipe[1], NULL, bufsize, 0,
3929 se->userdata);
3930 } else {
3931 res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
3932 bufsize, 0);
3933 }
3934 err = errno;
3935 trace_request_receive(err);
3936
3937 if (fuse_session_exited(se))
3938 return 0;
3939
3940 if (res == -1) {
3941 if (err == ENODEV) {
3942 /* Filesystem was unmounted, or connection was aborted
3943 via /sys/fs/fuse/connections */
3945 return 0;
3946 }
3947
3948 /* FUSE_INIT might have increased the required bufsize */
3949 if (err == EINVAL && bufsize < se->bufsize) {
3950 fuse_ll_clear_pipe(se);
3951 goto pipe_retry;
3952 }
3953
3954 if (err != EINTR && err != EAGAIN)
3955 perror("fuse: splice from device");
3956 return -err;
3957 }
3958
3959 if (res < sizeof(struct fuse_in_header)) {
3960 fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
3961 return -EIO;
3962 }
3963
3964 tmpbuf = (struct fuse_buf){
3965 .size = res,
3966 .flags = FUSE_BUF_IS_FD,
3967 .fd = llp->pipe[0],
3968 };
3969
3970 /*
3971 * Don't bother with zero copy for small requests.
3972 * fuse_loop_mt() needs to check for FORGET so this more than
3973 * just an optimization.
3974 */
3975 if (res < sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in) +
3976 pagesize) {
3977 struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
3978 struct fuse_bufvec dst = { .count = 1 };
3979
3980 if (!buf->mem) {
3981 buf->mem = buf_alloc(bufsize, internal);
3982 if (!buf->mem) {
3983 fuse_log(
3984 FUSE_LOG_ERR,
3985 "fuse: failed to allocate read buffer\n");
3986 return -ENOMEM;
3987 }
3988 buf->mem_size = bufsize;
3989 }
3990 buf->size = bufsize;
3991 buf->flags = 0;
3992 dst.buf[0] = *buf;
3993
3994 res = fuse_buf_copy(&dst, &src, 0);
3995 if (res < 0) {
3996 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
3997 strerror(-res));
3998 fuse_ll_clear_pipe(se);
3999 return res;
4000 }
4001 if (res < tmpbuf.size) {
4002 fuse_log(FUSE_LOG_ERR,
4003 "fuse: copy from pipe: short read\n");
4004 fuse_ll_clear_pipe(se);
4005 return -EIO;
4006 }
4007 assert(res == tmpbuf.size);
4008
4009 } else {
4010 /* Don't overwrite buf->mem, as that would cause a leak */
4011 buf->fd = tmpbuf.fd;
4012 buf->flags = tmpbuf.flags;
4013 }
4014 buf->size = tmpbuf.size;
4015
4016 return res;
4017
4018fallback:
4019#endif
4020 bufsize = internal ? buf->mem_size : se->bufsize;
4021 if (!buf->mem) {
4022 bufsize = se->bufsize; /* might have changed */
4023 buf->mem = buf_alloc(bufsize, internal);
4024 if (!buf->mem) {
4025 fuse_log(FUSE_LOG_ERR,
4026 "fuse: failed to allocate read buffer\n");
4027 return -ENOMEM;
4028 }
4029
4030 if (internal)
4031 buf->mem_size = bufsize;
4032 }
4033
4034restart:
4035 if (se->io != NULL) {
4036 /* se->io->read is never NULL if se->io is not NULL as
4037 specified by fuse_session_custom_io()*/
4038 res = se->io->read(ch ? ch->fd : se->fd, buf->mem, bufsize,
4039 se->userdata);
4040 } else {
4041 res = read(ch ? ch->fd : se->fd, buf->mem, bufsize);
4042 }
4043 err = errno;
4044 trace_request_receive(err);
4045
4046 if (fuse_session_exited(se))
4047 return 0;
4048 if (res == -1) {
4049 if (err == EINVAL && internal && se->bufsize > bufsize) {
4050 /* FUSE_INIT might have increased the required bufsize */
4051 bufsize = se->bufsize;
4052 void *newbuf = buf_alloc(bufsize, internal);
4053 if (!newbuf) {
4054 fuse_log(
4055 FUSE_LOG_ERR,
4056 "fuse: failed to (re)allocate read buffer\n");
4057 return -ENOMEM;
4058 }
4059 fuse_buf_free(buf);
4060 buf->mem = newbuf;
4061 buf->mem_size = bufsize;
4062 goto restart;
4063 }
4064
4065 /* ENOENT means the operation was interrupted, it's safe
4066 to restart */
4067 if (err == ENOENT)
4068 goto restart;
4069
4070 if (err == ENODEV) {
4071 /* Filesystem was unmounted, or connection was aborted
4072 via /sys/fs/fuse/connections */
4074 return 0;
4075 }
4076 /* Errors occurring during normal operation: EINTR (read
4077 interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
4078 umounted) */
4079 if (err != EINTR && err != EAGAIN)
4080 perror("fuse: reading device");
4081 return -err;
4082 }
4083 if ((size_t)res < sizeof(struct fuse_in_header)) {
4084 fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
4085 return -EIO;
4086 }
4087
4088 buf->size = res;
4089
4090 return res;
4091}
4092
/*
 * Public entry point: receive one request into a caller-managed buffer on
 * the default channel (no internal buffer reallocation).
 */
int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
{
	return _fuse_session_receive_buf(se, buf, NULL, false);
}
4097
4098/* libfuse internal handler */
4099int fuse_session_receive_buf_internal(struct fuse_session *se,
4100 struct fuse_buf *buf,
4101 struct fuse_chan *ch)
4102{
4103 /*
4104 * if run internally thread buffers are from libfuse - we can
4105 * reallocate them
4106 */
4107 if (unlikely(!se->got_init) && !se->buf_reallocable)
4108 se->buf_reallocable = true;
4109
4110 return _fuse_session_receive_buf(se, buf, ch, true);
4111}
4112
/*
 * Allocate and initialize a session object.
 *
 * Validates the arguments, parses low-level and mount options, and sets up
 * the request lists, locks and the per-thread pipe key. The device is not
 * opened here — that happens in fuse_session_mount().
 *
 * Returns the new session, or NULL on error (resources unwound via the
 * out1..out5 labels; note that out4 frees the caller's args).
 */
struct fuse_session *
fuse_session_new_versioned(struct fuse_args *args,
			   const struct fuse_lowlevel_ops *op, size_t op_size,
			   struct libfuse_version *version, void *userdata)
{
	int err;
	struct fuse_session *se;
	struct mount_opts *mo;

	if (op == NULL || op_size == 0) {
		fuse_log(FUSE_LOG_ERR,
			 "fuse: warning: empty op list passed to fuse_session_new()\n");
		return NULL;
	}

	if (version == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: version not passed to fuse_session_new()\n");
		return NULL;
	}

	/* Caller was built against a newer libfuse: only copy the part of
	   the ops struct this library knows about */
	if (sizeof(struct fuse_lowlevel_ops) < op_size) {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
		op_size = sizeof(struct fuse_lowlevel_ops);
	}

	if (args == NULL || args->argc == 0) {
		fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
		return NULL;
	}

	se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
	if (se == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
		goto out1;
	}
	se->fd = -1;
	se->conn.max_write = FUSE_DEFAULT_MAX_PAGES_LIMIT * getpagesize();
	se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
	se->conn.max_readahead = UINT_MAX;

	/*
	 * Allow overriding with env, mostly to avoid the need to modify
	 * all tests. I.e. to test with and without io-uring being enabled.
	 */
	se->uring.enable = getenv("FUSE_URING_ENABLE") ?
				   atoi(getenv("FUSE_URING_ENABLE")) :
				   SESSION_DEF_URING_ENABLE;
	se->uring.q_depth = getenv("FUSE_URING_QUEUE_DEPTH") ?
				    atoi(getenv("FUSE_URING_QUEUE_DEPTH")) :
				    SESSION_DEF_URING_Q_DEPTH;

	/* Parse options */
	if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
		goto out2;
	if(se->deny_others) {
		/* Allowing access only by root is done by instructing
		 * kernel to allow access by everyone, and then restricting
		 * access to root and mountpoint owner in libfuse.
		 */
		// We may be adding the option a second time, but
		// that doesn't hurt.
		if(fuse_opt_add_arg(args, "-oallow_other") == -1)
			goto out2;
	}
	mo = parse_mount_opts(args);
	if (mo == NULL)
		goto out3;

	/* All options must have been consumed by now; a lone leading dash
	   argument is tolerated, anything else is reported as unknown */
	if(args->argc == 1 &&
	   args->argv[0][0] == '-') {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
			"will be ignored\n");
	} else if (args->argc != 1) {
		int i;
		fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
		for(i = 1; i < args->argc-1; i++)
			fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
		fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
		goto out4;
	}

	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);

	list_init_req(&se->list);
	list_init_req(&se->interrupts);
	list_init_nreq(&se->notify_list);
	se->notify_ctr = 1;
	pthread_mutex_init(&se->lock, NULL);
	sem_init(&se->mt_finish, 0, 0);
	pthread_mutex_init(&se->mt_lock, NULL);

	err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
	if (err) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
			 strerror(err));
		goto out5;
	}

	memcpy(&se->op, op, op_size);
	se->owner = getuid();
	se->userdata = userdata;

	se->mo = mo;

	/* Fuse server application should pass the version it was compiled
	 * against and pass it. If a libfuse version accidentally introduces an
	 * ABI incompatibility, it might be possible to 'fix' that at run time,
	 * by checking the version numbers.
	 */
	se->version = *version;

	return se;

out5:
	sem_destroy(&se->mt_finish);
	pthread_mutex_destroy(&se->mt_lock);
	pthread_mutex_destroy(&se->lock);
out4:
	fuse_opt_free_args(args);
out3:
	if (mo != NULL)
		destroy_mount_opts(mo);
out2:
	free(se);
out1:
	return NULL;
}
4241
4242struct fuse_session *fuse_session_new_30(struct fuse_args *args,
4243 const struct fuse_lowlevel_ops *op,
4244 size_t op_size, void *userdata);
4245struct fuse_session *fuse_session_new_30(struct fuse_args *args,
4246 const struct fuse_lowlevel_ops *op,
4247 size_t op_size,
4248 void *userdata)
4249{
4250 struct fuse_lowlevel_ops null_ops = { 0 };
4251
4252 /* unknown version */
4253 struct libfuse_version version = { 0 };
4254
4255 /*
4256 * This function is the ABI interface function from fuse_session_new in
4257 * compat.c. External libraries like "fuser" might call fuse_session_new()
4258 * with NULL ops and then pass that session to fuse_session_mount().
4259 * The actual FUSE operations are handled in their own library.
4260 */
4261 if (op == NULL) {
4262 op = &null_ops;
4263 op_size = sizeof(null_ops);
4264 }
4265
4266 return fuse_session_new_versioned(args, op, op_size, &version,
4267 userdata);
4268}
4269
/*
 * Install custom I/O callbacks and a custom device fd on the session.
 *
 * io->read and io->writev are mandatory — the rest of libfuse relies on
 * them being non-NULL when se->io is set (see _fuse_session_receive_buf).
 * A copy of *io (up to op_size bytes) is stored, so the caller's struct
 * need not outlive this call.
 *
 * Returns 0 on success or a negative errno.
 */
FUSE_SYMVER("fuse_session_custom_io_317", "fuse_session_custom_io@@FUSE_3.17")
int fuse_session_custom_io_317(struct fuse_session *se,
			const struct fuse_custom_io *io, size_t op_size, int fd)
{
	/* Caller built against a newer libfuse: copy only the known part */
	if (sizeof(struct fuse_custom_io) < op_size) {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
		op_size = sizeof(struct fuse_custom_io);
	}

	if (fd < 0) {
		fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
			"fuse_session_custom_io()\n", fd);
		return -EBADF;
	}
	if (io == NULL) {
		fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
			"fuse_session_custom_io()\n");
		return -EINVAL;
	} else if (io->read == NULL || io->writev == NULL) {
		/* If the user provides their own file descriptor, we can't
		   guarantee that the default behavior of the io operations made
		   in libfuse will function properly. Therefore, we enforce the
		   user to implement these io operations when using custom io. */
		fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
			"implement both io->read() and io->writev\n");
		return -EINVAL;
	}

	se->io = calloc(1, sizeof(struct fuse_custom_io));
	if (se->io == NULL) {
		fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
			"Error: %s\n", strerror(errno));
		return -errno;
	}

	se->fd = fd;
	memcpy(se->io, io, op_size);
	return 0;
}
4309
int fuse_session_custom_io_30(struct fuse_session *se,
			const struct fuse_custom_io *io, int fd);
FUSE_SYMVER("fuse_session_custom_io_30", "fuse_session_custom_io@FUSE_3.0")
int fuse_session_custom_io_30(struct fuse_session *se,
			const struct fuse_custom_io *io, int fd)
{
	/* Compat ABI (FUSE_3.0): callers built against the old struct layout
	 * only know the fields preceding clone_fd, so cap op_size there. */
	return fuse_session_custom_io_317(se, io,
			offsetof(struct fuse_custom_io, clone_fd), fd);
}
4319
4320int fuse_session_mount(struct fuse_session *se, const char *_mountpoint)
4321{
4322 int fd;
4323 char *mountpoint;
4324
4325 if (_mountpoint == NULL) {
4326 fuse_log(FUSE_LOG_ERR, "Invalid null-ptr mountpoint!\n");
4327 return -1;
4328 }
4329
4330 mountpoint = strdup(_mountpoint);
4331 if (mountpoint == NULL) {
4332 fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for mountpoint. Error: %s\n",
4333 strerror(errno));
4334 return -1;
4335 }
4336
4337 /*
4338 * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
4339 * would ensue.
4340 */
4341 do {
4342 fd = open("/dev/null", O_RDWR);
4343 if (fd > 2)
4344 close(fd);
4345 } while (fd >= 0 && fd <= 2);
4346
4347 /*
4348 * To allow FUSE daemons to run without privileges, the caller may open
4349 * /dev/fuse before launching the file system and pass on the file
4350 * descriptor by specifying /dev/fd/N as the mount point. Note that the
4351 * parent process takes care of performing the mount in this case.
4352 */
4353 fd = fuse_mnt_parse_fuse_fd(mountpoint);
4354 if (fd != -1) {
4355 if (fcntl(fd, F_GETFD) == -1) {
4356 fuse_log(FUSE_LOG_ERR,
4357 "fuse: Invalid file descriptor /dev/fd/%u\n",
4358 fd);
4359 goto error_out;
4360 }
4361 se->fd = fd;
4362 return 0;
4363 }
4364
4365 /* Open channel */
4366 fd = fuse_kern_mount(mountpoint, se->mo);
4367 if (fd == -1)
4368 goto error_out;
4369 se->fd = fd;
4370
4371 /* Save mountpoint */
4372 se->mountpoint = mountpoint;
4373
4374 return 0;
4375
4376error_out:
4377 free(mountpoint);
4378 return -1;
4379}
4380
/* Return the session's fuse device fd (-1 when not mounted or after
 * fuse_session_unmount()). */
int fuse_session_fd(struct fuse_session *se)
{
	return se->fd;
}
4385
/* Unmount the filesystem and release the device fd and mountpoint. */
void fuse_session_unmount(struct fuse_session *se)
{
	if (se->mountpoint != NULL) {
		/* Claim the mountpoint atomically so that a concurrent
		 * caller cannot unmount and free the same string twice. */
		char *mountpoint = atomic_exchange(&se->mountpoint, NULL);

		fuse_kern_unmount(mountpoint, se->fd);
		se->fd = -1;
		free(mountpoint);
	}
}
4396
4397#ifdef linux
/*
 * Look up the supplementary groups of the requesting process by parsing
 * the "Groups:" line of /proc/<pid>/task/<pid>/status.
 *
 * Stores up to 'size' gids into list[] and returns the total number of
 * groups found (which may exceed 'size'), or a negative errno.
 */
int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
{
	char *buf;
	size_t bufsize = 1024;
	char path[128];
	int ret;
	int fd;
	unsigned long pid = req->ctx.pid;
	char *s;

	sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);

retry:
	buf = malloc(bufsize);
	if (buf == NULL)
		return -ENOMEM;

	ret = -EIO;
	fd = open(path, O_RDONLY);
	if (fd == -1)
		goto out_free;

	ret = read(fd, buf, bufsize);
	close(fd);
	if (ret < 0) {
		ret = -EIO;
		goto out_free;
	}

	/* Buffer completely filled: the file may be longer, retry with a
	   bigger buffer (also guarantees ret < bufsize below) */
	if ((size_t)ret == bufsize) {
		free(buf);
		bufsize *= 4;
		goto retry;
	}

	buf[ret] = '\0';
	ret = -EIO;
	/* "\nGroups:" anchors on a line start; the Groups line is never the
	   first line of the status file */
	s = strstr(buf, "\nGroups:");
	if (s == NULL)
		goto out_free;

	s += 8;
	ret = 0;
	/* Parse the whitespace-separated gid list; always count all groups,
	   but only store the first 'size' of them */
	while (1) {
		char *end;
		unsigned long val = strtoul(s, &end, 0);
		if (end == s)
			break;

		s = end;
		if (ret < size)
			list[ret] = val;
		ret++;
	}

out_free:
	free(buf);
	return ret;
}
4457#else /* linux */
4458/*
4459 * This is currently not implemented on other than Linux...
4460 */
4461int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
4462{
4463 (void) req; (void) size; (void) list;
4464 return -ENOSYS;
4465}
4466#endif
4467
/* Prevent spurious data race warning - we don't care
 * about races for this flag */
/* Request termination of the session loop: set the exit flag and wake up
 * the multi-threaded loop waiting on mt_finish. */
__attribute__((no_sanitize_thread))
void fuse_session_exit(struct fuse_session *se)
{
	atomic_store_explicit(&se->mt_exited, 1, memory_order_relaxed);
	sem_post(&se->mt_finish);
}
4476
/* Clear the exit flag and error state so the session loop can be
 * restarted after fuse_session_exit(). */
__attribute__((no_sanitize_thread))
void fuse_session_reset(struct fuse_session *se)
{
	se->mt_exited = false;
	se->error = 0;
}
4483
4484__attribute__((no_sanitize_thread))
4485int fuse_session_exited(struct fuse_session *se)
4486{
4487 bool exited =
4488 atomic_load_explicit(&se->mt_exited, memory_order_relaxed);
4489
4490 return exited ? 1 : 0;
4491}
#define FUSE_CAP_IOCTL_DIR
#define FUSE_CAP_DONT_MASK
void fuse_unset_feature_flag(struct fuse_conn_info *conn, uint64_t flag)
int fuse_convert_to_conn_want_ext(struct fuse_conn_info *conn)
bool fuse_set_feature_flag(struct fuse_conn_info *conn, uint64_t flag)
#define FUSE_CAP_HANDLE_KILLPRIV
#define FUSE_CAP_AUTO_INVAL_DATA
#define FUSE_CAP_HANDLE_KILLPRIV_V2
#define FUSE_CAP_SPLICE_READ
#define FUSE_CAP_PARALLEL_DIROPS
size_t fuse_buf_size(const struct fuse_bufvec *bufv)
Definition buffer.c:22
#define FUSE_CAP_WRITEBACK_CACHE
#define FUSE_CAP_EXPIRE_ONLY
#define FUSE_CAP_ATOMIC_O_TRUNC
#define FUSE_CAP_ASYNC_READ
#define FUSE_CAP_SPLICE_WRITE
#define FUSE_CAP_CACHE_SYMLINKS
#define FUSE_CAP_POSIX_ACL
@ FUSE_BUF_IS_FD
#define FUSE_CAP_EXPORT_SUPPORT
#define FUSE_CAP_POSIX_LOCKS
#define FUSE_CAP_EXPLICIT_INVAL_DATA
#define FUSE_CAP_READDIRPLUS_AUTO
ssize_t fuse_buf_copy(struct fuse_bufvec *dst, struct fuse_bufvec *src, enum fuse_buf_copy_flags flags)
Definition buffer.c:284
#define FUSE_CAP_NO_OPENDIR_SUPPORT
#define FUSE_CAP_ASYNC_DIO
bool fuse_get_feature_flag(struct fuse_conn_info *conn, uint64_t flag)
#define FUSE_CAP_PASSTHROUGH
#define FUSE_CAP_DIRECT_IO_ALLOW_MMAP
#define FUSE_CAP_NO_OPEN_SUPPORT
#define FUSE_CAP_READDIRPLUS
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
fuse_buf_copy_flags
@ FUSE_BUF_SPLICE_NONBLOCK
@ FUSE_BUF_FORCE_SPLICE
@ FUSE_BUF_NO_SPLICE
@ FUSE_BUF_SPLICE_MOVE
#define FUSE_CAP_SETXATTR_EXT
#define FUSE_CAP_SPLICE_MOVE
#define FUSE_CAP_NO_EXPORT_SUPPORT
#define FUSE_CAP_FLOCK_LOCKS
void fuse_log(enum fuse_log_level level, const char *fmt,...)
Definition fuse_log.c:77
void fuse_session_destroy(struct fuse_session *se)
fuse_notify_entry_flags
int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags)
int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *fi)
void fuse_session_exit(struct fuse_session *se)
void(* fuse_interrupt_func_t)(fuse_req_t req, void *data)
int fuse_reply_poll(fuse_req_t req, unsigned revents)
int fuse_reply_err(fuse_req_t req, int err)
void * fuse_req_userdata(fuse_req_t req)
int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
struct fuse_req * fuse_req_t
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize, const char *name, const struct fuse_entry_param *e, off_t off)
int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov, int count)
int fuse_lowlevel_notify_delete(struct fuse_session *se, fuse_ino_t parent, fuse_ino_t child, const char *name, size_t namelen)
void fuse_session_process_buf(struct fuse_session *se, const struct fuse_buf *buf)
int fuse_session_exited(struct fuse_session *se)
int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino, size_t size, off_t offset, void *cookie)
int fuse_reply_readlink(fuse_req_t req, const char *link)
int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
void fuse_reply_none(fuse_req_t req)
int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent, const char *name, size_t namelen)
int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, size_t in_count, const struct iovec *out_iov, size_t out_count)
void fuse_lowlevel_help(void)
int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino, off_t off, off_t len)
int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
int fuse_reply_write(fuse_req_t req, size_t count)
int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent, const char *name, size_t namelen)
void fuse_session_reset(struct fuse_session *se)
int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e, const struct fuse_file_info *fi)
int fuse_reply_lseek(fuse_req_t req, off_t off)
void fuse_lowlevel_version(void)
uint64_t fuse_ino_t
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize, const char *name, const struct stat *stbuf, off_t off)
int fuse_reply_attr(fuse_req_t req, const struct stat *attr, double attr_timeout)
int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
int fuse_passthrough_open(fuse_req_t req, int fd)
int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino, off_t offset, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags)
int fuse_reply_xattr(fuse_req_t req, size_t count)
int fuse_opt_add_arg(struct fuse_args *args, const char *arg)
Definition fuse_opt.c:55
void fuse_opt_free_args(struct fuse_args *args)
Definition fuse_opt.c:34
int fuse_opt_parse(struct fuse_args *args, void *data, const struct fuse_opt opts[], fuse_opt_proc_t proc)
Definition fuse_opt.c:398
#define FUSE_OPT_END
Definition fuse_opt.h:104
int fuse_convert_to_conn_want_ext(struct fuse_conn_info *conn)
@ FUSE_BUF_IS_FD
#define FUSE_CAP_OVER_IO_URING
bool fuse_req_is_uring(fuse_req_t req)
const struct fuse_ctx * fuse_req_ctx(fuse_req_t req)
int fuse_session_fd(struct fuse_session *se)
int fuse_req_interrupted(fuse_req_t req)
int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
void fuse_session_unmount(struct fuse_session *se)
int fuse_req_get_payload(fuse_req_t req, char **payload, size_t *payload_sz, void **mr)
int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
int fuse_lowlevel_notify_increment_epoch(struct fuse_session *se)
void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func, void *data)
int fuse_reply_statx(fuse_req_t req, int flags, struct statx *statx, double attr_timeout)
char ** argv
Definition fuse_opt.h:114
enum fuse_buf_flags flags
size_t mem_size
void * mem
size_t size
struct fuse_buf buf[1]
uint64_t capable_ext
uint64_t want_ext
double entry_timeout
fuse_ino_t ino
uint64_t generation
double attr_timeout
struct stat attr
uint64_t lock_owner
uint32_t writepage
Definition fuse_common.h:66
uint32_t poll_events
uint32_t cache_readdir
Definition fuse_common.h:95
uint32_t nonseekable
Definition fuse_common.h:84
int32_t backing_id
uint32_t parallel_direct_writes
uint32_t noflush
Definition fuse_common.h:99
uint32_t flush
Definition fuse_common.h:80
uint32_t direct_io
Definition fuse_common.h:69
uint32_t keep_cache
Definition fuse_common.h:75