/* libfuse */
/* fuse_lowlevel.c */
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4
5 Implementation of (most of) the low-level FUSE API. The session loop
6 functions are implemented in separate files.
7
8 This program can be distributed under the terms of the GNU LGPLv2.
9 See the file COPYING.LIB
10*/
11
12#define _GNU_SOURCE
13
14#include "fuse_config.h"
15#include "fuse_i.h"
16#include "fuse_kernel.h"
17#include "fuse_opt.h"
18#include "fuse_misc.h"
19#include "mount_util.h"
20#include "util.h"
21
22#include <stdint.h>
23#include <stdbool.h>
24#include <stdio.h>
25#include <stdlib.h>
26#include <stddef.h>
27#include <stdalign.h>
28#include <string.h>
29#include <unistd.h>
30#include <limits.h>
31#include <errno.h>
32#include <assert.h>
33#include <sys/file.h>
34#include <sys/ioctl.h>
35
36#ifndef F_LINUX_SPECIFIC_BASE
37#define F_LINUX_SPECIFIC_BASE 1024
38#endif
39#ifndef F_SETPIPE_SZ
40#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
41#endif
42
43
44#define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
45#define OFFSET_MAX 0x7fffffffffffffffLL
46
/* Handle for a pending poll request; pairs the kernel-assigned poll
 * handle id with the session it arrived on. */
struct fuse_pollhandle {
	uint64_t kh;		/* kernel poll handle id */
	struct fuse_session *se;	/* owning session */
};
51
/* System page size, cached once at load time (used for pipe/splice
 * buffer sizing below). */
static size_t pagesize;

/* Runs before main() via the GCC/Clang constructor attribute. */
static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
{
	pagesize = getpagesize();
}
58
/* Translate a struct stat into the wire-format fuse_attr sent to the
 * kernel.  Nanosecond timestamps go through the ST_*_NSEC portability
 * macros. */
static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
{
	attr->ino = stbuf->st_ino;
	attr->mode = stbuf->st_mode;
	attr->nlink = stbuf->st_nlink;
	attr->uid = stbuf->st_uid;
	attr->gid = stbuf->st_gid;
	attr->rdev = stbuf->st_rdev;
	attr->size = stbuf->st_size;
	attr->blksize = stbuf->st_blksize;
	attr->blocks = stbuf->st_blocks;
	attr->atime = stbuf->st_atime;
	attr->mtime = stbuf->st_mtime;
	attr->ctime = stbuf->st_ctime;
	attr->atimensec = ST_ATIM_NSEC(stbuf);
	attr->mtimensec = ST_MTIM_NSEC(stbuf);
	attr->ctimensec = ST_CTIM_NSEC(stbuf);
}
77
/* Inverse direction of convert_stat: copy the fields of a kernel
 * fuse_setattr_in request into a struct stat for the filesystem's
 * setattr handler. */
static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
{
	stbuf->st_mode = attr->mode;
	stbuf->st_uid = attr->uid;
	stbuf->st_gid = attr->gid;
	stbuf->st_size = attr->size;
	stbuf->st_atime = attr->atime;
	stbuf->st_mtime = attr->mtime;
	stbuf->st_ctime = attr->ctime;
	ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
	ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
	ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
}
91
/* Return the total number of bytes covered by an iovec array. */
static size_t iov_length(const struct iovec *iov, size_t count)
{
	size_t total = 0;

	for (size_t i = 0; i < count; i++)
		total += iov[i].iov_len;

	return total;
}
101
/* Intrusive circular doubly-linked list primitives for struct
 * fuse_req (used below for the interruptible-request list). */

/* Make a request a list of one: both links point at itself. */
static void list_init_req(struct fuse_req *req)
{
	req->next = req;
	req->prev = req;
}

/* Unlink a request from whatever list it is on.  Does not reset the
 * request's own next/prev pointers. */
static void list_del_req(struct fuse_req *req)
{
	struct fuse_req *prev = req->prev;
	struct fuse_req *next = req->next;
	prev->next = next;
	next->prev = prev;
}

/* Insert *req* immediately before *next* in the circular list. */
static void list_add_req(struct fuse_req *req, struct fuse_req *next)
{
	struct fuse_req *prev = next->prev;
	req->next = next;
	req->prev = prev;
	prev->next = req;
	next->prev = req;
}
124
/* Final teardown of a request once its refcount hits zero.  The
 * channel reference must already have been dropped by the caller. */
static void destroy_req(fuse_req_t req)
{
	assert(req->ch == NULL);
	pthread_mutex_destroy(&req->lock);
	free(req);
}
131
/* Drop one reference on a request and release its channel.  When the
 * kernel supports interrupts (no_interrupt unset) the request also
 * sits on the session's interrupt list, so the bookkeeping must be
 * done under se->lock; otherwise the lock can be skipped. */
void fuse_free_req(fuse_req_t req)
{
	int ctr;
	struct fuse_session *se = req->se;

	if (se->conn.no_interrupt) {
		ctr = --req->ref_cnt;
		fuse_chan_put(req->ch);
		req->ch = NULL;
	} else {
		pthread_mutex_lock(&se->lock);
		/* Clear any registered interrupt callback and unlink from
		 * the session's request list before dropping the ref. */
		req->u.ni.func = NULL;
		req->u.ni.data = NULL;
		list_del_req(req);
		ctr = --req->ref_cnt;
		fuse_chan_put(req->ch);
		req->ch = NULL;
		pthread_mutex_unlock(&se->lock);
	}
	if (!ctr)
		destroy_req(req);
}
154
155static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
156{
157 struct fuse_req *req;
158
159 req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
160 if (req == NULL) {
161 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
162 } else {
163 req->se = se;
164 req->ref_cnt = 1;
165 list_init_req(req);
166 pthread_mutex_init(&req->lock, NULL);
167 }
168
169 return req;
170}
171
/* Send data. If *ch* is NULL, send via session master fd.
 *
 * Fills in out->len from the iovec total, emits debug logging, then
 * writes the whole reply with one writev (or the session's custom
 * writev).  Returns 0 on success or -errno on write failure. */
static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
			 struct iovec *iov, int count)
{
	struct fuse_out_header *out = iov[0].iov_base;

	assert(se != NULL);
	out->len = iov_length(iov, count);
	if (se->debug) {
		if (out->unique == 0) {
			/* unique == 0 marks a notification, not a reply */
			fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
				out->error, out->len);
		} else if (out->error) {
			fuse_log(FUSE_LOG_DEBUG,
				" unique: %llu, error: %i (%s), outsize: %i\n",
				(unsigned long long) out->unique, out->error,
				strerror(-out->error), out->len);
		} else {
			fuse_log(FUSE_LOG_DEBUG,
				" unique: %llu, success, outsize: %i\n",
				(unsigned long long) out->unique, out->len);
		}
	}

	ssize_t res;
	if (se->io != NULL)
		/* se->io->writev is never NULL if se->io is not NULL as
		   specified by fuse_session_custom_io()*/
		res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
			se->userdata);
	else
		res = writev(ch ? ch->fd : se->fd, iov, count);

	/* capture errno before any later call can clobber it */
	int err = errno;

	if (res == -1) {
		/* ENOENT means the operation was interrupted */
		if (!fuse_session_exited(se) && err != ENOENT)
			perror("fuse: writing device");
		return -err;
	}

	return 0;
}
216
217
/* Build the fuse_out_header in iov[0] and transmit the reply without
 * freeing the request.  *error* must be 0 or a negative errno; values
 * outside the valid range are logged and replaced with -ERANGE.  On
 * glibc >= 2.32 validity is checked via strerrordesc_np(), otherwise
 * by a simple range check. */
int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
			       int count)
{
	struct fuse_out_header out;

#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
	const char *str = strerrordesc_np(error * -1);
	if ((str == NULL && error != 0) || error > 0) {
#else
	if (error <= -1000 || error > 0) {
#endif
		fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
		error = -ERANGE;
	}

	out.unique = req->unique;
	out.error = error;

	/* slot 0 is always the header; the caller's payload follows */
	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(struct fuse_out_header);

	return fuse_send_msg(req->se, req->ch, iov, count);
}
241
242static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
243 int count)
244{
245 int res;
246
247 res = fuse_send_reply_iov_nofree(req, error, iov, count);
248 fuse_free_req(req);
249 return res;
250}
251
252static int send_reply(fuse_req_t req, int error, const void *arg,
253 size_t argsize)
254{
255 struct iovec iov[2];
256 int count = 1;
257 if (argsize) {
258 iov[1].iov_base = (void *) arg;
259 iov[1].iov_len = argsize;
260 count++;
261 }
262 return send_reply_iov(req, error, iov, count);
263}
264
265int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
266{
267 int res;
268 struct iovec *padded_iov;
269
270 padded_iov = malloc((count + 1) * sizeof(struct iovec));
271 if (padded_iov == NULL)
272 return fuse_reply_err(req, ENOMEM);
273
274 memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
275 count++;
276
277 res = send_reply_iov(req, 0, padded_iov, count);
278 free(padded_iov);
279
280 return res;
281}
282
283
/* Serialize one directory entry into *buf* in fuse_dirent wire format.
 *
 * `buf` is allowed to be empty so that the proper size may be
 * allocated by the caller: when buf is NULL or too small, only the
 * padded entry size is returned and nothing is written.  Only st_ino
 * and the file-type bits of st_mode are taken from *stbuf*. */
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
			 const char *name, const struct stat *stbuf, off_t off)
{
	(void)req;
	size_t namelen;
	size_t entlen;
	size_t entlen_padded;
	struct fuse_dirent *dirent;

	namelen = strlen(name);
	entlen = FUSE_NAME_OFFSET + namelen;
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);

	if ((buf == NULL) || (entlen_padded > bufsize))
		return entlen_padded;

	dirent = (struct fuse_dirent*) buf;
	dirent->ino = stbuf->st_ino;
	dirent->off = off;
	dirent->namelen = namelen;
	/* d_type encoding: top four bits of st_mode */
	dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
	memcpy(dirent->name, name, namelen);
	/* zero the alignment padding after the name */
	memset(dirent->name + namelen, 0, entlen_padded - entlen);

	return entlen_padded;
}
312
/* Translate a struct statvfs into the wire-format fuse_kstatfs. */
static void convert_statfs(const struct statvfs *stbuf,
			   struct fuse_kstatfs *kstatfs)
{
	kstatfs->bsize = stbuf->f_bsize;
	kstatfs->frsize = stbuf->f_frsize;
	kstatfs->blocks = stbuf->f_blocks;
	kstatfs->bfree = stbuf->f_bfree;
	kstatfs->bavail = stbuf->f_bavail;
	kstatfs->files = stbuf->f_files;
	kstatfs->ffree = stbuf->f_ffree;
	kstatfs->namelen = stbuf->f_namemax;
}
325
/* Success reply with an optional payload. */
static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
{
	return send_reply(req, 0, arg, argsize);
}

/* Public error reply: *err* is a positive errno, negated for the wire. */
int fuse_reply_err(fuse_req_t req, int err)
{
	return send_reply(req, -err, NULL, 0);
}
335
337{
338 fuse_free_req(req);
339}
340
/* Clamp a floating-point timeout (seconds) to the whole-second part,
 * saturating at ULONG_MAX and flooring negatives to 0. */
static unsigned long calc_timeout_sec(double t)
{
	if (t < 0.0)
		return 0;
	if (t > (double) ULONG_MAX)
		return ULONG_MAX;
	return (unsigned long) t;
}

/* Fractional part of the same timeout in nanoseconds, clamped into
 * [0, 999999999]. */
static unsigned int calc_timeout_nsec(double t)
{
	double frac = t - (double) calc_timeout_sec(t);

	if (frac < 0.0)
		return 0;
	return (frac >= 0.999999999) ? 999999999
				     : (unsigned int) (frac * 1.0e9);
}
361
/* Fill a wire-format fuse_entry_out from a fuse_entry_param, splitting
 * the floating-point timeouts into sec/nsec pairs. */
static void fill_entry(struct fuse_entry_out *arg,
		       const struct fuse_entry_param *e)
{
	arg->nodeid = e->ino;
	arg->generation = e->generation;
	arg->entry_valid = calc_timeout_sec(e->entry_timeout);
	arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
	arg->attr_valid = calc_timeout_sec(e->attr_timeout);
	arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
	convert_stat(&e->attr, &arg->attr);
}
373
/* Serialize one READDIRPLUS entry (fuse_direntplus = entry_out +
 * dirent) into *buf*.
 *
 * `buf` is allowed to be empty so that the proper size may be
 * allocated by the caller: when buf is NULL or too small, only the
 * padded entry size is returned and nothing is written. */
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
			      const char *name,
			      const struct fuse_entry_param *e, off_t off)
{
	(void)req;
	size_t namelen;
	size_t entlen;
	size_t entlen_padded;

	namelen = strlen(name);
	entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);
	if ((buf == NULL) || (entlen_padded > bufsize))
		return entlen_padded;

	struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
	memset(&dp->entry_out, 0, sizeof(dp->entry_out));
	fill_entry(&dp->entry_out, e);

	struct fuse_dirent *dirent = &dp->dirent;
	dirent->ino = e->attr.st_ino;
	dirent->off = off;
	dirent->namelen = namelen;
	/* d_type encoding: top four bits of st_mode */
	dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
	memcpy(dirent->name, name, namelen);
	/* zero the alignment padding after the name */
	memset(dirent->name + namelen, 0, entlen_padded - entlen);

	return entlen_padded;
}
405
406static void fill_open(struct fuse_open_out *arg,
407 const struct fuse_file_info *f)
408{
409 arg->fh = f->fh;
410 if (f->backing_id > 0) {
411 arg->backing_id = f->backing_id;
412 arg->open_flags |= FOPEN_PASSTHROUGH;
413 }
414 if (f->direct_io)
415 arg->open_flags |= FOPEN_DIRECT_IO;
416 if (f->keep_cache)
417 arg->open_flags |= FOPEN_KEEP_CACHE;
418 if (f->cache_readdir)
419 arg->open_flags |= FOPEN_CACHE_DIR;
420 if (f->nonseekable)
421 arg->open_flags |= FOPEN_NONSEEKABLE;
422 if (f->noflush)
423 arg->open_flags |= FOPEN_NOFLUSH;
425 arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
426}
427
429{
430 struct fuse_entry_out arg;
431 size_t size = req->se->conn.proto_minor < 9 ?
432 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
433
434 /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
435 negative entry */
436 if (!e->ino && req->se->conn.proto_minor < 4)
437 return fuse_reply_err(req, ENOENT);
438
439 memset(&arg, 0, sizeof(arg));
440 fill_entry(&arg, e);
441 return send_reply_ok(req, &arg, size);
442}
443
445 const struct fuse_file_info *f)
446{
447 alignas(uint64_t) char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
448 size_t entrysize = req->se->conn.proto_minor < 9 ?
449 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
450 struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
451 struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
452
453 memset(buf, 0, sizeof(buf));
454 fill_entry(earg, e);
455 fill_open(oarg, f);
456 return send_reply_ok(req, buf,
457 entrysize + sizeof(struct fuse_open_out));
458}
459
/* Reply to GETATTR/SETATTR with attributes and a cache timeout.  Uses
 * the truncated compat layout for protocol minor < 9. */
int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
		    double attr_timeout)
{
	struct fuse_attr_out arg;
	size_t size = req->se->conn.proto_minor < 9 ?
		FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);

	memset(&arg, 0, sizeof(arg));
	arg.attr_valid = calc_timeout_sec(attr_timeout);
	arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
	convert_stat(attr, &arg.attr);

	return send_reply_ok(req, &arg, size);
}
474
/* Reply to READLINK: the target path is sent without its NUL. */
int fuse_reply_readlink(fuse_req_t req, const char *linkname)
{
	return send_reply_ok(req, linkname, strlen(linkname));
}
479
481{
482 struct fuse_backing_map map = { .fd = fd };
483 int ret;
484
485 ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
486 if (ret <= 0) {
487 fuse_log(FUSE_LOG_ERR, "fuse: passthrough_open: %s\n", strerror(errno));
488 return 0;
489 }
490
491 return ret;
492}
493
/* Unregister a passthrough backing id previously returned by
 * fuse_passthrough_open().  Returns the ioctl result (negative on
 * error, after logging). */
int fuse_passthrough_close(fuse_req_t req, int backing_id)
{
	int ret;

	ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
	if (ret < 0)
		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_close: %s\n", strerror(errno));

	return ret;
}
504
506{
507 struct fuse_open_out arg;
508
509 memset(&arg, 0, sizeof(arg));
510 fill_open(&arg, f);
511 return send_reply_ok(req, &arg, sizeof(arg));
512}
513
514int fuse_reply_write(fuse_req_t req, size_t count)
515{
516 struct fuse_write_out arg;
517
518 memset(&arg, 0, sizeof(arg));
519 arg.size = count;
520
521 return send_reply_ok(req, &arg, sizeof(arg));
522}
523
/* Reply with a raw data buffer (READ, READDIR, ...). */
int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
{
	return send_reply_ok(req, buf, size);
}
528
/* Non-splice data send path: flatten *buf* into one memory buffer and
 * append it to *iov* before writev.  A single in-memory source buffer
 * is passed through without copying. */
static int fuse_send_data_iov_fallback(struct fuse_session *se,
				       struct fuse_chan *ch,
				       struct iovec *iov, int iov_count,
				       struct fuse_bufvec *buf,
				       size_t len)
{
	struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
	void *mbuf;
	int res;

	/* Optimize common case */
	if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
	    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
		/* FIXME: also avoid memory copy if there are multiple buffers
		   but none of them contain an fd */

		iov[iov_count].iov_base = buf->buf[0].mem;
		iov[iov_count].iov_len = len;
		iov_count++;
		return fuse_send_msg(se, ch, iov, iov_count);
	}

	/* page-aligned bounce buffer for fd-backed / fragmented sources */
	res = posix_memalign(&mbuf, pagesize, len);
	if (res != 0)
		return res;

	mem_buf.buf[0].mem = mbuf;
	res = fuse_buf_copy(&mem_buf, buf, 0);
	if (res < 0) {
		free(mbuf);
		return -res;
	}
	/* fuse_buf_copy may return fewer bytes than requested (e.g. EOF) */
	len = res;

	iov[iov_count].iov_base = mbuf;
	iov[iov_count].iov_len = len;
	iov_count++;
	res = fuse_send_msg(se, ch, iov, iov_count);
	free(mbuf);

	return res;
}
571
/* Per-thread pipe used for splice-based replies. */
struct fuse_ll_pipe {
	size_t size;		/* current pipe capacity in bytes */
	int can_grow;		/* still worth trying F_SETPIPE_SZ? */
	int pipe[2];		/* [0] read end, [1] write end */
};

/* Close both ends and free the wrapper. */
static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
{
	close(llp->pipe[0]);
	close(llp->pipe[1]);
	free(llp);
}
584
585#ifdef HAVE_SPLICE
586#if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
/* pipe2() fallback: create a pipe and set O_NONBLOCK/FD_CLOEXEC with
 * fcntl.  Note this is not atomic w.r.t. fork+exec, unlike pipe2. */
static int fuse_pipe(int fds[2])
{
	int rv = pipe(fds);

	if (rv == -1)
		return rv;

	if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
	    fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
		close(fds[0]);
		close(fds[1]);
		rv = -1;
	}
	return rv;
}
604#else
/* Preferred variant: atomically create a non-blocking close-on-exec pipe. */
static int fuse_pipe(int fds[2])
{
	return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
}
609#endif
610
/* Return this thread's splice pipe, creating it on first use.  The
 * pipe is stored in thread-local storage keyed by se->pipe_key and is
 * freed via the key's destructor / fuse_ll_clear_pipe().  Returns NULL
 * on allocation or pipe-creation failure. */
static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
{
	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
	if (llp == NULL) {
		int res;

		llp = malloc(sizeof(struct fuse_ll_pipe));
		if (llp == NULL)
			return NULL;

		res = fuse_pipe(llp->pipe);
		if (res == -1) {
			free(llp);
			return NULL;
		}

		/*
		 *the default size is 16 pages on linux
		 */
		llp->size = pagesize * 16;
		llp->can_grow = 1;

		pthread_setspecific(se->pipe_key, llp);
	}

	return llp;
}
638#endif
639
/* Destroy this thread's splice pipe (used when the pipe may contain
 * stale data after an error). */
static void fuse_ll_clear_pipe(struct fuse_session *se)
{
	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
	if (llp) {
		pthread_setspecific(se->pipe_key, NULL);
		fuse_ll_pipe_free(llp);
	}
}
648
649#if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
/* Drain exactly *len* bytes previously vmspliced into the pipe back
 * into *buf*.  Any failure or short read is an internal error (-EIO).
 * NOTE(review): `res != len` compares int against size_t — fine for the
 * small lengths used here, but worth confirming if lengths can exceed
 * INT_MAX. */
static int read_back(int fd, char *buf, size_t len)
{
	int res;

	res = read(fd, buf, len);
	if (res == -1) {
		fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
		return -EIO;
	}
	if (res != len) {
		fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
		return -EIO;
	}
	return 0;
}
665
/* Grow *pipefd* to the system-wide maximum pipe size read from
 * /proc/sys/fs/pipe-max-size.  Returns the new size on success or a
 * negative errno / parse error. */
static int grow_pipe_to_max(int pipefd)
{
	int res;
	long max;
	long maxfd;	/* NOTE(review): holds an fd; int would suffice */
	char buf[32];

	maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
	if (maxfd < 0)
		return -errno;

	res = read(maxfd, buf, sizeof(buf) - 1);
	if (res < 0) {
		int saved_errno;

		/* close() may clobber errno; preserve the read error */
		saved_errno = errno;
		close(maxfd);
		return -saved_errno;
	}
	close(maxfd);
	buf[res] = '\0';

	res = libfuse_strtol(buf, &max);
	if (res)
		return res;
	res = fcntl(pipefd, F_SETPIPE_SZ, max);
	if (res < 0)
		return -errno;
	return max;
}
696
697static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
698 struct iovec *iov, int iov_count,
699 struct fuse_bufvec *buf, unsigned int flags)
700{
701 int res;
702 size_t len = fuse_buf_size(buf);
703 struct fuse_out_header *out = iov[0].iov_base;
704 struct fuse_ll_pipe *llp;
705 int splice_flags;
706 size_t pipesize;
707 size_t total_buf_size;
708 size_t idx;
709 size_t headerlen;
710 struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
711
712 if (se->broken_splice_nonblock)
713 goto fallback;
714
715 if (flags & FUSE_BUF_NO_SPLICE)
716 goto fallback;
717
718 total_buf_size = 0;
719 for (idx = buf->idx; idx < buf->count; idx++) {
720 total_buf_size += buf->buf[idx].size;
721 if (idx == buf->idx)
722 total_buf_size -= buf->off;
723 }
724 if (total_buf_size < 2 * pagesize)
725 goto fallback;
726
727 if (se->conn.proto_minor < 14 ||
728 !(se->conn.want_ext & FUSE_CAP_SPLICE_WRITE))
729 goto fallback;
730
731 llp = fuse_ll_get_pipe(se);
732 if (llp == NULL)
733 goto fallback;
734
735
736 headerlen = iov_length(iov, iov_count);
737
738 out->len = headerlen + len;
739
740 /*
741 * Heuristic for the required pipe size, does not work if the
742 * source contains less than page size fragments
743 */
744 pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
745
746 if (llp->size < pipesize) {
747 if (llp->can_grow) {
748 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
749 if (res == -1) {
750 res = grow_pipe_to_max(llp->pipe[0]);
751 if (res > 0)
752 llp->size = res;
753 llp->can_grow = 0;
754 goto fallback;
755 }
756 llp->size = res;
757 }
758 if (llp->size < pipesize)
759 goto fallback;
760 }
761
762
763 res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
764 if (res == -1)
765 goto fallback;
766
767 if (res != headerlen) {
768 res = -EIO;
769 fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
770 headerlen);
771 goto clear_pipe;
772 }
773
774 pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
775 pipe_buf.buf[0].fd = llp->pipe[1];
776
777 res = fuse_buf_copy(&pipe_buf, buf,
779 if (res < 0) {
780 if (res == -EAGAIN || res == -EINVAL) {
781 /*
782 * Should only get EAGAIN on kernels with
783 * broken SPLICE_F_NONBLOCK support (<=
784 * 2.6.35) where this error or a short read is
785 * returned even if the pipe itself is not
786 * full
787 *
788 * EINVAL might mean that splice can't handle
789 * this combination of input and output.
790 */
791 if (res == -EAGAIN)
792 se->broken_splice_nonblock = 1;
793
794 pthread_setspecific(se->pipe_key, NULL);
795 fuse_ll_pipe_free(llp);
796 goto fallback;
797 }
798 res = -res;
799 goto clear_pipe;
800 }
801
802 if (res != 0 && res < len) {
803 struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
804 void *mbuf;
805 size_t now_len = res;
806 /*
807 * For regular files a short count is either
808 * 1) due to EOF, or
809 * 2) because of broken SPLICE_F_NONBLOCK (see above)
810 *
811 * For other inputs it's possible that we overflowed
812 * the pipe because of small buffer fragments.
813 */
814
815 res = posix_memalign(&mbuf, pagesize, len);
816 if (res != 0)
817 goto clear_pipe;
818
819 mem_buf.buf[0].mem = mbuf;
820 mem_buf.off = now_len;
821 res = fuse_buf_copy(&mem_buf, buf, 0);
822 if (res > 0) {
823 char *tmpbuf;
824 size_t extra_len = res;
825 /*
826 * Trickiest case: got more data. Need to get
827 * back the data from the pipe and then fall
828 * back to regular write.
829 */
830 tmpbuf = malloc(headerlen);
831 if (tmpbuf == NULL) {
832 free(mbuf);
833 res = ENOMEM;
834 goto clear_pipe;
835 }
836 res = read_back(llp->pipe[0], tmpbuf, headerlen);
837 free(tmpbuf);
838 if (res != 0) {
839 free(mbuf);
840 goto clear_pipe;
841 }
842 res = read_back(llp->pipe[0], mbuf, now_len);
843 if (res != 0) {
844 free(mbuf);
845 goto clear_pipe;
846 }
847 len = now_len + extra_len;
848 iov[iov_count].iov_base = mbuf;
849 iov[iov_count].iov_len = len;
850 iov_count++;
851 res = fuse_send_msg(se, ch, iov, iov_count);
852 free(mbuf);
853 return res;
854 }
855 free(mbuf);
856 res = now_len;
857 }
858 len = res;
859 out->len = headerlen + len;
860
861 if (se->debug) {
862 fuse_log(FUSE_LOG_DEBUG,
863 " unique: %llu, success, outsize: %i (splice)\n",
864 (unsigned long long) out->unique, out->len);
865 }
866
867 splice_flags = 0;
868 if ((flags & FUSE_BUF_SPLICE_MOVE) &&
869 (se->conn.want_ext & FUSE_CAP_SPLICE_MOVE))
870 splice_flags |= SPLICE_F_MOVE;
871
872 if (se->io != NULL && se->io->splice_send != NULL) {
873 res = se->io->splice_send(llp->pipe[0], NULL,
874 ch ? ch->fd : se->fd, NULL, out->len,
875 splice_flags, se->userdata);
876 } else {
877 res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
878 out->len, splice_flags);
879 }
880 if (res == -1) {
881 res = -errno;
882 perror("fuse: splice from pipe");
883 goto clear_pipe;
884 }
885 if (res != out->len) {
886 res = -EIO;
887 fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
888 res, out->len);
889 goto clear_pipe;
890 }
891 return 0;
892
893clear_pipe:
894 fuse_ll_clear_pipe(se);
895 return res;
896
897fallback:
898 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
899}
900#else
/* Build without splice/vmsplice support: always use the writev-based
 * fallback path. */
static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
			       struct iovec *iov, int iov_count,
			       struct fuse_bufvec *buf, unsigned int flags)
{
	size_t len = fuse_buf_size(buf);
	(void) flags;

	return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
}
910#endif
911
913 enum fuse_buf_copy_flags flags)
914{
915 struct iovec iov[2];
916 struct fuse_out_header out;
917 int res;
918
919 iov[0].iov_base = &out;
920 iov[0].iov_len = sizeof(struct fuse_out_header);
921
922 out.unique = req->unique;
923 out.error = 0;
924
925 res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
926 if (res <= 0) {
927 fuse_free_req(req);
928 return res;
929 } else {
930 return fuse_reply_err(req, res);
931 }
932}
933
/* Reply to STATFS; truncated compat layout for protocol minor < 4. */
int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
{
	struct fuse_statfs_out arg;
	size_t size = req->se->conn.proto_minor < 4 ?
		FUSE_COMPAT_STATFS_SIZE : sizeof(arg);

	memset(&arg, 0, sizeof(arg));
	convert_statfs(stbuf, &arg.st);

	return send_reply_ok(req, &arg, size);
}
945
946int fuse_reply_xattr(fuse_req_t req, size_t count)
947{
948 struct fuse_getxattr_out arg;
949
950 memset(&arg, 0, sizeof(arg));
951 arg.size = count;
952
953 return send_reply_ok(req, &arg, sizeof(arg));
954}
955
/* Reply to GETLK.  Converts struct flock to the wire format; an l_len
 * of 0 means "to end of file", encoded as OFFSET_MAX. */
int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
{
	struct fuse_lk_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.lk.type = lock->l_type;
	if (lock->l_type != F_UNLCK) {
		arg.lk.start = lock->l_start;
		if (lock->l_len == 0)
			arg.lk.end = OFFSET_MAX;
		else
			arg.lk.end = lock->l_start + lock->l_len - 1;
	}
	arg.lk.pid = lock->l_pid;
	return send_reply_ok(req, &arg, sizeof(arg));
}
972
973int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
974{
975 struct fuse_bmap_out arg;
976
977 memset(&arg, 0, sizeof(arg));
978 arg.block = idx;
979
980 return send_reply_ok(req, &arg, sizeof(arg));
981}
982
/* Convert an iovec array to the fixed-width fuse_ioctl_iovec wire
 * format (needed so 32/64-bit layouts match).  Caller frees the
 * returned array; NULL on allocation failure. */
static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
						      size_t count)
{
	struct fuse_ioctl_iovec *fiov;
	size_t i;

	fiov = malloc(sizeof(fiov[0]) * count);
	if (!fiov)
		return NULL;

	for (i = 0; i < count; i++) {
		fiov[i].base = (uintptr_t) iov[i].iov_base;
		fiov[i].len = iov[i].iov_len;
	}

	return fiov;
}
1000
1002 const struct iovec *in_iov, size_t in_count,
1003 const struct iovec *out_iov, size_t out_count)
1004{
1005 struct fuse_ioctl_out arg;
1006 struct fuse_ioctl_iovec *in_fiov = NULL;
1007 struct fuse_ioctl_iovec *out_fiov = NULL;
1008 struct iovec iov[4];
1009 size_t count = 1;
1010 int res;
1011
1012 memset(&arg, 0, sizeof(arg));
1013 arg.flags |= FUSE_IOCTL_RETRY;
1014 arg.in_iovs = in_count;
1015 arg.out_iovs = out_count;
1016 iov[count].iov_base = &arg;
1017 iov[count].iov_len = sizeof(arg);
1018 count++;
1019
1020 if (req->se->conn.proto_minor < 16) {
1021 if (in_count) {
1022 iov[count].iov_base = (void *)in_iov;
1023 iov[count].iov_len = sizeof(in_iov[0]) * in_count;
1024 count++;
1025 }
1026
1027 if (out_count) {
1028 iov[count].iov_base = (void *)out_iov;
1029 iov[count].iov_len = sizeof(out_iov[0]) * out_count;
1030 count++;
1031 }
1032 } else {
1033 /* Can't handle non-compat 64bit ioctls on 32bit */
1034 if (sizeof(void *) == 4 && req->ioctl_64bit) {
1035 res = fuse_reply_err(req, EINVAL);
1036 goto out;
1037 }
1038
1039 if (in_count) {
1040 in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
1041 if (!in_fiov)
1042 goto enomem;
1043
1044 iov[count].iov_base = (void *)in_fiov;
1045 iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1046 count++;
1047 }
1048 if (out_count) {
1049 out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1050 if (!out_fiov)
1051 goto enomem;
1052
1053 iov[count].iov_base = (void *)out_fiov;
1054 iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1055 count++;
1056 }
1057 }
1058
1059 res = send_reply_iov(req, 0, iov, count);
1060out:
1061 free(in_fiov);
1062 free(out_fiov);
1063
1064 return res;
1065
1066enomem:
1067 res = fuse_reply_err(req, ENOMEM);
1068 goto out;
1069}
1070
/* Reply to IOCTL with a result code and optional output data. */
int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
{
	struct fuse_ioctl_out arg;
	struct iovec iov[3];
	size_t count = 1;	/* slot 0 reserved for the out header */

	memset(&arg, 0, sizeof(arg));
	arg.result = result;
	iov[count].iov_base = &arg;
	iov[count].iov_len = sizeof(arg);
	count++;

	if (size) {
		iov[count].iov_base = (char *) buf;
		iov[count].iov_len = size;
		count++;
	}

	return send_reply_iov(req, 0, iov, count);
}
1091
/* Reply to IOCTL with output data scattered across an iovec array.
 * Allocates a copy with two extra leading slots: [0] for the out
 * header, [1] for the fuse_ioctl_out. */
int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
			 int count)
{
	struct iovec *padded_iov;
	struct fuse_ioctl_out arg;
	int res;

	padded_iov = malloc((count + 2) * sizeof(struct iovec));
	if (padded_iov == NULL)
		return fuse_reply_err(req, ENOMEM);

	memset(&arg, 0, sizeof(arg));
	arg.result = result;
	padded_iov[1].iov_base = &arg;
	padded_iov[1].iov_len = sizeof(arg);

	memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));

	res = send_reply_iov(req, 0, padded_iov, count + 2);
	free(padded_iov);

	return res;
}
1115
1116int fuse_reply_poll(fuse_req_t req, unsigned revents)
1117{
1118 struct fuse_poll_out arg;
1119
1120 memset(&arg, 0, sizeof(arg));
1121 arg.revents = revents;
1122
1123 return send_reply_ok(req, &arg, sizeof(arg));
1124}
1125
1126int fuse_reply_lseek(fuse_req_t req, off_t off)
1127{
1128 struct fuse_lseek_out arg;
1129
1130 memset(&arg, 0, sizeof(arg));
1131 arg.offset = off;
1132
1133 return send_reply_ok(req, &arg, sizeof(arg));
1134}
1135
1136static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1137{
1138 char *name = (char *) inarg;
1139
1140 if (req->se->op.lookup)
1141 req->se->op.lookup(req, nodeid, name);
1142 else
1143 fuse_reply_err(req, ENOSYS);
1144}
1145
/* Dispatch FORGET.  No reply payload is ever sent for forget, so the
 * fallback is fuse_reply_none(), not an error reply. */
static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;

	if (req->se->op.forget)
		req->se->op.forget(req, nodeid, arg->nlookup);
	else
		fuse_reply_none(req);
}
1155
/* Dispatch BATCH_FORGET.  Prefers the filesystem's forget_multi; when
 * only single-entry forget is implemented, each entry gets its own
 * dummy request (the handler is expected to complete it, presumably via
 * fuse_reply_none — confirm against the op.forget contract). */
static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
			    const void *inarg)
{
	struct fuse_batch_forget_in *arg = (void *) inarg;
	struct fuse_forget_one *param = (void *) PARAM(arg);
	unsigned int i;

	(void) nodeid;

	if (req->se->op.forget_multi) {
		req->se->op.forget_multi(req, arg->count,
					 (struct fuse_forget_data *) param);
	} else if (req->se->op.forget) {
		for (i = 0; i < arg->count; i++) {
			struct fuse_forget_one *forget = &param[i];
			struct fuse_req *dummy_req;

			dummy_req = fuse_ll_alloc_req(req->se);
			if (dummy_req == NULL)
				break;

			dummy_req->unique = req->unique;
			dummy_req->ctx = req->ctx;
			dummy_req->ch = NULL;

			req->se->op.forget(dummy_req, forget->nodeid,
					   forget->nlookup);
		}
		fuse_reply_none(req);
	} else {
		fuse_reply_none(req);
	}
}
1189
/* Dispatch GETATTR.  Since protocol minor 9 the kernel may pass a file
 * handle (FUSE_GETATTR_FH); in that case a stack fuse_file_info is
 * forwarded to the handler. */
static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_file_info *fip = NULL;
	struct fuse_file_info fi;

	if (req->se->conn.proto_minor >= 9) {
		struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;

		if (arg->getattr_flags & FUSE_GETATTR_FH) {
			memset(&fi, 0, sizeof(fi));
			fi.fh = arg->fh;
			fip = &fi;
		}
	}

	if (req->se->op.getattr)
		req->se->op.getattr(req, nodeid, fip);
	else
		fuse_reply_err(req, ENOSYS);
}
1210
/* Dispatch SETATTR.  Converts the wire attributes to a struct stat,
 * extracts an optional file handle (FATTR_FH), and masks arg->valid
 * down to the FUSE_SET_ATTR_* bits the public API defines before
 * calling the handler. */
static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;

	if (req->se->op.setattr) {
		struct fuse_file_info *fi = NULL;
		struct fuse_file_info fi_store;
		struct stat stbuf;
		memset(&stbuf, 0, sizeof(stbuf));
		convert_attr(arg, &stbuf);
		if (arg->valid & FATTR_FH) {
			/* FATTR_FH is internal; strip it before the handler */
			arg->valid &= ~FATTR_FH;
			memset(&fi_store, 0, sizeof(fi_store));
			fi = &fi_store;
			fi->fh = arg->fh;
		}
		arg->valid &=
			FUSE_SET_ATTR_MODE |
			FUSE_SET_ATTR_UID |
			FUSE_SET_ATTR_GID |
			FUSE_SET_ATTR_SIZE |
			FUSE_SET_ATTR_ATIME |
			FUSE_SET_ATTR_MTIME |
			FUSE_SET_ATTR_KILL_SUID |
			FUSE_SET_ATTR_KILL_SGID |
			FUSE_SET_ATTR_ATIME_NOW |
			FUSE_SET_ATTR_MTIME_NOW |
			FUSE_SET_ATTR_CTIME;

		req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
	} else
		fuse_reply_err(req, ENOSYS);
}
1244
1245static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1246{
1247 struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1248
1249 if (req->se->op.access)
1250 req->se->op.access(req, nodeid, arg->mask);
1251 else
1252 fuse_reply_err(req, ENOSYS);
1253}
1254
/* Dispatch READLINK; the request carries no payload. */
static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	(void) inarg;

	if (req->se->op.readlink)
		req->se->op.readlink(req, nodeid);
	else
		fuse_reply_err(req, ENOSYS);
}
1264
/* Dispatch MKNOD.  Since protocol minor 12 the request also carries
 * the caller's umask; older kernels use a shorter header, so the name
 * starts at the compat offset instead. */
static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
	char *name = PARAM(arg);

	if (req->se->conn.proto_minor >= 12)
		req->ctx.umask = arg->umask;
	else
		name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;

	if (req->se->op.mknod)
		req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
	else
		fuse_reply_err(req, ENOSYS);
}
1280
1281static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1282{
1283 struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1284
1285 if (req->se->conn.proto_minor >= 12)
1286 req->ctx.umask = arg->umask;
1287
1288 if (req->se->op.mkdir)
1289 req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1290 else
1291 fuse_reply_err(req, ENOSYS);
1292}
1293
1294static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1295{
1296 char *name = (char *) inarg;
1297
1298 if (req->se->op.unlink)
1299 req->se->op.unlink(req, nodeid, name);
1300 else
1301 fuse_reply_err(req, ENOSYS);
1302}
1303
1304static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1305{
1306 char *name = (char *) inarg;
1307
1308 if (req->se->op.rmdir)
1309 req->se->op.rmdir(req, nodeid, name);
1310 else
1311 fuse_reply_err(req, ENOSYS);
1312}
1313
1314static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1315{
1316 char *name = (char *) inarg;
1317 char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1318
1319 if (req->se->op.symlink)
1320 req->se->op.symlink(req, linkname, nodeid, name);
1321 else
1322 fuse_reply_err(req, ENOSYS);
1323}
1324
1325static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1326{
1327 struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1328 char *oldname = PARAM(arg);
1329 char *newname = oldname + strlen(oldname) + 1;
1330
1331 if (req->se->op.rename)
1332 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1333 0);
1334 else
1335 fuse_reply_err(req, ENOSYS);
1336}
1337
1338static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1339{
1340 struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1341 char *oldname = PARAM(arg);
1342 char *newname = oldname + strlen(oldname) + 1;
1343
1344 if (req->se->op.rename)
1345 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1346 arg->flags);
1347 else
1348 fuse_reply_err(req, ENOSYS);
1349}
1350
1351static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1352{
1353 struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1354
1355 if (req->se->op.link)
1356 req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1357 else
1358 fuse_reply_err(req, ENOSYS);
1359}
1360
1361static void do_tmpfile(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1362{
1363 struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1364
1365 if (req->se->op.tmpfile) {
1366 struct fuse_file_info fi;
1367
1368 memset(&fi, 0, sizeof(fi));
1369 fi.flags = arg->flags;
1370
1371 if (req->se->conn.proto_minor >= 12)
1372 req->ctx.umask = arg->umask;
1373
1374 req->se->op.tmpfile(req, nodeid, arg->mode, &fi);
1375 } else
1376 fuse_reply_err(req, ENOSYS);
1377}
1378
1379static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1380{
1381 struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1382
1383 if (req->se->op.create) {
1384 struct fuse_file_info fi;
1385 char *name = PARAM(arg);
1386
1387 memset(&fi, 0, sizeof(fi));
1388 fi.flags = arg->flags;
1389
1390 if (req->se->conn.proto_minor >= 12)
1391 req->ctx.umask = arg->umask;
1392 else
1393 name = (char *) inarg + sizeof(struct fuse_open_in);
1394
1395 req->se->op.create(req, nodeid, name, arg->mode, &fi);
1396 } else
1397 fuse_reply_err(req, ENOSYS);
1398}
1399
1400static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1401{
1402 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1403 struct fuse_file_info fi;
1404
1405 memset(&fi, 0, sizeof(fi));
1406 fi.flags = arg->flags;
1407
1408 if (req->se->op.open)
1409 req->se->op.open(req, nodeid, &fi);
1410 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPEN_SUPPORT)
1411 fuse_reply_err(req, ENOSYS);
1412 else
1413 fuse_reply_open(req, &fi);
1414}
1415
1416static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1417{
1418 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1419
1420 if (req->se->op.read) {
1421 struct fuse_file_info fi;
1422
1423 memset(&fi, 0, sizeof(fi));
1424 fi.fh = arg->fh;
1425 if (req->se->conn.proto_minor >= 9) {
1426 fi.lock_owner = arg->lock_owner;
1427 fi.flags = arg->flags;
1428 }
1429 req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1430 } else
1431 fuse_reply_err(req, ENOSYS);
1432}
1433
1434static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1435{
1436 struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1437 struct fuse_file_info fi;
1438 char *param;
1439
1440 memset(&fi, 0, sizeof(fi));
1441 fi.fh = arg->fh;
1442 fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1443
1444 if (req->se->conn.proto_minor < 9) {
1445 param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1446 } else {
1447 fi.lock_owner = arg->lock_owner;
1448 fi.flags = arg->flags;
1449 param = PARAM(arg);
1450 }
1451
1452 if (req->se->op.write)
1453 req->se->op.write(req, nodeid, param, arg->size,
1454 arg->offset, &fi);
1455 else
1456 fuse_reply_err(req, ENOSYS);
1457}
1458
/* Handle FUSE_WRITE via the zero-copy interface: the payload stays in
 * ibuf (which may reference a pipe fd rather than memory) and is handed
 * to the filesystem as a fuse_bufvec.
 * NOTE(review): this path appears to be taken only when op.write_buf is
 * set (it is called unconditionally below) - confirm in the dispatcher. */
static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
			 const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};
	struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;

	if (se->conn.proto_minor < 9) {
		/* Legacy kernels send a shorter fuse_write_in; the data
		 * follows it immediately and can never be in a pipe */
		bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			FUSE_COMPAT_WRITE_IN_SIZE;
		assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
	} else {
		fi.lock_owner = arg->lock_owner;
		fi.flags = arg->flags;
		/* In-memory buffer: payload follows the headers.  For a
		 * pipe (FUSE_BUF_IS_FD) only the size is adjusted. */
		if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
			bufv.buf[0].mem = PARAM(arg);

		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			sizeof(struct fuse_write_in);
	}
	/* The kernel must never announce more data than the buffer holds */
	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
		fuse_reply_err(req, EIO);
		goto out;
	}
	bufv.buf[0].size = arg->size;

	se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);

out:
	/* Need to reset the pipe if ->write_buf() didn't consume all data */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
1502
1503static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1504{
1505 struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1506 struct fuse_file_info fi;
1507
1508 memset(&fi, 0, sizeof(fi));
1509 fi.fh = arg->fh;
1510 fi.flush = 1;
1511 if (req->se->conn.proto_minor >= 7)
1512 fi.lock_owner = arg->lock_owner;
1513
1514 if (req->se->op.flush)
1515 req->se->op.flush(req, nodeid, &fi);
1516 else
1517 fuse_reply_err(req, ENOSYS);
1518}
1519
1520static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1521{
1522 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1523 struct fuse_file_info fi;
1524
1525 memset(&fi, 0, sizeof(fi));
1526 fi.flags = arg->flags;
1527 fi.fh = arg->fh;
1528 if (req->se->conn.proto_minor >= 8) {
1529 fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1530 fi.lock_owner = arg->lock_owner;
1531 }
1532 if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1533 fi.flock_release = 1;
1534 fi.lock_owner = arg->lock_owner;
1535 }
1536
1537 if (req->se->op.release)
1538 req->se->op.release(req, nodeid, &fi);
1539 else
1540 fuse_reply_err(req, 0);
1541}
1542
1543static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1544{
1545 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1546 struct fuse_file_info fi;
1547 int datasync = arg->fsync_flags & 1;
1548
1549 memset(&fi, 0, sizeof(fi));
1550 fi.fh = arg->fh;
1551
1552 if (req->se->op.fsync)
1553 req->se->op.fsync(req, nodeid, datasync, &fi);
1554 else
1555 fuse_reply_err(req, ENOSYS);
1556}
1557
1558static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1559{
1560 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1561 struct fuse_file_info fi;
1562
1563 memset(&fi, 0, sizeof(fi));
1564 fi.flags = arg->flags;
1565
1566 if (req->se->op.opendir)
1567 req->se->op.opendir(req, nodeid, &fi);
1568 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPENDIR_SUPPORT)
1569 fuse_reply_err(req, ENOSYS);
1570 else
1571 fuse_reply_open(req, &fi);
1572}
1573
1574static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1575{
1576 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1577 struct fuse_file_info fi;
1578
1579 memset(&fi, 0, sizeof(fi));
1580 fi.fh = arg->fh;
1581
1582 if (req->se->op.readdir)
1583 req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1584 else
1585 fuse_reply_err(req, ENOSYS);
1586}
1587
1588static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1589{
1590 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1591 struct fuse_file_info fi;
1592
1593 memset(&fi, 0, sizeof(fi));
1594 fi.fh = arg->fh;
1595
1596 if (req->se->op.readdirplus)
1597 req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1598 else
1599 fuse_reply_err(req, ENOSYS);
1600}
1601
1602static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1603{
1604 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1605 struct fuse_file_info fi;
1606
1607 memset(&fi, 0, sizeof(fi));
1608 fi.flags = arg->flags;
1609 fi.fh = arg->fh;
1610
1611 if (req->se->op.releasedir)
1612 req->se->op.releasedir(req, nodeid, &fi);
1613 else
1614 fuse_reply_err(req, 0);
1615}
1616
1617static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1618{
1619 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1620 struct fuse_file_info fi;
1621 int datasync = arg->fsync_flags & 1;
1622
1623 memset(&fi, 0, sizeof(fi));
1624 fi.fh = arg->fh;
1625
1626 if (req->se->op.fsyncdir)
1627 req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1628 else
1629 fuse_reply_err(req, ENOSYS);
1630}
1631
1632static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1633{
1634 (void) nodeid;
1635 (void) inarg;
1636
1637 if (req->se->op.statfs)
1638 req->se->op.statfs(req, nodeid);
1639 else {
1640 struct statvfs buf = {
1641 .f_namemax = 255,
1642 .f_bsize = 512,
1643 };
1644 fuse_reply_statfs(req, &buf);
1645 }
1646}
1647
1648static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1649{
1650 struct fuse_session *se = req->se;
1651 unsigned int xattr_ext = !!(se->conn.want_ext & FUSE_CAP_SETXATTR_EXT);
1652 struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1653 char *name = xattr_ext ? PARAM(arg) :
1654 (char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
1655 char *value = name + strlen(name) + 1;
1656
1657 /* XXX:The API should be extended to support extra_flags/setxattr_flags */
1658 if (req->se->op.setxattr)
1659 req->se->op.setxattr(req, nodeid, name, value, arg->size,
1660 arg->flags);
1661 else
1662 fuse_reply_err(req, ENOSYS);
1663}
1664
1665static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1666{
1667 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1668
1669 if (req->se->op.getxattr)
1670 req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1671 else
1672 fuse_reply_err(req, ENOSYS);
1673}
1674
1675static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1676{
1677 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1678
1679 if (req->se->op.listxattr)
1680 req->se->op.listxattr(req, nodeid, arg->size);
1681 else
1682 fuse_reply_err(req, ENOSYS);
1683}
1684
1685static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1686{
1687 char *name = (char *) inarg;
1688
1689 if (req->se->op.removexattr)
1690 req->se->op.removexattr(req, nodeid, name);
1691 else
1692 fuse_reply_err(req, ENOSYS);
1693}
1694
1695static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1696 struct flock *flock)
1697{
1698 memset(flock, 0, sizeof(struct flock));
1699 flock->l_type = fl->type;
1700 flock->l_whence = SEEK_SET;
1701 flock->l_start = fl->start;
1702 if (fl->end == OFFSET_MAX)
1703 flock->l_len = 0;
1704 else
1705 flock->l_len = fl->end - fl->start + 1;
1706 flock->l_pid = fl->pid;
1707}
1708
1709static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1710{
1711 struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1712 struct fuse_file_info fi;
1713 struct flock flock;
1714
1715 memset(&fi, 0, sizeof(fi));
1716 fi.fh = arg->fh;
1717 fi.lock_owner = arg->owner;
1718
1719 convert_fuse_file_lock(&arg->lk, &flock);
1720 if (req->se->op.getlk)
1721 req->se->op.getlk(req, nodeid, &fi, &flock);
1722 else
1723 fuse_reply_err(req, ENOSYS);
1724}
1725
/* Common implementation for SETLK (sleep=0, non-blocking) and SETLKW
 * (sleep=1, blocking).  BSD flock()-style requests (FUSE_LK_FLOCK) go
 * to op.flock; POSIX record locks go to op.setlk. */
static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
			    const void *inarg, int sleep)
{
	struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
	struct fuse_file_info fi;
	struct flock flock;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.lock_owner = arg->owner;

	if (arg->lk_flags & FUSE_LK_FLOCK) {
		/* Map the POSIX lock type onto the flock() operation.
		 * NOTE(review): an unexpected lk.type leaves op == 0
		 * (plus possibly LOCK_NB) - presumably the kernel never
		 * sends other types here; confirm. */
		int op = 0;

		switch (arg->lk.type) {
		case F_RDLCK:
			op = LOCK_SH;
			break;
		case F_WRLCK:
			op = LOCK_EX;
			break;
		case F_UNLCK:
			op = LOCK_UN;
			break;
		}
		if (!sleep)
			op |= LOCK_NB;

		if (req->se->op.flock)
			req->se->op.flock(req, nodeid, &fi, op);
		else
			fuse_reply_err(req, ENOSYS);
	} else {
		convert_fuse_file_lock(&arg->lk, &flock);
		if (req->se->op.setlk)
			req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
		else
			fuse_reply_err(req, ENOSYS);
	}
}
1766
/* Handle FUSE_SETLK: non-blocking lock request. */
static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	do_setlk_common(req, nodeid, inarg, 0);
}
1771
/* Handle FUSE_SETLKW: blocking lock request. */
static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	do_setlk_common(req, nodeid, inarg, 1);
}
1776
1777static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1778{
1779 struct fuse_req *curr;
1780
1781 for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1782 if (curr->unique == req->u.i.unique) {
1784 void *data;
1785
1786 curr->ref_cnt++;
1787 pthread_mutex_unlock(&se->lock);
1788
1789 /* Ugh, ugly locking */
1790 pthread_mutex_lock(&curr->lock);
1791 pthread_mutex_lock(&se->lock);
1792 curr->interrupted = 1;
1793 func = curr->u.ni.func;
1794 data = curr->u.ni.data;
1795 pthread_mutex_unlock(&se->lock);
1796 if (func)
1797 func(curr, data);
1798 pthread_mutex_unlock(&curr->lock);
1799
1800 pthread_mutex_lock(&se->lock);
1801 curr->ref_cnt--;
1802 if (!curr->ref_cnt) {
1803 destroy_req(curr);
1804 }
1805
1806 return 1;
1807 }
1808 }
1809 for (curr = se->interrupts.next; curr != &se->interrupts;
1810 curr = curr->next) {
1811 if (curr->u.i.unique == req->u.i.unique)
1812 return 1;
1813 }
1814 return 0;
1815}
1816
/* Handle FUSE_INTERRUPT: if the referenced request is already being
 * processed, mark it interrupted and drop this request (INTERRUPT gets
 * no reply).  Otherwise queue the interrupt until the target request
 * arrives - requests can be read out of order. */
static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
	struct fuse_session *se = req->se;

	(void) nodeid;
	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
			 (unsigned long long) arg->unique);

	req->u.i.unique = arg->unique;

	pthread_mutex_lock(&se->lock);
	if (find_interrupted(se, req)) {
		/* Target found (or a duplicate interrupt): done with
		 * this request; release its channel and free it */
		fuse_chan_put(req->ch);
		req->ch = NULL;
		destroy_req(req);
	} else
		list_add_req(req, &se->interrupts);
	pthread_mutex_unlock(&se->lock);
}
1838
/* Check a newly arrived request against the queued interrupts.
 * NOTE(review): appears to be called with se->lock held - confirm at
 * the call sites.  If an interrupt for 'req' is pending, consume it,
 * mark 'req' interrupted and return NULL.  Otherwise dequeue one
 * pending interrupt request (if any) and return it for re-processing. */
static struct fuse_req *check_interrupt(struct fuse_session *se,
					struct fuse_req *req)
{
	struct fuse_req *curr;

	for (curr = se->interrupts.next; curr != &se->interrupts;
	     curr = curr->next) {
		if (curr->u.i.unique == req->unique) {
			req->interrupted = 1;
			list_del_req(curr);
			fuse_chan_put(curr->ch);
			curr->ch = NULL;
			destroy_req(curr);
			return NULL;
		}
	}
	curr = se->interrupts.next;
	if (curr != &se->interrupts) {
		list_del_req(curr);
		list_init_req(curr);
		return curr;
	} else
		return NULL;
}
1863
1864static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1865{
1866 struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1867
1868 if (req->se->op.bmap)
1869 req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1870 else
1871 fuse_reply_err(req, ENOSYS);
1872}
1873
/* Handle FUSE_IOCTL.  Directory ioctls are refused with ENOTTY unless
 * the filesystem opted in via FUSE_CAP_IOCTL_DIR. */
static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
	unsigned int flags = arg->flags;
	/* Input data, if any, follows the fixed-size struct */
	void *in_buf = arg->in_size ? PARAM(arg) : NULL;
	struct fuse_file_info fi;

	if (flags & FUSE_IOCTL_DIR &&
	    !(req->se->conn.want_ext & FUSE_CAP_IOCTL_DIR)) {
		fuse_reply_err(req, ENOTTY);
		return;
	}

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;

	/* 32-bit libfuse, kernel >= 7.16, and no FUSE_IOCTL_32BIT flag:
	 * presumably the ioctl originates from a 64-bit caller and the
	 * reply must be marked accordingly - confirm against the kernel
	 * ioctl compat documentation. */
	if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
	    !(flags & FUSE_IOCTL_32BIT)) {
		req->ioctl_64bit = 1;
	}

	if (req->se->op.ioctl)
		req->se->op.ioctl(req, nodeid, arg->cmd,
				  (void *)(uintptr_t)arg->arg, &fi, flags,
				  in_buf, arg->in_size, arg->out_size);
	else
		fuse_reply_err(req, ENOSYS);
}
1902
/* Free a poll handle allocated in do_poll().  Safe to call with NULL. */
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
{
	free(ph);
}
1907
1908static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1909{
1910 struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1911 struct fuse_file_info fi;
1912
1913 memset(&fi, 0, sizeof(fi));
1914 fi.fh = arg->fh;
1915 fi.poll_events = arg->events;
1916
1917 if (req->se->op.poll) {
1918 struct fuse_pollhandle *ph = NULL;
1919
1920 if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1921 ph = malloc(sizeof(struct fuse_pollhandle));
1922 if (ph == NULL) {
1923 fuse_reply_err(req, ENOMEM);
1924 return;
1925 }
1926 ph->kh = arg->kh;
1927 ph->se = req->se;
1928 }
1929
1930 req->se->op.poll(req, nodeid, &fi, ph);
1931 } else {
1932 fuse_reply_err(req, ENOSYS);
1933 }
1934}
1935
1936static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1937{
1938 struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1939 struct fuse_file_info fi;
1940
1941 memset(&fi, 0, sizeof(fi));
1942 fi.fh = arg->fh;
1943
1944 if (req->se->op.fallocate)
1945 req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1946 else
1947 fuse_reply_err(req, ENOSYS);
1948}
1949
1950static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1951{
1952 struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1953 struct fuse_file_info fi_in, fi_out;
1954
1955 memset(&fi_in, 0, sizeof(fi_in));
1956 fi_in.fh = arg->fh_in;
1957
1958 memset(&fi_out, 0, sizeof(fi_out));
1959 fi_out.fh = arg->fh_out;
1960
1961
1962 if (req->se->op.copy_file_range)
1963 req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1964 &fi_in, arg->nodeid_out,
1965 arg->off_out, &fi_out, arg->len,
1966 arg->flags);
1967 else
1968 fuse_reply_err(req, ENOSYS);
1969}
1970
1971static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1972{
1973 struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1974 struct fuse_file_info fi;
1975
1976 memset(&fi, 0, sizeof(fi));
1977 fi.fh = arg->fh;
1978
1979 if (req->se->op.lseek)
1980 req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1981 else
1982 fuse_reply_err(req, ENOSYS);
1983}
1984
1985static bool want_flags_valid(uint64_t capable, uint64_t want)
1986{
1987 uint64_t unknown_flags = want & (~capable);
1988 if (unknown_flags != 0) {
1989 fuse_log(FUSE_LOG_ERR,
1990 "fuse: unknown connection 'want' flags: 0x%08lx\n",
1991 unknown_flags);
1992 return false;
1993 }
1994 return true;
1995}
1996
2001{
2002 struct fuse_session *se = container_of(conn, struct fuse_session, conn);
2003
2004 /*
2005 * Convert want to want_ext if necessary.
2006 * For the high level interface this function might be called
2007 * twice, once from the high level interface and once from the
2008 * low level interface. Both, with different want_ext_default and
2009 * want_default values. In order to suppress a failure for the
2010 * second call, we check if the lower 32 bits of want_ext are
2011 * already set to the value of want.
2012 */
2013 if (conn->want != se->conn_want &&
2014 fuse_lower_32_bits(conn->want_ext) != conn->want) {
2015 if (conn->want_ext != se->conn_want_ext) {
2016 fuse_log(FUSE_LOG_ERR,
2017 "%s: Both conn->want_ext and conn->want are set.\n"
2018 "want=%x, want_ext=%lx, se->want=%lx se->want_ext=%lx\n",
2019 __func__, conn->want, conn->want_ext,
2020 se->conn_want, se->conn_want_ext);
2021 return -EINVAL;
2022 }
2023
2024 /* high bits from want_ext, low bits from want */
2025 conn->want_ext = fuse_higher_32_bits(conn->want_ext) |
2026 conn->want;
2027 }
2028
2029 /* ensure there won't be a second conversion */
2030 conn->want = fuse_lower_32_bits(conn->want_ext);
2031
2032 return 0;
2033}
2034
2036 uint64_t flag)
2037{
2038 struct fuse_session *se = container_of(conn, struct fuse_session, conn);
2039
2040 if (conn->capable_ext & flag) {
2041 conn->want_ext |= flag;
2042 se->conn_want_ext |= flag;
2043 conn->want |= flag;
2044 se->conn_want |= flag;
2045 return true;
2046 }
2047 return false;
2048}
2049
2051 uint64_t flag)
2052{
2053 struct fuse_session *se = container_of(conn, struct fuse_session, conn);
2054
2055 conn->want_ext &= ~flag;
2056 se->conn_want_ext &= ~flag;
2057 conn->want &= ~flag;
2058 se->conn_want &= ~flag;
2059}
2060
2062 uint64_t flag)
2063{
2064 return conn->capable_ext & flag ? true : false;
2065}
2066
2067
2068/* Prevent bogus data races (bogus since "init" is called before
2069 * multi-threading becomes relevant */
2070static __attribute__((no_sanitize("thread")))
2071void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2072{
2073 struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
2074 struct fuse_init_out outarg;
2075 struct fuse_session *se = req->se;
2076 size_t bufsize = se->bufsize;
2077 size_t outargsize = sizeof(outarg);
2078 uint64_t inargflags = 0;
2079 uint64_t outargflags = 0;
2080 bool buf_reallocable = se->buf_reallocable;
2081 (void) nodeid;
2082 if (se->debug) {
2083 fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
2084 if (arg->major == 7 && arg->minor >= 6) {
2085 fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
2086 fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
2087 arg->max_readahead);
2088 }
2089 }
2090 se->conn.proto_major = arg->major;
2091 se->conn.proto_minor = arg->minor;
2092 se->conn.capable_ext = 0;
2093 se->conn.want_ext = 0;
2094
2095 memset(&outarg, 0, sizeof(outarg));
2096 outarg.major = FUSE_KERNEL_VERSION;
2097 outarg.minor = FUSE_KERNEL_MINOR_VERSION;
2098
2099 if (arg->major < 7) {
2100 fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
2101 arg->major, arg->minor);
2102 fuse_reply_err(req, EPROTO);
2103 return;
2104 }
2105
2106 if (arg->major > 7) {
2107 /* Wait for a second INIT request with a 7.X version */
2108 send_reply_ok(req, &outarg, sizeof(outarg));
2109 return;
2110 }
2111
2112 if (arg->minor >= 6) {
2113 if (arg->max_readahead < se->conn.max_readahead)
2114 se->conn.max_readahead = arg->max_readahead;
2115 inargflags = arg->flags;
2116 if (inargflags & FUSE_INIT_EXT)
2117 inargflags = inargflags | (uint64_t) arg->flags2 << 32;
2118 if (inargflags & FUSE_ASYNC_READ)
2119 se->conn.capable_ext |= FUSE_CAP_ASYNC_READ;
2120 if (inargflags & FUSE_POSIX_LOCKS)
2121 se->conn.capable_ext |= FUSE_CAP_POSIX_LOCKS;
2122 if (inargflags & FUSE_ATOMIC_O_TRUNC)
2123 se->conn.capable_ext |= FUSE_CAP_ATOMIC_O_TRUNC;
2124 if (inargflags & FUSE_EXPORT_SUPPORT)
2125 se->conn.capable_ext |= FUSE_CAP_EXPORT_SUPPORT;
2126 if (inargflags & FUSE_DONT_MASK)
2127 se->conn.capable_ext |= FUSE_CAP_DONT_MASK;
2128 if (inargflags & FUSE_FLOCK_LOCKS)
2129 se->conn.capable_ext |= FUSE_CAP_FLOCK_LOCKS;
2130 if (inargflags & FUSE_AUTO_INVAL_DATA)
2131 se->conn.capable_ext |= FUSE_CAP_AUTO_INVAL_DATA;
2132 if (inargflags & FUSE_DO_READDIRPLUS)
2133 se->conn.capable_ext |= FUSE_CAP_READDIRPLUS;
2134 if (inargflags & FUSE_READDIRPLUS_AUTO)
2135 se->conn.capable_ext |= FUSE_CAP_READDIRPLUS_AUTO;
2136 if (inargflags & FUSE_ASYNC_DIO)
2137 se->conn.capable_ext |= FUSE_CAP_ASYNC_DIO;
2138 if (inargflags & FUSE_WRITEBACK_CACHE)
2139 se->conn.capable_ext |= FUSE_CAP_WRITEBACK_CACHE;
2140 if (inargflags & FUSE_NO_OPEN_SUPPORT)
2141 se->conn.capable_ext |= FUSE_CAP_NO_OPEN_SUPPORT;
2142 if (inargflags & FUSE_PARALLEL_DIROPS)
2143 se->conn.capable_ext |= FUSE_CAP_PARALLEL_DIROPS;
2144 if (inargflags & FUSE_POSIX_ACL)
2145 se->conn.capable_ext |= FUSE_CAP_POSIX_ACL;
2146 if (inargflags & FUSE_HANDLE_KILLPRIV)
2147 se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV;
2148 if (inargflags & FUSE_HANDLE_KILLPRIV_V2)
2149 se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV_V2;
2150 if (inargflags & FUSE_CACHE_SYMLINKS)
2151 se->conn.capable_ext |= FUSE_CAP_CACHE_SYMLINKS;
2152 if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
2153 se->conn.capable_ext |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2154 if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2155 se->conn.capable_ext |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2156 if (inargflags & FUSE_SETXATTR_EXT)
2157 se->conn.capable_ext |= FUSE_CAP_SETXATTR_EXT;
2158 if (!(inargflags & FUSE_MAX_PAGES)) {
2159 size_t max_bufsize =
2160 FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2161 + FUSE_BUFFER_HEADER_SIZE;
2162 if (bufsize > max_bufsize) {
2163 bufsize = max_bufsize;
2164 }
2165 buf_reallocable = false;
2166 }
2167 if (inargflags & FUSE_DIRECT_IO_ALLOW_MMAP)
2168 se->conn.capable_ext |= FUSE_CAP_DIRECT_IO_ALLOW_MMAP;
2169 if (arg->minor >= 38 || (inargflags & FUSE_HAS_EXPIRE_ONLY))
2170 se->conn.capable_ext |= FUSE_CAP_EXPIRE_ONLY;
2171 if (inargflags & FUSE_PASSTHROUGH)
2172 se->conn.capable_ext |= FUSE_CAP_PASSTHROUGH;
2173 if (inargflags & FUSE_NO_EXPORT_SUPPORT)
2174 se->conn.capable_ext |= FUSE_CAP_NO_EXPORT_SUPPORT;
2175 } else {
2176 se->conn.max_readahead = 0;
2177 }
2178
2179 if (se->conn.proto_minor >= 14) {
2180#ifdef HAVE_SPLICE
2181#ifdef HAVE_VMSPLICE
2182 if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2183 se->conn.capable_ext |= FUSE_CAP_SPLICE_WRITE |
2185 }
2186#endif
2187 if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2188 se->conn.capable_ext |= FUSE_CAP_SPLICE_READ;
2189 }
2190#endif
2191 }
2192 if (se->conn.proto_minor >= 18)
2193 se->conn.capable_ext |= FUSE_CAP_IOCTL_DIR;
2194
2195 /* Default settings for modern filesystems.
2196 *
2197 * Most of these capabilities were disabled by default in
2198 * libfuse2 for backwards compatibility reasons. In libfuse3,
2199 * we can finally enable them by default (as long as they're
2200 * supported by the kernel).
2201 */
2202#define LL_SET_DEFAULT(cond, cap) \
2203 if ((cond)) \
2204 fuse_set_feature_flag(&se->conn, cap)
2205
2206 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2207 LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2208 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2209 LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2210 LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2211 LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2212 LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2214 LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2215 LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2216 LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2218
2219 /* This could safely become default, but libfuse needs an API extension
2220 * to support it
2221 * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
2222 */
2223
2224 se->conn.time_gran = 1;
2225
2226 se->got_init = 1;
2227 if (se->op.init) {
2228 // Apply the first 32 bits of capable_ext to capable
2229 se->conn.capable = fuse_lower_32_bits(se->conn.capable_ext);
2230
2231 se->op.init(se->userdata, &se->conn);
2232
2233 /*
2234 * se->conn.want is 32-bit value and deprecated in favour of
2235 * se->conn.want_ext
2236 * Userspace might still use conn.want - we need to convert it
2237 */
2239 }
2240
2241 if (!want_flags_valid(se->conn.capable_ext, se->conn.want_ext)) {
2242 fuse_reply_err(req, EPROTO);
2243 se->error = -EPROTO;
2245 return;
2246 }
2247
2248 unsigned max_read_mo = get_max_read(se->mo);
2249 if (se->conn.max_read != max_read_mo) {
2250 fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2251 "requested different maximum read size (%u vs %u)\n",
2252 se->conn.max_read, max_read_mo);
2253 fuse_reply_err(req, EPROTO);
2254 se->error = -EPROTO;
2256 return;
2257 }
2258
2259 if (bufsize < FUSE_MIN_READ_BUFFER) {
2260 fuse_log(FUSE_LOG_ERR,
2261 "fuse: warning: buffer size too small: %zu\n",
2262 bufsize);
2263 bufsize = FUSE_MIN_READ_BUFFER;
2264 }
2265
2266 if (buf_reallocable)
2267 bufsize = UINT_MAX;
2268 se->conn.max_write = MIN(se->conn.max_write, bufsize - FUSE_BUFFER_HEADER_SIZE);
2269 se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2270
2271 if (arg->flags & FUSE_MAX_PAGES) {
2272 outarg.flags |= FUSE_MAX_PAGES;
2273 outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2274 }
2275 outargflags = outarg.flags;
2276 /* Always enable big writes, this is superseded
2277 by the max_write option */
2278 outargflags |= FUSE_BIG_WRITES;
2279
2280 if (se->conn.want_ext & FUSE_CAP_ASYNC_READ)
2281 outargflags |= FUSE_ASYNC_READ;
2282 if (se->conn.want_ext & FUSE_CAP_POSIX_LOCKS)
2283 outargflags |= FUSE_POSIX_LOCKS;
2284 if (se->conn.want_ext & FUSE_CAP_ATOMIC_O_TRUNC)
2285 outargflags |= FUSE_ATOMIC_O_TRUNC;
2286 if (se->conn.want_ext & FUSE_CAP_EXPORT_SUPPORT)
2287 outargflags |= FUSE_EXPORT_SUPPORT;
2288 if (se->conn.want_ext & FUSE_CAP_DONT_MASK)
2289 outargflags |= FUSE_DONT_MASK;
2290 if (se->conn.want_ext & FUSE_CAP_FLOCK_LOCKS)
2291 outargflags |= FUSE_FLOCK_LOCKS;
2292 if (se->conn.want_ext & FUSE_CAP_AUTO_INVAL_DATA)
2293 outargflags |= FUSE_AUTO_INVAL_DATA;
2294 if (se->conn.want_ext & FUSE_CAP_READDIRPLUS)
2295 outargflags |= FUSE_DO_READDIRPLUS;
2296 if (se->conn.want_ext & FUSE_CAP_READDIRPLUS_AUTO)
2297 outargflags |= FUSE_READDIRPLUS_AUTO;
2298 if (se->conn.want_ext & FUSE_CAP_ASYNC_DIO)
2299 outargflags |= FUSE_ASYNC_DIO;
2300 if (se->conn.want_ext & FUSE_CAP_WRITEBACK_CACHE)
2301 outargflags |= FUSE_WRITEBACK_CACHE;
2302 if (se->conn.want_ext & FUSE_CAP_PARALLEL_DIROPS)
2303 outargflags |= FUSE_PARALLEL_DIROPS;
2304 if (se->conn.want_ext & FUSE_CAP_POSIX_ACL)
2305 outargflags |= FUSE_POSIX_ACL;
2306 if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV)
2307 outargflags |= FUSE_HANDLE_KILLPRIV;
2308 if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV_V2)
2309 outargflags |= FUSE_HANDLE_KILLPRIV_V2;
2310 if (se->conn.want_ext & FUSE_CAP_CACHE_SYMLINKS)
2311 outargflags |= FUSE_CACHE_SYMLINKS;
2312 if (se->conn.want_ext & FUSE_CAP_EXPLICIT_INVAL_DATA)
2313 outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2314 if (se->conn.want_ext & FUSE_CAP_SETXATTR_EXT)
2315 outargflags |= FUSE_SETXATTR_EXT;
2316 if (se->conn.want_ext & FUSE_CAP_DIRECT_IO_ALLOW_MMAP)
2317 outargflags |= FUSE_DIRECT_IO_ALLOW_MMAP;
2318 if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH) {
2319 outargflags |= FUSE_PASSTHROUGH;
2320 /*
2321 * outarg.max_stack_depth includes the fuse stack layer,
2322 * so it is one more than max_backing_stack_depth.
2323 */
2324 outarg.max_stack_depth = se->conn.max_backing_stack_depth + 1;
2325 }
2326 if (se->conn.want_ext & FUSE_CAP_NO_EXPORT_SUPPORT)
2327 outargflags |= FUSE_NO_EXPORT_SUPPORT;
2328
2329 if (inargflags & FUSE_INIT_EXT) {
2330 outargflags |= FUSE_INIT_EXT;
2331 outarg.flags2 = outargflags >> 32;
2332 }
2333
2334 outarg.flags = outargflags;
2335
2336 outarg.max_readahead = se->conn.max_readahead;
2337 outarg.max_write = se->conn.max_write;
2338 if (se->conn.proto_minor >= 13) {
2339 if (se->conn.max_background >= (1 << 16))
2340 se->conn.max_background = (1 << 16) - 1;
2341 if (se->conn.congestion_threshold > se->conn.max_background)
2342 se->conn.congestion_threshold = se->conn.max_background;
2343 if (!se->conn.congestion_threshold) {
2344 se->conn.congestion_threshold =
2345 se->conn.max_background * 3 / 4;
2346 }
2347
2348 outarg.max_background = se->conn.max_background;
2349 outarg.congestion_threshold = se->conn.congestion_threshold;
2350 }
2351 if (se->conn.proto_minor >= 23)
2352 outarg.time_gran = se->conn.time_gran;
2353
2354 if (se->debug) {
2355 fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2356 fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2357 fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2358 outarg.max_readahead);
2359 fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2360 fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2361 outarg.max_background);
2362 fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2363 outarg.congestion_threshold);
2364 fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2365 outarg.time_gran);
2366 if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH)
2367 fuse_log(FUSE_LOG_DEBUG, " max_stack_depth=%u\n",
2368 outarg.max_stack_depth);
2369 }
2370 if (arg->minor < 5)
2371 outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2372 else if (arg->minor < 23)
2373 outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2374
2375 send_reply_ok(req, &outarg, outargsize);
2376}
2377
2378static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2379{
2380 struct fuse_session *se = req->se;
2381 char *mountpoint;
2382
2383 (void) nodeid;
2384 (void) inarg;
2385
2386 mountpoint = atomic_exchange(&se->mountpoint, NULL);
2387 free(mountpoint);
2388
2389 se->got_destroy = 1;
2390 se->got_init = 0;
2391 if (se->op.destroy)
2392 se->op.destroy(se->userdata);
2393
2394 send_reply_ok(req, NULL, 0);
2395}
2396
2397static void list_del_nreq(struct fuse_notify_req *nreq)
2398{
2399 struct fuse_notify_req *prev = nreq->prev;
2400 struct fuse_notify_req *next = nreq->next;
2401 prev->next = next;
2402 next->prev = prev;
2403}
2404
2405static void list_add_nreq(struct fuse_notify_req *nreq,
2406 struct fuse_notify_req *next)
2407{
2408 struct fuse_notify_req *prev = next->prev;
2409 nreq->next = next;
2410 nreq->prev = prev;
2411 prev->next = nreq;
2412 next->prev = nreq;
2413}
2414
2415static void list_init_nreq(struct fuse_notify_req *nreq)
2416{
2417 nreq->next = nreq;
2418 nreq->prev = nreq;
2419}
2420
2421static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2422 const void *inarg, const struct fuse_buf *buf)
2423{
2424 struct fuse_session *se = req->se;
2425 struct fuse_notify_req *nreq;
2426 struct fuse_notify_req *head;
2427
2428 pthread_mutex_lock(&se->lock);
2429 head = &se->notify_list;
2430 for (nreq = head->next; nreq != head; nreq = nreq->next) {
2431 if (nreq->unique == req->unique) {
2432 list_del_nreq(nreq);
2433 break;
2434 }
2435 }
2436 pthread_mutex_unlock(&se->lock);
2437
2438 if (nreq != head)
2439 nreq->reply(nreq, req, nodeid, inarg, buf);
2440}
2441
2442static int send_notify_iov(struct fuse_session *se, int notify_code,
2443 struct iovec *iov, int count)
2444{
2445 struct fuse_out_header out;
2446
2447 if (!se->got_init)
2448 return -ENOTCONN;
2449
2450 out.unique = 0;
2451 out.error = notify_code;
2452 iov[0].iov_base = &out;
2453 iov[0].iov_len = sizeof(struct fuse_out_header);
2454
2455 return fuse_send_msg(se, NULL, iov, count);
2456}
2457
2458int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2459{
2460 if (ph != NULL) {
2461 struct fuse_notify_poll_wakeup_out outarg;
2462 struct iovec iov[2];
2463
2464 outarg.kh = ph->kh;
2465
2466 iov[1].iov_base = &outarg;
2467 iov[1].iov_len = sizeof(outarg);
2468
2469 return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2470 } else {
2471 return 0;
2472 }
2473}
2474
2475int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2476 off_t off, off_t len)
2477{
2478 struct fuse_notify_inval_inode_out outarg;
2479 struct iovec iov[2];
2480
2481 if (!se)
2482 return -EINVAL;
2483
2484 if (se->conn.proto_minor < 12)
2485 return -ENOSYS;
2486
2487 outarg.ino = ino;
2488 outarg.off = off;
2489 outarg.len = len;
2490
2491 iov[1].iov_base = &outarg;
2492 iov[1].iov_len = sizeof(outarg);
2493
2494 return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2495}
2496
2516static int fuse_lowlevel_notify_entry(struct fuse_session *se, fuse_ino_t parent,
2517 const char *name, size_t namelen,
2518 enum fuse_notify_entry_flags flags)
2519{
2520 struct fuse_notify_inval_entry_out outarg;
2521 struct iovec iov[3];
2522
2523 if (!se)
2524 return -EINVAL;
2525
2526 if (se->conn.proto_minor < 12)
2527 return -ENOSYS;
2528
2529 outarg.parent = parent;
2530 outarg.namelen = namelen;
2531 outarg.flags = 0;
2532 if (flags & FUSE_LL_EXPIRE_ONLY)
2533 outarg.flags |= FUSE_EXPIRE_ONLY;
2534
2535 iov[1].iov_base = &outarg;
2536 iov[1].iov_len = sizeof(outarg);
2537 iov[2].iov_base = (void *)name;
2538 iov[2].iov_len = namelen + 1;
2539
2540 return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2541}
2542
2543int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2544 const char *name, size_t namelen)
2545{
2546 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_INVALIDATE);
2547}
2548
2549int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2550 const char *name, size_t namelen)
2551{
2552 if (!se)
2553 return -EINVAL;
2554
2555 if (!(se->conn.capable_ext & FUSE_CAP_EXPIRE_ONLY))
2556 return -ENOSYS;
2557
2558 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_EXPIRE_ONLY);
2559}
2560
2561
2562int fuse_lowlevel_notify_delete(struct fuse_session *se,
2563 fuse_ino_t parent, fuse_ino_t child,
2564 const char *name, size_t namelen)
2565{
2566 struct fuse_notify_delete_out outarg;
2567 struct iovec iov[3];
2568
2569 if (!se)
2570 return -EINVAL;
2571
2572 if (se->conn.proto_minor < 18)
2573 return -ENOSYS;
2574
2575 outarg.parent = parent;
2576 outarg.child = child;
2577 outarg.namelen = namelen;
2578 outarg.padding = 0;
2579
2580 iov[1].iov_base = &outarg;
2581 iov[1].iov_len = sizeof(outarg);
2582 iov[2].iov_base = (void *)name;
2583 iov[2].iov_len = namelen + 1;
2584
2585 return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2586}
2587
2588int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2589 off_t offset, struct fuse_bufvec *bufv,
2590 enum fuse_buf_copy_flags flags)
2591{
2592 struct fuse_out_header out;
2593 struct fuse_notify_store_out outarg;
2594 struct iovec iov[3];
2595 size_t size = fuse_buf_size(bufv);
2596 int res;
2597
2598 if (!se)
2599 return -EINVAL;
2600
2601 if (se->conn.proto_minor < 15)
2602 return -ENOSYS;
2603
2604 out.unique = 0;
2605 out.error = FUSE_NOTIFY_STORE;
2606
2607 outarg.nodeid = ino;
2608 outarg.offset = offset;
2609 outarg.size = size;
2610 outarg.padding = 0;
2611
2612 iov[0].iov_base = &out;
2613 iov[0].iov_len = sizeof(out);
2614 iov[1].iov_base = &outarg;
2615 iov[1].iov_len = sizeof(outarg);
2616
2617 res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2618 if (res > 0)
2619 res = -res;
2620
2621 return res;
2622}
2623
/* Bookkeeping for an in-flight FUSE_NOTIFY_RETRIEVE request: pairs the
 * notify-list entry with the caller's cookie, which is handed back to
 * the filesystem's retrieve_reply() when the kernel answers. */
struct fuse_retrieve_req {
	struct fuse_notify_req nreq;	/* entry in se->notify_list */
	void *cookie;			/* opaque caller-supplied value */
};
2628
/* Completion callback for FUSE_NOTIFY_RETRIEVE: the kernel has sent the
 * requested page-cache data back in a FUSE_NOTIFY_REPLY message.  Locates
 * the payload, validates its size, forwards it to the filesystem's
 * retrieve_reply() handler (if any), and frees the bookkeeping record. */
static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
				   fuse_req_t req, fuse_ino_t ino,
				   const void *inarg,
				   const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_retrieve_req *rreq =
		container_of(nreq, struct fuse_retrieve_req, nreq);
	const struct fuse_notify_retrieve_in *arg = inarg;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};

	/* For memory buffers the payload directly follows the retrieve_in
	   header; for FD (pipe) buffers the data remains in the pipe. */
	if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
		bufv.buf[0].mem = PARAM(arg);

	/* Exclude the message headers from the accounted payload size */
	bufv.buf[0].size -= sizeof(struct fuse_in_header) +
		sizeof(struct fuse_notify_retrieve_in);

	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
		fuse_reply_none(req);
		goto out;
	}
	/* Trim to the exact amount the kernel says it delivered */
	bufv.buf[0].size = arg->size;

	if (se->op.retrieve_reply) {
		se->op.retrieve_reply(req, rreq->cookie, ino,
				      arg->offset, &bufv);
	} else {
		fuse_reply_none(req);
	}
out:
	free(rreq);
	/* If the payload sat in a pipe and was not fully consumed, drain
	   the pipe so the next request starts clean. */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
2667
2668int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2669 size_t size, off_t offset, void *cookie)
2670{
2671 struct fuse_notify_retrieve_out outarg;
2672 struct iovec iov[2];
2673 struct fuse_retrieve_req *rreq;
2674 int err;
2675
2676 if (!se)
2677 return -EINVAL;
2678
2679 if (se->conn.proto_minor < 15)
2680 return -ENOSYS;
2681
2682 rreq = malloc(sizeof(*rreq));
2683 if (rreq == NULL)
2684 return -ENOMEM;
2685
2686 pthread_mutex_lock(&se->lock);
2687 rreq->cookie = cookie;
2688 rreq->nreq.unique = se->notify_ctr++;
2689 rreq->nreq.reply = fuse_ll_retrieve_reply;
2690 list_add_nreq(&rreq->nreq, &se->notify_list);
2691 pthread_mutex_unlock(&se->lock);
2692
2693 outarg.notify_unique = rreq->nreq.unique;
2694 outarg.nodeid = ino;
2695 outarg.offset = offset;
2696 outarg.size = size;
2697 outarg.padding = 0;
2698
2699 iov[1].iov_base = &outarg;
2700 iov[1].iov_len = sizeof(outarg);
2701
2702 err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2703 if (err) {
2704 pthread_mutex_lock(&se->lock);
2705 list_del_nreq(&rreq->nreq);
2706 pthread_mutex_unlock(&se->lock);
2707 free(rreq);
2708 }
2709
2710 return err;
2711}
2712
2714{
2715 return req->se->userdata;
2716}
2717
2719{
2720 return &req->ctx;
2721}
2722
2724 void *data)
2725{
2726 pthread_mutex_lock(&req->lock);
2727 pthread_mutex_lock(&req->se->lock);
2728 req->u.ni.func = func;
2729 req->u.ni.data = data;
2730 pthread_mutex_unlock(&req->se->lock);
2731 if (req->interrupted && func)
2732 func(req, data);
2733 pthread_mutex_unlock(&req->lock);
2734}
2735
2737{
2738 int interrupted;
2739
2740 pthread_mutex_lock(&req->se->lock);
2741 interrupted = req->interrupted;
2742 pthread_mutex_unlock(&req->se->lock);
2743
2744 return interrupted;
2745}
2746
/* Dispatch table mapping FUSE opcodes to handler functions and printable
 * names, indexed directly by opcode.  Gaps are zero-filled and rejected
 * with ENOSYS in fuse_session_process_buf_internal().  FUSE_NOTIFY_REPLY
 * uses the dummy non-NULL value 1 because it needs the extra buffer
 * argument and is dispatched specially, never through this pointer. */
static struct {
	void (*func)(fuse_req_t, fuse_ino_t, const void *);
	const char *name;
} fuse_ll_ops[] = {
	[FUSE_LOOKUP]	   = { do_lookup,      "LOOKUP"	     },
	[FUSE_FORGET]	   = { do_forget,      "FORGET"	     },
	[FUSE_GETATTR]	   = { do_getattr,     "GETATTR"     },
	[FUSE_SETATTR]	   = { do_setattr,     "SETATTR"     },
	[FUSE_READLINK]	   = { do_readlink,    "READLINK"    },
	[FUSE_SYMLINK]	   = { do_symlink,     "SYMLINK"     },
	[FUSE_MKNOD]	   = { do_mknod,       "MKNOD"	     },
	[FUSE_MKDIR]	   = { do_mkdir,       "MKDIR"	     },
	[FUSE_UNLINK]	   = { do_unlink,      "UNLINK"	     },
	[FUSE_RMDIR]	   = { do_rmdir,       "RMDIR"	     },
	[FUSE_RENAME]	   = { do_rename,      "RENAME"	     },
	[FUSE_LINK]	   = { do_link,	       "LINK"	     },
	[FUSE_OPEN]	   = { do_open,	       "OPEN"	     },
	[FUSE_READ]	   = { do_read,	       "READ"	     },
	[FUSE_WRITE]	   = { do_write,       "WRITE"	     },
	[FUSE_STATFS]	   = { do_statfs,      "STATFS"	     },
	[FUSE_RELEASE]	   = { do_release,     "RELEASE"     },
	[FUSE_FSYNC]	   = { do_fsync,       "FSYNC"	     },
	[FUSE_SETXATTR]	   = { do_setxattr,    "SETXATTR"    },
	[FUSE_GETXATTR]	   = { do_getxattr,    "GETXATTR"    },
	[FUSE_LISTXATTR]   = { do_listxattr,   "LISTXATTR"   },
	[FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
	[FUSE_FLUSH]	   = { do_flush,       "FLUSH"	     },
	[FUSE_INIT]	   = { do_init,	       "INIT"	     },
	[FUSE_OPENDIR]	   = { do_opendir,     "OPENDIR"     },
	[FUSE_READDIR]	   = { do_readdir,     "READDIR"     },
	[FUSE_RELEASEDIR]  = { do_releasedir,  "RELEASEDIR"  },
	[FUSE_FSYNCDIR]	   = { do_fsyncdir,    "FSYNCDIR"    },
	[FUSE_GETLK]	   = { do_getlk,       "GETLK"	     },
	[FUSE_SETLK]	   = { do_setlk,       "SETLK"	     },
	[FUSE_SETLKW]	   = { do_setlkw,      "SETLKW"	     },
	[FUSE_ACCESS]	   = { do_access,      "ACCESS"	     },
	[FUSE_CREATE]	   = { do_create,      "CREATE"	     },
	[FUSE_TMPFILE]	   = { do_tmpfile,     "TMPFILE"     },
	[FUSE_INTERRUPT]   = { do_interrupt,   "INTERRUPT"   },
	[FUSE_BMAP]	   = { do_bmap,	       "BMAP"	     },
	[FUSE_IOCTL]	   = { do_ioctl,       "IOCTL"	     },
	[FUSE_POLL]	   = { do_poll,	       "POLL"	     },
	[FUSE_FALLOCATE]   = { do_fallocate,   "FALLOCATE"   },
	[FUSE_DESTROY]	   = { do_destroy,     "DESTROY"     },
	[FUSE_NOTIFY_REPLY] = { (void *) 1,    "NOTIFY_REPLY" },
	[FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
	[FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
	[FUSE_RENAME2]     = { do_rename2,     "RENAME2"    },
	[FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
	[FUSE_LSEEK]	   = { do_lseek,       "LSEEK"	     },
	[CUSE_INIT]	   = { cuse_lowlevel_init, "CUSE_INIT"   },
};
2799
2800/*
2801 * For ABI compatibility we cannot allow higher values than CUSE_INIT.
2802 * Without ABI compatibility we could use the size of the array.
2803 * #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2804 */
2805#define FUSE_MAXOP (CUSE_INIT + 1)
2806
2807static const char *opname(enum fuse_opcode opcode)
2808{
2809 if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2810 return "???";
2811 else
2812 return fuse_ll_ops[opcode].name;
2813}
2814
2815static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2816 struct fuse_bufvec *src)
2817{
2818 ssize_t res = fuse_buf_copy(dst, src, 0);
2819 if (res < 0) {
2820 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2821 return res;
2822 }
2823 if ((size_t)res < fuse_buf_size(dst)) {
2824 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2825 return -1;
2826 }
2827 return 0;
2828}
2829
2830void fuse_session_process_buf(struct fuse_session *se,
2831 const struct fuse_buf *buf)
2832{
2833 fuse_session_process_buf_internal(se, buf, NULL);
2834}
2835
/* libfuse internal handler: parse one raw request from 'buf' (memory or
 * pipe backed), perform protocol-level checks, and dispatch it to the
 * matching handler in fuse_ll_ops.  'ch' is the per-thread channel, or
 * NULL for the session's main fd. */
void fuse_session_process_buf_internal(struct fuse_session *se,
		const struct fuse_buf *buf, struct fuse_chan *ch)
{
	/* Smallest prefix guaranteed to contain the request header plus a
	   write header - enough to decide how to dispatch. */
	const size_t write_header_size = sizeof(struct fuse_in_header) +
		sizeof(struct fuse_write_in);
	struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
	struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
	struct fuse_in_header *in;
	const void *inarg;
	struct fuse_req *req;
	void *mbuf = NULL;
	int err;
	int res;

	if (buf->flags & FUSE_BUF_IS_FD) {
		/* Request lives in a pipe: copy just the header prefix into
		   memory so the opcode can be examined. */
		if (buf->size < tmpbuf.buf[0].size)
			tmpbuf.buf[0].size = buf->size;

		mbuf = malloc(tmpbuf.buf[0].size);
		if (mbuf == NULL) {
			fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
			goto clear_pipe;
		}
		tmpbuf.buf[0].mem = mbuf;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		if (res < 0)
			goto clear_pipe;

		in = mbuf;
	} else {
		in = buf->mem;
	}

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG,
			"unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
			(unsigned long long) in->unique,
			opname((enum fuse_opcode) in->opcode), in->opcode,
			(unsigned long long) in->nodeid, buf->size, in->pid);
	}

	req = fuse_ll_alloc_req(se);
	if (req == NULL) {
		/* No memory for a request object: answer ENOMEM with a
		   stack-built header, bypassing the req machinery. */
		struct fuse_out_header out = {
			.unique = in->unique,
			.error = -ENOMEM,
		};
		struct iovec iov = {
			.iov_base = &out,
			.iov_len = sizeof(struct fuse_out_header),
		};

		fuse_send_msg(se, ch, &iov, 1);
		goto clear_pipe;
	}

	req->unique = in->unique;
	req->ctx.uid = in->uid;
	req->ctx.gid = in->gid;
	req->ctx.pid = in->pid;
	req->ch = ch ? fuse_chan_get(ch) : NULL;

	/* INIT must be the first message; nothing else is accepted before
	   it, and INIT may not be repeated after the handshake. */
	err = EIO;
	if (!se->got_init) {
		enum fuse_opcode expected;

		expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
		if (in->opcode != expected)
			goto reply_err;
	} else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
		goto reply_err;

	err = EACCES;
	/* Implement -o allow_root */
	if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
		 in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
		 in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
		 in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
		 in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
		 in->opcode != FUSE_NOTIFY_REPLY &&
		 in->opcode != FUSE_READDIRPLUS)
		goto reply_err;

	err = ENOSYS;
	if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
		goto reply_err;
	/* Do not process interrupt request */
	if (se->conn.no_interrupt && in->opcode == FUSE_INTERRUPT) {
		if (se->debug)
			fuse_log(FUSE_LOG_DEBUG, "FUSE_INTERRUPT: reply to kernel to disable interrupt\n");
		goto reply_err;
	}
	if (!se->conn.no_interrupt && in->opcode != FUSE_INTERRUPT) {
		struct fuse_req *intr;
		pthread_mutex_lock(&se->lock);
		intr = check_interrupt(se, req);
		list_add_req(req, &se->list);
		pthread_mutex_unlock(&se->lock);
		if (intr)
			fuse_reply_err(intr, EAGAIN);
	}

	if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
	    (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
	    in->opcode != FUSE_NOTIFY_REPLY) {
		void *newmbuf;

		/* The handler needs the whole request in memory: grow the
		   header buffer and copy the remaining payload out of the
		   pipe.  WRITE with write_buf and NOTIFY_REPLY consume the
		   pipe data directly and are skipped here. */
		err = ENOMEM;
		newmbuf = realloc(mbuf, buf->size);
		if (newmbuf == NULL)
			goto reply_err;
		mbuf = newmbuf;

		tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
		tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		err = -res;
		if (res < 0)
			goto reply_err;

		in = mbuf;
	}

	/* Arguments start right after the fixed-size in-header */
	inarg = (void *) &in[1];
	if (in->opcode == FUSE_WRITE && se->op.write_buf)
		do_write_buf(req, in->nodeid, inarg, buf);
	else if (in->opcode == FUSE_NOTIFY_REPLY)
		do_notify_reply(req, in->nodeid, inarg, buf);
	else
		fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);

out_free:
	free(mbuf);
	return;

reply_err:
	fuse_reply_err(req, err);
clear_pipe:
	if (buf->flags & FUSE_BUF_IS_FD)
		fuse_ll_clear_pipe(se);
	goto out_free;
}
2981
2982#define LL_OPTION(n,o,v) \
2983 { n, offsetof(struct fuse_session, o), v }
2984
2985static const struct fuse_opt fuse_ll_opts[] = {
2986 LL_OPTION("debug", debug, 1),
2987 LL_OPTION("-d", debug, 1),
2988 LL_OPTION("--debug", debug, 1),
2989 LL_OPTION("allow_root", deny_others, 1),
2991};
2992
2994{
2995 printf("using FUSE kernel interface version %i.%i\n",
2996 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2997 fuse_mount_version();
2998}
2999
3001{
3002 /* These are not all options, but the ones that are
3003 potentially of interest to an end-user */
3004 printf(
3005" -o allow_other allow access by all users\n"
3006" -o allow_root allow access by root\n"
3007" -o auto_unmount auto unmount on process termination\n");
3008}
3009
3010void fuse_session_destroy(struct fuse_session *se)
3011{
3012 struct fuse_ll_pipe *llp;
3013
3014 if (se->got_init && !se->got_destroy) {
3015 if (se->op.destroy)
3016 se->op.destroy(se->userdata);
3017 }
3018 llp = pthread_getspecific(se->pipe_key);
3019 if (llp != NULL)
3020 fuse_ll_pipe_free(llp);
3021 pthread_key_delete(se->pipe_key);
3022 pthread_mutex_destroy(&se->lock);
3023 free(se->cuse_data);
3024 if (se->fd != -1)
3025 close(se->fd);
3026 if (se->io != NULL)
3027 free(se->io);
3028 destroy_mount_opts(se->mo);
3029 free(se);
3030}
3031
3032
/* pthread key destructor: release the exiting thread's splice pipe. */
static void fuse_ll_pipe_destructor(void *data)
{
	fuse_ll_pipe_free(data);
}
3038
3039void fuse_buf_free(struct fuse_buf *buf)
3040{
3041 if (buf->mem == NULL)
3042 return;
3043
3044 size_t write_header_sz =
3045 sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in);
3046
3047 char *ptr = (char *)buf->mem - pagesize + write_header_sz;
3048 free(ptr);
3049 buf->mem = NULL;
3050}
3051
3052/*
3053 * This is used to allocate buffers that hold fuse requests
3054 */
3055static void *buf_alloc(size_t size, bool internal)
3056{
3057 /*
3058 * For libfuse internal caller add in alignment. That cannot be done
3059 * for an external caller, as it is not guaranteed that the external
3060 * caller frees the raw pointer.
3061 */
3062 if (internal) {
3063 size_t write_header_sz = sizeof(struct fuse_in_header) +
3064 sizeof(struct fuse_write_in);
3065 size_t new_size = ROUND_UP(size + write_header_sz, pagesize);
3066
3067 char *buf = aligned_alloc(pagesize, new_size);
3068 if (buf == NULL)
3069 return NULL;
3070
3071 buf += pagesize - write_header_sz;
3072
3073 return buf;
3074 } else {
3075 return malloc(size);
3076 }
3077}
3078
3079/*
3080 *@param internal true if called from libfuse internal code
3081 */
3082static int _fuse_session_receive_buf(struct fuse_session *se,
3083 struct fuse_buf *buf, struct fuse_chan *ch,
3084 bool internal)
3085{
3086 int err;
3087 ssize_t res;
3088 size_t bufsize;
3089#ifdef HAVE_SPLICE
3090 struct fuse_ll_pipe *llp;
3091 struct fuse_buf tmpbuf;
3092
3093pipe_retry:
3094 bufsize = se->bufsize;
3095
3096 if (se->conn.proto_minor < 14 ||
3097 !(se->conn.want_ext & FUSE_CAP_SPLICE_READ))
3098 goto fallback;
3099
3100 llp = fuse_ll_get_pipe(se);
3101 if (llp == NULL)
3102 goto fallback;
3103
3104 if (llp->size < bufsize) {
3105 if (llp->can_grow) {
3106 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
3107 if (res == -1) {
3108 llp->can_grow = 0;
3109 res = grow_pipe_to_max(llp->pipe[0]);
3110 if (res > 0)
3111 llp->size = res;
3112 goto fallback;
3113 }
3114 llp->size = res;
3115 }
3116 if (llp->size < bufsize)
3117 goto fallback;
3118 }
3119
3120 if (se->io != NULL && se->io->splice_receive != NULL) {
3121 res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
3122 llp->pipe[1], NULL, bufsize, 0,
3123 se->userdata);
3124 } else {
3125 res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
3126 bufsize, 0);
3127 }
3128 err = errno;
3129
3130 if (fuse_session_exited(se))
3131 return 0;
3132
3133 if (res == -1) {
3134 if (err == ENODEV) {
3135 /* Filesystem was unmounted, or connection was aborted
3136 via /sys/fs/fuse/connections */
3138 return 0;
3139 }
3140
3141 /* FUSE_INIT might have increased the required bufsize */
3142 if (err == EINVAL && bufsize < se->bufsize) {
3143 fuse_ll_clear_pipe(se);
3144 goto pipe_retry;
3145 }
3146
3147 if (err != EINTR && err != EAGAIN)
3148 perror("fuse: splice from device");
3149 return -err;
3150 }
3151
3152 if (res < sizeof(struct fuse_in_header)) {
3153 fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
3154 return -EIO;
3155 }
3156
3157 tmpbuf = (struct fuse_buf){
3158 .size = res,
3159 .flags = FUSE_BUF_IS_FD,
3160 .fd = llp->pipe[0],
3161 };
3162
3163 /*
3164 * Don't bother with zero copy for small requests.
3165 * fuse_loop_mt() needs to check for FORGET so this more than
3166 * just an optimization.
3167 */
3168 if (res < sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in) +
3169 pagesize) {
3170 struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
3171 struct fuse_bufvec dst = { .count = 1 };
3172
3173 if (!buf->mem) {
3174 buf->mem = buf_alloc(bufsize, internal);
3175 if (!buf->mem) {
3176 fuse_log(
3177 FUSE_LOG_ERR,
3178 "fuse: failed to allocate read buffer\n");
3179 return -ENOMEM;
3180 }
3181 buf->mem_size = bufsize;
3182 }
3183 buf->size = bufsize;
3184 buf->flags = 0;
3185 dst.buf[0] = *buf;
3186
3187 res = fuse_buf_copy(&dst, &src, 0);
3188 if (res < 0) {
3189 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
3190 strerror(-res));
3191 fuse_ll_clear_pipe(se);
3192 return res;
3193 }
3194 if (res < tmpbuf.size) {
3195 fuse_log(FUSE_LOG_ERR,
3196 "fuse: copy from pipe: short read\n");
3197 fuse_ll_clear_pipe(se);
3198 return -EIO;
3199 }
3200 assert(res == tmpbuf.size);
3201
3202 } else {
3203 /* Don't overwrite buf->mem, as that would cause a leak */
3204 buf->fd = tmpbuf.fd;
3205 buf->flags = tmpbuf.flags;
3206 }
3207 buf->size = tmpbuf.size;
3208
3209 return res;
3210
3211fallback:
3212#endif
3213 bufsize = internal ? buf->mem_size : se->bufsize;
3214 if (!buf->mem) {
3215 bufsize = se->bufsize; /* might have changed */
3216 buf->mem = buf_alloc(bufsize, internal);
3217 if (!buf->mem) {
3218 fuse_log(FUSE_LOG_ERR,
3219 "fuse: failed to allocate read buffer\n");
3220 return -ENOMEM;
3221 }
3222
3223 if (internal)
3224 buf->mem_size = bufsize;
3225 }
3226
3227restart:
3228 if (se->io != NULL) {
3229 /* se->io->read is never NULL if se->io is not NULL as
3230 specified by fuse_session_custom_io()*/
3231 res = se->io->read(ch ? ch->fd : se->fd, buf->mem, bufsize,
3232 se->userdata);
3233 } else {
3234 res = read(ch ? ch->fd : se->fd, buf->mem, bufsize);
3235 }
3236 err = errno;
3237
3238 if (fuse_session_exited(se))
3239 return 0;
3240 if (res == -1) {
3241 if (err == EINVAL && internal && se->bufsize > bufsize) {
3242 /* FUSE_INIT might have increased the required bufsize */
3243 bufsize = se->bufsize;
3244 void *newbuf = buf_alloc(bufsize, internal);
3245 if (!newbuf) {
3246 fuse_log(
3247 FUSE_LOG_ERR,
3248 "fuse: failed to (re)allocate read buffer\n");
3249 return -ENOMEM;
3250 }
3251 fuse_buf_free(buf);
3252 buf->mem = newbuf;
3253 buf->mem_size = bufsize;
3254 goto restart;
3255 }
3256
3257 /* ENOENT means the operation was interrupted, it's safe
3258 to restart */
3259 if (err == ENOENT)
3260 goto restart;
3261
3262 if (err == ENODEV) {
3263 /* Filesystem was unmounted, or connection was aborted
3264 via /sys/fs/fuse/connections */
3266 return 0;
3267 }
3268 /* Errors occurring during normal operation: EINTR (read
3269 interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
3270 umounted) */
3271 if (err != EINTR && err != EAGAIN)
3272 perror("fuse: reading device");
3273 return -err;
3274 }
3275 if ((size_t)res < sizeof(struct fuse_in_header)) {
3276 fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
3277 return -EIO;
3278 }
3279
3280 buf->size = res;
3281
3282 return res;
3283}
3284
3285int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
3286{
3287 return _fuse_session_receive_buf(se, buf, NULL, false);
3288}
3289
3290/* libfuse internal handler */
3291int fuse_session_receive_buf_internal(struct fuse_session *se,
3292 struct fuse_buf *buf,
3293 struct fuse_chan *ch)
3294{
3295 /*
3296 * if run internally thread buffers are from libfuse - we can
3297 * reallocate them
3298 */
3299 if (unlikely(!se->got_init) && !se->buf_reallocable)
3300 se->buf_reallocable = true;
3301
3302 return _fuse_session_receive_buf(se, buf, ch, true);
3303}
3304
/* Create a new low-level session: allocate the session object, parse the
 * generic and mount options from 'args', and initialize bookkeeping.
 * 'version' records the libfuse version the server was compiled against.
 * Returns NULL on error; on the unknown-option path 'args' is freed. */
struct fuse_session *
fuse_session_new_versioned(struct fuse_args *args,
			   const struct fuse_lowlevel_ops *op, size_t op_size,
			   struct libfuse_version *version, void *userdata)
{
	int err;
	struct fuse_session *se;
	struct mount_opts *mo;

	/* A larger op_size means the caller was built against a newer
	   libfuse; copy only the part this library knows about. */
	if (sizeof(struct fuse_lowlevel_ops) < op_size) {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
		op_size = sizeof(struct fuse_lowlevel_ops);
	}

	if (args->argc == 0) {
		fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
		return NULL;
	}

	se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
	if (se == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
		goto out1;
	}
	se->fd = -1;
	se->conn.max_write = FUSE_DEFAULT_MAX_PAGES_LIMIT * getpagesize();
	/* Read buffer needs room for the largest write plus headers */
	se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
	se->conn.max_readahead = UINT_MAX;

	/* Parse options */
	if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
		goto out2;
	if(se->deny_others) {
		/* Allowing access only by root is done by instructing
		 * kernel to allow access by everyone, and then restricting
		 * access to root and mountpoint owner in libfuse.
		 */
		// We may be adding the option a second time, but
		// that doesn't hurt.
		if(fuse_opt_add_arg(args, "-oallow_other") == -1)
			goto out2;
	}
	mo = parse_mount_opts(args);
	if (mo == NULL)
		goto out3;

	/* After option parsing only argv[0] should remain; anything else
	   is an unrecognized option. */
	if(args->argc == 1 &&
	   args->argv[0][0] == '-') {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
			"will be ignored\n");
	} else if (args->argc != 1) {
		int i;
		fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
		for(i = 1; i < args->argc-1; i++)
			fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
		fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
		goto out4;
	}

	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);

	list_init_req(&se->list);
	list_init_req(&se->interrupts);
	list_init_nreq(&se->notify_list);
	/* Notify ids start at 1; 0 is reserved for "no request" */
	se->notify_ctr = 1;
	pthread_mutex_init(&se->lock, NULL);

	err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
	if (err) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
			strerror(err));
		goto out5;
	}

	memcpy(&se->op, op, op_size);
	se->owner = getuid();
	se->userdata = userdata;

	se->mo = mo;

	/* Fuse server application should pass the version it was compiled
	 * against and pass it. If a libfuse version accidentally introduces an
	 * ABI incompatibility, it might be possible to 'fix' that at run time,
	 * by checking the version numbers.
	 */
	se->version = *version;

	return se;

out5:
	pthread_mutex_destroy(&se->lock);
out4:
	fuse_opt_free_args(args);
out3:
	if (mo != NULL)
		destroy_mount_opts(mo);
out2:
	free(se);
out1:
	return NULL;
}
3407
3408struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3409 const struct fuse_lowlevel_ops *op,
3410 size_t op_size, void *userdata);
3411struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3412 const struct fuse_lowlevel_ops *op,
3413 size_t op_size,
3414 void *userdata)
3415{
3416 /* unknown version */
3417 struct libfuse_version version = { 0 };
3418
3419 return fuse_session_new_versioned(args, op, op_size, &version,
3420 userdata);
3421}
3422
3423FUSE_SYMVER("fuse_session_custom_io_317", "fuse_session_custom_io@@FUSE_3.17")
3424int fuse_session_custom_io_317(struct fuse_session *se,
3425 const struct fuse_custom_io *io, size_t op_size, int fd)
3426{
3427 if (sizeof(struct fuse_custom_io) < op_size) {
3428 fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
3429 op_size = sizeof(struct fuse_custom_io);
3430 }
3431
3432 if (fd < 0) {
3433 fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3434 "fuse_session_custom_io()\n", fd);
3435 return -EBADF;
3436 }
3437 if (io == NULL) {
3438 fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3439 "fuse_session_custom_io()\n");
3440 return -EINVAL;
3441 } else if (io->read == NULL || io->writev == NULL) {
3442 /* If the user provides their own file descriptor, we can't
3443 guarantee that the default behavior of the io operations made
3444 in libfuse will function properly. Therefore, we enforce the
3445 user to implement these io operations when using custom io. */
3446 fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3447 "implement both io->read() and io->writev\n");
3448 return -EINVAL;
3449 }
3450
3451 se->io = calloc(1, sizeof(struct fuse_custom_io));
3452 if (se->io == NULL) {
3453 fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3454 "Error: %s\n", strerror(errno));
3455 return -errno;
3456 }
3457
3458 se->fd = fd;
3459 memcpy(se->io, io, op_size);
3460 return 0;
3461}
3462
3463int fuse_session_custom_io_30(struct fuse_session *se,
3464 const struct fuse_custom_io *io, int fd);
3465FUSE_SYMVER("fuse_session_custom_io_30", "fuse_session_custom_io@FUSE_3.0")
3466int fuse_session_custom_io_30(struct fuse_session *se,
3467 const struct fuse_custom_io *io, int fd)
3468{
3469 return fuse_session_custom_io_317(se, io,
3470 offsetof(struct fuse_custom_io, clone_fd), fd);
3471}
3472
3473int fuse_session_mount(struct fuse_session *se, const char *_mountpoint)
3474{
3475 int fd;
3476 char *mountpoint;
3477
3478 if (_mountpoint == NULL) {
3479 fuse_log(FUSE_LOG_ERR, "Invalid null-ptr mountpoint!\n");
3480 return -1;
3481 }
3482
3483 mountpoint = strdup(_mountpoint);
3484 if (mountpoint == NULL) {
3485 fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for mountpoint. Error: %s\n",
3486 strerror(errno));
3487 return -1;
3488 }
3489
3490 /*
3491 * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
3492 * would ensue.
3493 */
3494 do {
3495 fd = open("/dev/null", O_RDWR);
3496 if (fd > 2)
3497 close(fd);
3498 } while (fd >= 0 && fd <= 2);
3499
3500 /*
3501 * To allow FUSE daemons to run without privileges, the caller may open
3502 * /dev/fuse before launching the file system and pass on the file
3503 * descriptor by specifying /dev/fd/N as the mount point. Note that the
3504 * parent process takes care of performing the mount in this case.
3505 */
3506 fd = fuse_mnt_parse_fuse_fd(mountpoint);
3507 if (fd != -1) {
3508 if (fcntl(fd, F_GETFD) == -1) {
3509 fuse_log(FUSE_LOG_ERR,
3510 "fuse: Invalid file descriptor /dev/fd/%u\n",
3511 fd);
3512 goto error_out;
3513 }
3514 se->fd = fd;
3515 return 0;
3516 }
3517
3518 /* Open channel */
3519 fd = fuse_kern_mount(mountpoint, se->mo);
3520 if (fd == -1)
3521 goto error_out;
3522 se->fd = fd;
3523
3524 /* Save mountpoint */
3525 se->mountpoint = mountpoint;
3526
3527 return 0;
3528
3529error_out:
3530 free(mountpoint);
3531 return -1;
3532}
3533
/* Return the session's device file descriptor (set by fuse_session_mount()
 * or fuse_session_custom_io()); -1 after fuse_session_unmount(). */
int fuse_session_fd(struct fuse_session *se)
{
	return se->fd;
}
3538
/*
 * Unmount the filesystem (if this session performed the mount) and
 * invalidate the device fd.  The mountpoint pointer is swapped out
 * atomically so that concurrent callers cannot both reach
 * fuse_kern_unmount()/free() for the same path.
 *
 * NOTE(review): se->mountpoint is used with atomic_exchange() without
 * being visibly declared _Atomic here — presumably relies on a
 * compiler extension; verify against the declaration in fuse_i.h.
 */
void fuse_session_unmount(struct fuse_session *se)
{
	if (se->mountpoint != NULL) {
		char *mountpoint = atomic_exchange(&se->mountpoint, NULL);

		fuse_kern_unmount(mountpoint, se->fd);
		se->fd = -1;
		free(mountpoint);
	}
}
3549
3550#ifdef linux
3551int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3552{
3553 char *buf;
3554 size_t bufsize = 1024;
3555 char path[128];
3556 int ret;
3557 int fd;
3558 unsigned long pid = req->ctx.pid;
3559 char *s;
3560
3561 sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3562
3563retry:
3564 buf = malloc(bufsize);
3565 if (buf == NULL)
3566 return -ENOMEM;
3567
3568 ret = -EIO;
3569 fd = open(path, O_RDONLY);
3570 if (fd == -1)
3571 goto out_free;
3572
3573 ret = read(fd, buf, bufsize);
3574 close(fd);
3575 if (ret < 0) {
3576 ret = -EIO;
3577 goto out_free;
3578 }
3579
3580 if ((size_t)ret == bufsize) {
3581 free(buf);
3582 bufsize *= 4;
3583 goto retry;
3584 }
3585
3586 buf[ret] = '\0';
3587 ret = -EIO;
3588 s = strstr(buf, "\nGroups:");
3589 if (s == NULL)
3590 goto out_free;
3591
3592 s += 8;
3593 ret = 0;
3594 while (1) {
3595 char *end;
3596 unsigned long val = strtoul(s, &end, 0);
3597 if (end == s)
3598 break;
3599
3600 s = end;
3601 if (ret < size)
3602 list[ret] = val;
3603 ret++;
3604 }
3605
3606out_free:
3607 free(buf);
3608 return ret;
3609}
3610#else /* linux */
3611/*
3612 * This is currently not implemented on other than Linux...
3613 */
3614int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3615{
3616 (void) req; (void) size; (void) list;
3617 return -ENOSYS;
3618}
3619#endif
3620
/* Prevent spurious data race warning - we don't care
 * about races for this flag */
__attribute__((no_sanitize_thread))
/* Request that the session loop terminate: workers poll this flag via
 * fuse_session_exited(). */
void fuse_session_exit(struct fuse_session *se)
{
	se->exited = 1;
}
3628
__attribute__((no_sanitize_thread))
/* Clear the exit request and any stored loop error so the session
 * loop can be entered again (e.g. after signal-driven shutdown). */
void fuse_session_reset(struct fuse_session *se)
{
	se->exited = 0;
	se->error = 0;
}
3635
__attribute__((no_sanitize_thread))
/* Nonzero once fuse_session_exit() has been called (racy read by design;
 * see the no_sanitize_thread rationale above fuse_session_exit()). */
int fuse_session_exited(struct fuse_session *se)
{
	return se->exited;
}
#define FUSE_CAP_IOCTL_DIR
#define FUSE_CAP_DONT_MASK
void fuse_unset_feature_flag(struct fuse_conn_info *conn, uint64_t flag)
int fuse_convert_to_conn_want_ext(struct fuse_conn_info *conn)
bool fuse_set_feature_flag(struct fuse_conn_info *conn, uint64_t flag)
#define FUSE_CAP_HANDLE_KILLPRIV
#define FUSE_CAP_AUTO_INVAL_DATA
#define FUSE_CAP_HANDLE_KILLPRIV_V2
#define FUSE_CAP_SPLICE_READ
#define FUSE_CAP_PARALLEL_DIROPS
size_t fuse_buf_size(const struct fuse_bufvec *bufv)
Definition buffer.c:22
#define FUSE_CAP_WRITEBACK_CACHE
#define FUSE_CAP_EXPIRE_ONLY
#define FUSE_CAP_ATOMIC_O_TRUNC
#define FUSE_CAP_ASYNC_READ
#define FUSE_CAP_SPLICE_WRITE
#define FUSE_CAP_CACHE_SYMLINKS
#define FUSE_CAP_POSIX_ACL
@ FUSE_BUF_IS_FD
#define FUSE_CAP_EXPORT_SUPPORT
#define FUSE_CAP_POSIX_LOCKS
#define FUSE_CAP_EXPLICIT_INVAL_DATA
#define FUSE_CAP_READDIRPLUS_AUTO
ssize_t fuse_buf_copy(struct fuse_bufvec *dst, struct fuse_bufvec *src, enum fuse_buf_copy_flags flags)
Definition buffer.c:284
#define FUSE_CAP_NO_OPENDIR_SUPPORT
#define FUSE_CAP_ASYNC_DIO
bool fuse_get_feature_flag(struct fuse_conn_info *conn, uint64_t flag)
#define FUSE_CAP_PASSTHROUGH
#define FUSE_CAP_DIRECT_IO_ALLOW_MMAP
#define FUSE_CAP_NO_OPEN_SUPPORT
#define FUSE_CAP_READDIRPLUS
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
fuse_buf_copy_flags
@ FUSE_BUF_SPLICE_NONBLOCK
@ FUSE_BUF_FORCE_SPLICE
@ FUSE_BUF_NO_SPLICE
@ FUSE_BUF_SPLICE_MOVE
#define FUSE_CAP_SETXATTR_EXT
#define FUSE_CAP_SPLICE_MOVE
#define FUSE_CAP_NO_EXPORT_SUPPORT
#define FUSE_CAP_FLOCK_LOCKS
void fuse_log(enum fuse_log_level level, const char *fmt,...)
Definition fuse_log.c:77
void fuse_session_destroy(struct fuse_session *se)
fuse_notify_entry_flags
int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags)
int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *fi)
void fuse_session_exit(struct fuse_session *se)
void(* fuse_interrupt_func_t)(fuse_req_t req, void *data)
int fuse_reply_poll(fuse_req_t req, unsigned revents)
int fuse_reply_err(fuse_req_t req, int err)
const struct fuse_ctx * fuse_req_ctx(fuse_req_t req)
void * fuse_req_userdata(fuse_req_t req)
int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
struct fuse_req * fuse_req_t
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize, const char *name, const struct fuse_entry_param *e, off_t off)
int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov, int count)
int fuse_lowlevel_notify_delete(struct fuse_session *se, fuse_ino_t parent, fuse_ino_t child, const char *name, size_t namelen)
void fuse_session_process_buf(struct fuse_session *se, const struct fuse_buf *buf)
int fuse_session_exited(struct fuse_session *se)
int fuse_session_fd(struct fuse_session *se)
int fuse_req_interrupted(fuse_req_t req)
int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino, size_t size, off_t offset, void *cookie)
int fuse_reply_readlink(fuse_req_t req, const char *link)
int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
void fuse_session_unmount(struct fuse_session *se)
void fuse_reply_none(fuse_req_t req)
int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent, const char *name, size_t namelen)
int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, size_t in_count, const struct iovec *out_iov, size_t out_count)
void fuse_lowlevel_help(void)
int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino, off_t off, off_t len)
int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
int fuse_reply_write(fuse_req_t req, size_t count)
int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent, const char *name, size_t namelen)
void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func, void *data)
void fuse_session_reset(struct fuse_session *se)
int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e, const struct fuse_file_info *fi)
int fuse_reply_lseek(fuse_req_t req, off_t off)
void fuse_lowlevel_version(void)
uint64_t fuse_ino_t
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize, const char *name, const struct stat *stbuf, off_t off)
int fuse_reply_attr(fuse_req_t req, const struct stat *attr, double attr_timeout)
int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
int fuse_passthrough_open(fuse_req_t req, int fd)
int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino, off_t offset, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags)
int fuse_reply_xattr(fuse_req_t req, size_t count)
int fuse_opt_add_arg(struct fuse_args *args, const char *arg)
Definition fuse_opt.c:55
void fuse_opt_free_args(struct fuse_args *args)
Definition fuse_opt.c:34
int fuse_opt_parse(struct fuse_args *args, void *data, const struct fuse_opt opts[], fuse_opt_proc_t proc)
Definition fuse_opt.c:398
#define FUSE_OPT_END
Definition fuse_opt.h:104
char ** argv
Definition fuse_opt.h:114
enum fuse_buf_flags flags
size_t mem_size
void * mem
size_t size
struct fuse_buf buf[1]
uint64_t capable_ext
uint64_t want_ext
double entry_timeout
fuse_ino_t ino
uint64_t generation
double attr_timeout
struct stat attr
uint64_t lock_owner
uint32_t writepage
Definition fuse_common.h:66
uint32_t poll_events
uint32_t cache_readdir
Definition fuse_common.h:95
uint32_t nonseekable
Definition fuse_common.h:84
int32_t backing_id
uint32_t parallel_direct_writes
uint32_t noflush
Definition fuse_common.h:99
uint32_t flush
Definition fuse_common.h:80
uint32_t direct_io
Definition fuse_common.h:69
uint32_t keep_cache
Definition fuse_common.h:75