/* libfuse — fuse_lowlevel.c */
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4
5 Implementation of (most of) the low-level FUSE API. The session loop
6 functions are implemented in separate files.
7
8 This program can be distributed under the terms of the GNU LGPLv2.
9 See the file COPYING.LIB
10*/
11
12#define _GNU_SOURCE
13
14#include "fuse_config.h"
15#include "fuse_i.h"
16#include "fuse_kernel.h"
17#include "fuse_opt.h"
18#include "fuse_misc.h"
19#include "mount_util.h"
20#include "util.h"
21
22#include <stdint.h>
23#include <stdbool.h>
24#include <stdio.h>
25#include <stdlib.h>
26#include <stddef.h>
27#include <stdalign.h>
28#include <string.h>
29#include <unistd.h>
30#include <limits.h>
31#include <errno.h>
32#include <assert.h>
33#include <sys/file.h>
34#include <sys/ioctl.h>
35
36#ifndef F_LINUX_SPECIFIC_BASE
37#define F_LINUX_SPECIFIC_BASE 1024
38#endif
39#ifndef F_SETPIPE_SZ
40#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
41#endif
42
43
44#define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
45#define OFFSET_MAX 0x7fffffffffffffffLL
46
47#define container_of(ptr, type, member) ({ \
48 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
49 (type *)( (char *)__mptr - offsetof(type,member) );})
50
/* Handle identifying an outstanding poll request: kernel-assigned
   unique handle plus the session it belongs to. */
struct fuse_pollhandle {
	uint64_t kh;		/* kernel poll handle */
	struct fuse_session *se;
};
55
/* System page size, cached once at load time. */
static size_t pagesize;

/* Runs before main() (ELF constructor) so pagesize is always valid. */
static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
{
	pagesize = getpagesize();
}
62
63static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
64{
65 attr->ino = stbuf->st_ino;
66 attr->mode = stbuf->st_mode;
67 attr->nlink = stbuf->st_nlink;
68 attr->uid = stbuf->st_uid;
69 attr->gid = stbuf->st_gid;
70 attr->rdev = stbuf->st_rdev;
71 attr->size = stbuf->st_size;
72 attr->blksize = stbuf->st_blksize;
73 attr->blocks = stbuf->st_blocks;
74 attr->atime = stbuf->st_atime;
75 attr->mtime = stbuf->st_mtime;
76 attr->ctime = stbuf->st_ctime;
77 attr->atimensec = ST_ATIM_NSEC(stbuf);
78 attr->mtimensec = ST_MTIM_NSEC(stbuf);
79 attr->ctimensec = ST_CTIM_NSEC(stbuf);
80}
81
82static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
83{
84 stbuf->st_mode = attr->mode;
85 stbuf->st_uid = attr->uid;
86 stbuf->st_gid = attr->gid;
87 stbuf->st_size = attr->size;
88 stbuf->st_atime = attr->atime;
89 stbuf->st_mtime = attr->mtime;
90 stbuf->st_ctime = attr->ctime;
91 ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
92 ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
93 ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
94}
95
/* Total number of payload bytes described by an iovec array. */
static size_t iov_length(const struct iovec *iov, size_t count)
{
	const struct iovec *end = iov + count;
	size_t total = 0;

	while (iov < end)
		total += (iov++)->iov_len;

	return total;
}
105
106static void list_init_req(struct fuse_req *req)
107{
108 req->next = req;
109 req->prev = req;
110}
111
112static void list_del_req(struct fuse_req *req)
113{
114 struct fuse_req *prev = req->prev;
115 struct fuse_req *next = req->next;
116 prev->next = next;
117 next->prev = prev;
118}
119
120static void list_add_req(struct fuse_req *req, struct fuse_req *next)
121{
122 struct fuse_req *prev = next->prev;
123 req->next = next;
124 req->prev = prev;
125 prev->next = req;
126 next->prev = req;
127}
128
/* Final teardown of a request: the channel reference must already
   have been dropped (req->ch cleared) by fuse_free_req(). */
static void destroy_req(fuse_req_t req)
{
	assert(req->ch == NULL);
	pthread_mutex_destroy(&req->lock);
	free(req);
}
135
/*
 * Drop one reference on *req*, releasing its channel reference, and
 * destroy the request once the count reaches zero.
 *
 * When interrupts are supported (no_interrupt unset) the request is
 * linked on the session's interrupt list, so clearing its interrupt
 * callback and unlinking it must happen under se->lock.
 */
void fuse_free_req(fuse_req_t req)
{
	int ctr;
	struct fuse_session *se = req->se;

	if (se->conn.no_interrupt) {
		/* Request is not on any list: no locking needed */
		ctr = --req->ref_cnt;
		fuse_chan_put(req->ch);
		req->ch = NULL;
	} else {
		pthread_mutex_lock(&se->lock);
		req->u.ni.func = NULL;
		req->u.ni.data = NULL;
		list_del_req(req);
		ctr = --req->ref_cnt;
		fuse_chan_put(req->ch);
		req->ch = NULL;
		pthread_mutex_unlock(&se->lock);
	}
	if (!ctr)
		destroy_req(req);
}
158
159static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
160{
161 struct fuse_req *req;
162
163 req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
164 if (req == NULL) {
165 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
166 } else {
167 req->se = se;
168 req->ref_cnt = 1;
169 list_init_req(req);
170 pthread_mutex_init(&req->lock, NULL);
171 }
172
173 return req;
174}
175
/* Send data. If *ch* is NULL, send via session master fd */
static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
			 struct iovec *iov, int count)
{
	/* Caller always places the reply header in iov[0] */
	struct fuse_out_header *out = iov[0].iov_base;

	assert(se != NULL);
	out->len = iov_length(iov, count);
	if (se->debug) {
		if (out->unique == 0) {
			/* unique == 0 marks an unsolicited notification */
			fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
				 out->error, out->len);
		} else if (out->error) {
			fuse_log(FUSE_LOG_DEBUG,
				 " unique: %llu, error: %i (%s), outsize: %i\n",
				 (unsigned long long) out->unique, out->error,
				 strerror(-out->error), out->len);
		} else {
			fuse_log(FUSE_LOG_DEBUG,
				 " unique: %llu, success, outsize: %i\n",
				 (unsigned long long) out->unique, out->len);
		}
	}

	ssize_t res;
	if (se->io != NULL)
		/* se->io->writev is never NULL if se->io is not NULL as
		   specified by fuse_session_custom_io()*/
		res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
				     se->userdata);
	else
		res = writev(ch ? ch->fd : se->fd, iov, count);

	/* Capture errno right away, before any other call can clobber it */
	int err = errno;

	if (res == -1) {
		/* ENOENT means the operation was interrupted */
		if (!fuse_session_exited(se) && err != ENOENT)
			perror("fuse: writing device");
		return -err;
	}

	return 0;
}
220
221
/* Build the reply header for *req* in iov[0] and send it; does NOT
   free the request (callers that want that use send_reply_iov()). */
int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
			       int count)
{
	struct fuse_out_header out;

	/* Validate the error value: replies must carry 0 or a negative
	   errno the kernel can interpret. */
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
	/* glibc >= 2.32: reject values with no strerror description */
	const char *str = strerrordesc_np(error * -1);
	if ((str == NULL && error != 0) || error > 0) {
#else
	if (error <= -1000 || error > 0) {
#endif
		fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
		error = -ERANGE;
	}

	out.unique = req->unique;
	out.error = error;

	/* Slot 0 always carries the reply header */
	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(struct fuse_out_header);

	return fuse_send_msg(req->se, req->ch, iov, count);
}
245
246static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
247 int count)
248{
249 int res;
250
251 res = fuse_send_reply_iov_nofree(req, error, iov, count);
252 fuse_free_req(req);
253 return res;
254}
255
256static int send_reply(fuse_req_t req, int error, const void *arg,
257 size_t argsize)
258{
259 struct iovec iov[2];
260 int count = 1;
261 if (argsize) {
262 iov[1].iov_base = (void *) arg;
263 iov[1].iov_len = argsize;
264 count++;
265 }
266 return send_reply_iov(req, error, iov, count);
267}
268
269int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
270{
271 int res;
272 struct iovec *padded_iov;
273
274 padded_iov = malloc((count + 1) * sizeof(struct iovec));
275 if (padded_iov == NULL)
276 return fuse_reply_err(req, ENOMEM);
277
278 memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
279 count++;
280
281 res = send_reply_iov(req, 0, padded_iov, count);
282 free(padded_iov);
283
284 return res;
285}
286
287
/* `buf` is allowed to be empty so that the proper size may be
   allocated by the caller */
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
			 const char *name, const struct stat *stbuf, off_t off)
{
	(void)req;
	size_t namelen;
	size_t entlen;
	size_t entlen_padded;
	struct fuse_dirent *dirent;

	namelen = strlen(name);
	entlen = FUSE_NAME_OFFSET + namelen;
	/* Entries are padded to 8-byte alignment on the wire */
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);

	/* Size probe, or not enough room: just report the required size */
	if ((buf == NULL) || (entlen_padded > bufsize))
		return entlen_padded;

	dirent = (struct fuse_dirent*) buf;
	dirent->ino = stbuf->st_ino;
	dirent->off = off;
	dirent->namelen = namelen;
	/* d_type encoding: file-type bits of st_mode shifted down */
	dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
	memcpy(dirent->name, name, namelen);
	/* Zero the alignment padding after the (unterminated) name */
	memset(dirent->name + namelen, 0, entlen_padded - entlen);

	return entlen_padded;
}
316
317static void convert_statfs(const struct statvfs *stbuf,
318 struct fuse_kstatfs *kstatfs)
319{
320 kstatfs->bsize = stbuf->f_bsize;
321 kstatfs->frsize = stbuf->f_frsize;
322 kstatfs->blocks = stbuf->f_blocks;
323 kstatfs->bfree = stbuf->f_bfree;
324 kstatfs->bavail = stbuf->f_bavail;
325 kstatfs->files = stbuf->f_files;
326 kstatfs->ffree = stbuf->f_ffree;
327 kstatfs->namelen = stbuf->f_namemax;
328}
329
/* Convenience wrapper: successful reply carrying *arg* as payload. */
static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
{
	return send_reply(req, 0, arg, argsize);
}
334
/* Reply with a (positive) errno value; negated for the kernel ABI. */
int fuse_reply_err(fuse_req_t req, int err)
{
	return send_reply(req, -err, NULL, 0);
}
339
341{
342 fuse_free_req(req);
343}
344
/* Whole-seconds part of a timeout given as double seconds,
   clamped to [0, ULONG_MAX]. */
static unsigned long calc_timeout_sec(double t)
{
	if (t < 0.0)
		return 0;
	if (t > (double) ULONG_MAX)
		return ULONG_MAX;
	return (unsigned long) t;
}
354
/* Nanoseconds part of a timeout given as double seconds,
   clamped to [0, 999999999]. */
static unsigned int calc_timeout_nsec(double t)
{
	double frac = t - (double) calc_timeout_sec(t);

	if (frac < 0.0)
		return 0;
	if (frac >= 0.999999999)
		return 999999999;
	return (unsigned int) (frac * 1.0e9);
}
365
/* Populate a fuse_entry_out reply from a fuse_entry_param, splitting
   each timeout into second/nanosecond parts. */
static void fill_entry(struct fuse_entry_out *arg,
		       const struct fuse_entry_param *e)
{
	arg->nodeid = e->ino;
	arg->generation = e->generation;
	arg->entry_valid = calc_timeout_sec(e->entry_timeout);
	arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
	arg->attr_valid = calc_timeout_sec(e->attr_timeout);
	arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
	convert_stat(&e->attr, &arg->attr);
}
377
/* `buf` is allowed to be empty so that the proper size may be
   allocated by the caller */
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
			      const char *name,
			      const struct fuse_entry_param *e, off_t off)
{
	(void)req;
	size_t namelen;
	size_t entlen;
	size_t entlen_padded;

	namelen = strlen(name);
	entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
	/* Entries are padded to 8-byte alignment on the wire */
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);
	/* Size probe, or not enough room: just report the required size */
	if ((buf == NULL) || (entlen_padded > bufsize))
		return entlen_padded;

	/* A direntplus is an entry_out (lookup result) + a dirent */
	struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
	memset(&dp->entry_out, 0, sizeof(dp->entry_out));
	fill_entry(&dp->entry_out, e);

	struct fuse_dirent *dirent = &dp->dirent;
	dirent->ino = e->attr.st_ino;
	dirent->off = off;
	dirent->namelen = namelen;
	/* d_type encoding: file-type bits of st_mode shifted down */
	dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
	memcpy(dirent->name, name, namelen);
	/* Zero the alignment padding after the (unterminated) name */
	memset(dirent->name + namelen, 0, entlen_padded - entlen);

	return entlen_padded;
}
409
410static void fill_open(struct fuse_open_out *arg,
411 const struct fuse_file_info *f)
412{
413 arg->fh = f->fh;
414 if (f->backing_id > 0) {
415 arg->backing_id = f->backing_id;
416 arg->open_flags |= FOPEN_PASSTHROUGH;
417 }
418 if (f->direct_io)
419 arg->open_flags |= FOPEN_DIRECT_IO;
420 if (f->keep_cache)
421 arg->open_flags |= FOPEN_KEEP_CACHE;
422 if (f->cache_readdir)
423 arg->open_flags |= FOPEN_CACHE_DIR;
424 if (f->nonseekable)
425 arg->open_flags |= FOPEN_NONSEEKABLE;
426 if (f->noflush)
427 arg->open_flags |= FOPEN_NOFLUSH;
429 arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
430}
431
433{
434 struct fuse_entry_out arg;
435 size_t size = req->se->conn.proto_minor < 9 ?
436 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
437
438 /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
439 negative entry */
440 if (!e->ino && req->se->conn.proto_minor < 4)
441 return fuse_reply_err(req, ENOENT);
442
443 memset(&arg, 0, sizeof(arg));
444 fill_entry(&arg, e);
445 return send_reply_ok(req, &arg, size);
446}
447
449 const struct fuse_file_info *f)
450{
451 alignas(uint64_t) char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
452 size_t entrysize = req->se->conn.proto_minor < 9 ?
453 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
454 struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
455 struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
456
457 memset(buf, 0, sizeof(buf));
458 fill_entry(earg, e);
459 fill_open(oarg, f);
460 return send_reply_ok(req, buf,
461 entrysize + sizeof(struct fuse_open_out));
462}
463
/* Reply to GETATTR/SETATTR with attributes and their validity time. */
int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
		    double attr_timeout)
{
	struct fuse_attr_out arg;
	/* Kernels older than ABI 7.9 expect the shorter compat layout */
	size_t size = req->se->conn.proto_minor < 9 ?
		FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);

	memset(&arg, 0, sizeof(arg));
	arg.attr_valid = calc_timeout_sec(attr_timeout);
	arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
	convert_stat(attr, &arg.attr);

	return send_reply_ok(req, &arg, size);
}
478
/* Reply to READLINK: the payload is the target path, no terminator. */
int fuse_reply_readlink(fuse_req_t req, const char *linkname)
{
	return send_reply_ok(req, linkname, strlen(linkname));
}
483
485{
486 struct fuse_backing_map map = { .fd = fd };
487 int ret;
488
489 ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
490 if (ret <= 0) {
491 fuse_log(FUSE_LOG_ERR, "fuse: passthrough_open: %s\n", strerror(errno));
492 return 0;
493 }
494
495 return ret;
496}
497
498int fuse_passthrough_close(fuse_req_t req, int backing_id)
499{
500 int ret;
501
502 ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
503 if (ret < 0)
504 fuse_log(FUSE_LOG_ERR, "fuse: passthrough_close: %s\n", strerror(errno));
505
506 return ret;
507}
508
510{
511 struct fuse_open_out arg;
512
513 memset(&arg, 0, sizeof(arg));
514 fill_open(&arg, f);
515 return send_reply_ok(req, &arg, sizeof(arg));
516}
517
518int fuse_reply_write(fuse_req_t req, size_t count)
519{
520 struct fuse_write_out arg;
521
522 memset(&arg, 0, sizeof(arg));
523 arg.size = count;
524
525 return send_reply_ok(req, &arg, sizeof(arg));
526}
527
/* Reply with a plain data buffer (READ, LISTXATTR, ...). */
int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
{
	return send_reply_ok(req, buf, size);
}
532
/*
 * Send a reply whose payload is described by a fuse_bufvec without
 * using splice: either point iov at the single in-memory buffer, or
 * flatten the bufvec into one aligned allocation and send that.
 */
static int fuse_send_data_iov_fallback(struct fuse_session *se,
				       struct fuse_chan *ch,
				       struct iovec *iov, int iov_count,
				       struct fuse_bufvec *buf,
				       size_t len)
{
	struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
	void *mbuf;
	int res;

	/* Optimize common case */
	if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
	    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
		/* FIXME: also avoid memory copy if there are multiple buffers
		   but none of them contain an fd */

		iov[iov_count].iov_base = buf->buf[0].mem;
		iov[iov_count].iov_len = len;
		iov_count++;
		return fuse_send_msg(se, ch, iov, iov_count);
	}

	/* Page alignment helps the kernel copy */
	res = posix_memalign(&mbuf, pagesize, len);
	if (res != 0)
		return res;

	mem_buf.buf[0].mem = mbuf;
	res = fuse_buf_copy(&mem_buf, buf, 0);
	if (res < 0) {
		free(mbuf);
		return -res;
	}
	/* fuse_buf_copy may legitimately copy less than requested */
	len = res;

	iov[iov_count].iov_base = mbuf;
	iov[iov_count].iov_len = len;
	iov_count++;
	res = fuse_send_msg(se, ch, iov, iov_count);
	free(mbuf);

	return res;
}
575
/* Per-thread pipe used for zero-copy (splice) replies. */
struct fuse_ll_pipe {
	size_t size;	/* current pipe capacity in bytes */
	int can_grow;	/* 0 once F_SETPIPE_SZ has failed */
	int pipe[2];	/* [0] = read end, [1] = write end */
};
581
582static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
583{
584 close(llp->pipe[0]);
585 close(llp->pipe[1]);
586 free(llp);
587}
588
589#ifdef HAVE_SPLICE
590#if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
/*
 * pipe2() replacement for platforms without it: create a pipe and
 * mark both ends non-blocking and close-on-exec.  Returns 0 on
 * success; on any failure the pipe is closed again and -1 returned.
 */
static int fuse_pipe(int fds[2])
{
	if (pipe(fds) == -1)
		return -1;

	if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
	    fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
		close(fds[0]);
		close(fds[1]);
		return -1;
	}

	return 0;
}
608#else
/* Create a non-blocking, close-on-exec pipe in a single syscall. */
static int fuse_pipe(int fds[2])
{
	return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
}
613#endif
614
/*
 * Return this thread's splice pipe, creating it on first use.
 * Returns NULL if allocation or pipe creation fails; callers then
 * fall back to the non-splice path.
 */
static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
{
	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
	if (llp == NULL) {
		int res;

		llp = malloc(sizeof(struct fuse_ll_pipe));
		if (llp == NULL)
			return NULL;

		res = fuse_pipe(llp->pipe);
		if (res == -1) {
			free(llp);
			return NULL;
		}

		/*
		 *the default size is 16 pages on linux
		 */
		llp->size = pagesize * 16;
		llp->can_grow = 1;

		pthread_setspecific(se->pipe_key, llp);
	}

	return llp;
}
642#endif
643
644static void fuse_ll_clear_pipe(struct fuse_session *se)
645{
646 struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
647 if (llp) {
648 pthread_setspecific(se->pipe_key, NULL);
649 fuse_ll_pipe_free(llp);
650 }
651}
652
653#if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
654static int read_back(int fd, char *buf, size_t len)
655{
656 int res;
657
658 res = read(fd, buf, len);
659 if (res == -1) {
660 fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
661 return -EIO;
662 }
663 if (res != len) {
664 fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
665 return -EIO;
666 }
667 return 0;
668}
669
/*
 * Grow *pipefd* to the system-wide maximum pipe size read from
 * /proc/sys/fs/pipe-max-size.  Returns the new size on success or a
 * negative errno / parse error on failure.
 */
static int grow_pipe_to_max(int pipefd)
{
	int res;
	long max;
	long maxfd;	/* fd of the procfs file, not a size */
	char buf[32];

	maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
	if (maxfd < 0)
		return -errno;

	res = read(maxfd, buf, sizeof(buf) - 1);
	if (res < 0) {
		int saved_errno;

		/* close() may clobber errno, so save it first */
		saved_errno = errno;
		close(maxfd);
		return -saved_errno;
	}
	close(maxfd);
	buf[res] = '\0';

	res = libfuse_strtol(buf, &max);
	if (res)
		return res;
	res = fcntl(pipefd, F_SETPIPE_SZ, max);
	if (res < 0)
		return -errno;
	return max;
}
700
701static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
702 struct iovec *iov, int iov_count,
703 struct fuse_bufvec *buf, unsigned int flags)
704{
705 int res;
706 size_t len = fuse_buf_size(buf);
707 struct fuse_out_header *out = iov[0].iov_base;
708 struct fuse_ll_pipe *llp;
709 int splice_flags;
710 size_t pipesize;
711 size_t total_buf_size;
712 size_t idx;
713 size_t headerlen;
714 struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
715
716 if (se->broken_splice_nonblock)
717 goto fallback;
718
719 if (flags & FUSE_BUF_NO_SPLICE)
720 goto fallback;
721
722 total_buf_size = 0;
723 for (idx = buf->idx; idx < buf->count; idx++) {
724 total_buf_size += buf->buf[idx].size;
725 if (idx == buf->idx)
726 total_buf_size -= buf->off;
727 }
728 if (total_buf_size < 2 * pagesize)
729 goto fallback;
730
731 if (se->conn.proto_minor < 14 ||
732 !(se->conn.want_ext & FUSE_CAP_SPLICE_WRITE))
733 goto fallback;
734
735 llp = fuse_ll_get_pipe(se);
736 if (llp == NULL)
737 goto fallback;
738
739
740 headerlen = iov_length(iov, iov_count);
741
742 out->len = headerlen + len;
743
744 /*
745 * Heuristic for the required pipe size, does not work if the
746 * source contains less than page size fragments
747 */
748 pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
749
750 if (llp->size < pipesize) {
751 if (llp->can_grow) {
752 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
753 if (res == -1) {
754 res = grow_pipe_to_max(llp->pipe[0]);
755 if (res > 0)
756 llp->size = res;
757 llp->can_grow = 0;
758 goto fallback;
759 }
760 llp->size = res;
761 }
762 if (llp->size < pipesize)
763 goto fallback;
764 }
765
766
767 res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
768 if (res == -1)
769 goto fallback;
770
771 if (res != headerlen) {
772 res = -EIO;
773 fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
774 headerlen);
775 goto clear_pipe;
776 }
777
778 pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
779 pipe_buf.buf[0].fd = llp->pipe[1];
780
781 res = fuse_buf_copy(&pipe_buf, buf,
783 if (res < 0) {
784 if (res == -EAGAIN || res == -EINVAL) {
785 /*
786 * Should only get EAGAIN on kernels with
787 * broken SPLICE_F_NONBLOCK support (<=
788 * 2.6.35) where this error or a short read is
789 * returned even if the pipe itself is not
790 * full
791 *
792 * EINVAL might mean that splice can't handle
793 * this combination of input and output.
794 */
795 if (res == -EAGAIN)
796 se->broken_splice_nonblock = 1;
797
798 pthread_setspecific(se->pipe_key, NULL);
799 fuse_ll_pipe_free(llp);
800 goto fallback;
801 }
802 res = -res;
803 goto clear_pipe;
804 }
805
806 if (res != 0 && res < len) {
807 struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
808 void *mbuf;
809 size_t now_len = res;
810 /*
811 * For regular files a short count is either
812 * 1) due to EOF, or
813 * 2) because of broken SPLICE_F_NONBLOCK (see above)
814 *
815 * For other inputs it's possible that we overflowed
816 * the pipe because of small buffer fragments.
817 */
818
819 res = posix_memalign(&mbuf, pagesize, len);
820 if (res != 0)
821 goto clear_pipe;
822
823 mem_buf.buf[0].mem = mbuf;
824 mem_buf.off = now_len;
825 res = fuse_buf_copy(&mem_buf, buf, 0);
826 if (res > 0) {
827 char *tmpbuf;
828 size_t extra_len = res;
829 /*
830 * Trickiest case: got more data. Need to get
831 * back the data from the pipe and then fall
832 * back to regular write.
833 */
834 tmpbuf = malloc(headerlen);
835 if (tmpbuf == NULL) {
836 free(mbuf);
837 res = ENOMEM;
838 goto clear_pipe;
839 }
840 res = read_back(llp->pipe[0], tmpbuf, headerlen);
841 free(tmpbuf);
842 if (res != 0) {
843 free(mbuf);
844 goto clear_pipe;
845 }
846 res = read_back(llp->pipe[0], mbuf, now_len);
847 if (res != 0) {
848 free(mbuf);
849 goto clear_pipe;
850 }
851 len = now_len + extra_len;
852 iov[iov_count].iov_base = mbuf;
853 iov[iov_count].iov_len = len;
854 iov_count++;
855 res = fuse_send_msg(se, ch, iov, iov_count);
856 free(mbuf);
857 return res;
858 }
859 free(mbuf);
860 res = now_len;
861 }
862 len = res;
863 out->len = headerlen + len;
864
865 if (se->debug) {
866 fuse_log(FUSE_LOG_DEBUG,
867 " unique: %llu, success, outsize: %i (splice)\n",
868 (unsigned long long) out->unique, out->len);
869 }
870
871 splice_flags = 0;
872 if ((flags & FUSE_BUF_SPLICE_MOVE) &&
873 (se->conn.want_ext & FUSE_CAP_SPLICE_MOVE))
874 splice_flags |= SPLICE_F_MOVE;
875
876 if (se->io != NULL && se->io->splice_send != NULL) {
877 res = se->io->splice_send(llp->pipe[0], NULL,
878 ch ? ch->fd : se->fd, NULL, out->len,
879 splice_flags, se->userdata);
880 } else {
881 res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
882 out->len, splice_flags);
883 }
884 if (res == -1) {
885 res = -errno;
886 perror("fuse: splice from pipe");
887 goto clear_pipe;
888 }
889 if (res != out->len) {
890 res = -EIO;
891 fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
892 res, out->len);
893 goto clear_pipe;
894 }
895 return 0;
896
897clear_pipe:
898 fuse_ll_clear_pipe(se);
899 return res;
900
901fallback:
902 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
903}
904#else
/* Without splice support, data replies always go through the plain
   writev() fallback path. */
static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
			       struct iovec *iov, int iov_count,
			       struct fuse_bufvec *buf, unsigned int flags)
{
	(void) flags;

	return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf,
					   fuse_buf_size(buf));
}
915
917 enum fuse_buf_copy_flags flags)
918{
919 struct iovec iov[2];
920 struct fuse_out_header out;
921 int res;
922
923 iov[0].iov_base = &out;
924 iov[0].iov_len = sizeof(struct fuse_out_header);
925
926 out.unique = req->unique;
927 out.error = 0;
928
929 res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
930 if (res <= 0) {
931 fuse_free_req(req);
932 return res;
933 } else {
934 return fuse_reply_err(req, res);
935 }
936}
937
/* Reply to STATFS; kernels older than ABI 7.4 use a shorter layout. */
int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
{
	struct fuse_statfs_out arg;
	size_t size = req->se->conn.proto_minor < 4 ?
		FUSE_COMPAT_STATFS_SIZE : sizeof(arg);

	memset(&arg, 0, sizeof(arg));
	convert_statfs(stbuf, &arg.st);

	return send_reply_ok(req, &arg, size);
}
949
950int fuse_reply_xattr(fuse_req_t req, size_t count)
951{
952 struct fuse_getxattr_out arg;
953
954 memset(&arg, 0, sizeof(arg));
955 arg.size = count;
956
957 return send_reply_ok(req, &arg, sizeof(arg));
958}
959
960int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
961{
962 struct fuse_lk_out arg;
963
964 memset(&arg, 0, sizeof(arg));
965 arg.lk.type = lock->l_type;
966 if (lock->l_type != F_UNLCK) {
967 arg.lk.start = lock->l_start;
968 if (lock->l_len == 0)
969 arg.lk.end = OFFSET_MAX;
970 else
971 arg.lk.end = lock->l_start + lock->l_len - 1;
972 }
973 arg.lk.pid = lock->l_pid;
974 return send_reply_ok(req, &arg, sizeof(arg));
975}
976
977int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
978{
979 struct fuse_bmap_out arg;
980
981 memset(&arg, 0, sizeof(arg));
982 arg.block = idx;
983
984 return send_reply_ok(req, &arg, sizeof(arg));
985}
986
987static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
988 size_t count)
989{
990 struct fuse_ioctl_iovec *fiov;
991 size_t i;
992
993 fiov = malloc(sizeof(fiov[0]) * count);
994 if (!fiov)
995 return NULL;
996
997 for (i = 0; i < count; i++) {
998 fiov[i].base = (uintptr_t) iov[i].iov_base;
999 fiov[i].len = iov[i].iov_len;
1000 }
1001
1002 return fiov;
1003}
1004
1006 const struct iovec *in_iov, size_t in_count,
1007 const struct iovec *out_iov, size_t out_count)
1008{
1009 struct fuse_ioctl_out arg;
1010 struct fuse_ioctl_iovec *in_fiov = NULL;
1011 struct fuse_ioctl_iovec *out_fiov = NULL;
1012 struct iovec iov[4];
1013 size_t count = 1;
1014 int res;
1015
1016 memset(&arg, 0, sizeof(arg));
1017 arg.flags |= FUSE_IOCTL_RETRY;
1018 arg.in_iovs = in_count;
1019 arg.out_iovs = out_count;
1020 iov[count].iov_base = &arg;
1021 iov[count].iov_len = sizeof(arg);
1022 count++;
1023
1024 if (req->se->conn.proto_minor < 16) {
1025 if (in_count) {
1026 iov[count].iov_base = (void *)in_iov;
1027 iov[count].iov_len = sizeof(in_iov[0]) * in_count;
1028 count++;
1029 }
1030
1031 if (out_count) {
1032 iov[count].iov_base = (void *)out_iov;
1033 iov[count].iov_len = sizeof(out_iov[0]) * out_count;
1034 count++;
1035 }
1036 } else {
1037 /* Can't handle non-compat 64bit ioctls on 32bit */
1038 if (sizeof(void *) == 4 && req->ioctl_64bit) {
1039 res = fuse_reply_err(req, EINVAL);
1040 goto out;
1041 }
1042
1043 if (in_count) {
1044 in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
1045 if (!in_fiov)
1046 goto enomem;
1047
1048 iov[count].iov_base = (void *)in_fiov;
1049 iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1050 count++;
1051 }
1052 if (out_count) {
1053 out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1054 if (!out_fiov)
1055 goto enomem;
1056
1057 iov[count].iov_base = (void *)out_fiov;
1058 iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1059 count++;
1060 }
1061 }
1062
1063 res = send_reply_iov(req, 0, iov, count);
1064out:
1065 free(in_fiov);
1066 free(out_fiov);
1067
1068 return res;
1069
1070enomem:
1071 res = fuse_reply_err(req, ENOMEM);
1072 goto out;
1073}
1074
1075int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1076{
1077 struct fuse_ioctl_out arg;
1078 struct iovec iov[3];
1079 size_t count = 1;
1080
1081 memset(&arg, 0, sizeof(arg));
1082 arg.result = result;
1083 iov[count].iov_base = &arg;
1084 iov[count].iov_len = sizeof(arg);
1085 count++;
1086
1087 if (size) {
1088 iov[count].iov_base = (char *) buf;
1089 iov[count].iov_len = size;
1090 count++;
1091 }
1092
1093 return send_reply_iov(req, 0, iov, count);
1094}
1095
1096int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1097 int count)
1098{
1099 struct iovec *padded_iov;
1100 struct fuse_ioctl_out arg;
1101 int res;
1102
1103 padded_iov = malloc((count + 2) * sizeof(struct iovec));
1104 if (padded_iov == NULL)
1105 return fuse_reply_err(req, ENOMEM);
1106
1107 memset(&arg, 0, sizeof(arg));
1108 arg.result = result;
1109 padded_iov[1].iov_base = &arg;
1110 padded_iov[1].iov_len = sizeof(arg);
1111
1112 memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1113
1114 res = send_reply_iov(req, 0, padded_iov, count + 2);
1115 free(padded_iov);
1116
1117 return res;
1118}
1119
1120int fuse_reply_poll(fuse_req_t req, unsigned revents)
1121{
1122 struct fuse_poll_out arg;
1123
1124 memset(&arg, 0, sizeof(arg));
1125 arg.revents = revents;
1126
1127 return send_reply_ok(req, &arg, sizeof(arg));
1128}
1129
1130int fuse_reply_lseek(fuse_req_t req, off_t off)
1131{
1132 struct fuse_lseek_out arg;
1133
1134 memset(&arg, 0, sizeof(arg));
1135 arg.offset = off;
1136
1137 return send_reply_ok(req, &arg, sizeof(arg));
1138}
1139
1140static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1141{
1142 char *name = (char *) inarg;
1143
1144 if (req->se->op.lookup)
1145 req->se->op.lookup(req, nodeid, name);
1146 else
1147 fuse_reply_err(req, ENOSYS);
1148}
1149
1150static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1151{
1152 struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1153
1154 if (req->se->op.forget)
1155 req->se->op.forget(req, nodeid, arg->nlookup);
1156 else
1157 fuse_reply_none(req);
1158}
1159
/*
 * BATCH_FORGET: many forgets in one message.  Prefer the filesystem's
 * forget_multi; otherwise replay each entry through op.forget using a
 * throwaway request object, since the real request can be consumed
 * only once.  BATCH_FORGET never gets a normal reply.
 */
static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
			    const void *inarg)
{
	struct fuse_batch_forget_in *arg = (void *) inarg;
	/* The forget_one array follows the header in the message */
	struct fuse_forget_one *param = (void *) PARAM(arg);
	unsigned int i;

	(void) nodeid;

	if (req->se->op.forget_multi) {
		req->se->op.forget_multi(req, arg->count,
					 (struct fuse_forget_data *) param);
	} else if (req->se->op.forget) {
		for (i = 0; i < arg->count; i++) {
			struct fuse_forget_one *forget = &param[i];
			struct fuse_req *dummy_req;

			dummy_req = fuse_ll_alloc_req(req->se);
			if (dummy_req == NULL)
				break;

			dummy_req->unique = req->unique;
			dummy_req->ctx = req->ctx;
			dummy_req->ch = NULL;

			req->se->op.forget(dummy_req, forget->nodeid,
					   forget->nlookup);
		}
		fuse_reply_none(req);
	} else {
		fuse_reply_none(req);
	}
}
1193
/*
 * GETATTR: optionally carries a file handle (ABI >= 7.9 with the
 * FUSE_GETATTR_FH flag); pass it through as a fuse_file_info.
 */
static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_file_info *fip = NULL;
	struct fuse_file_info fi;

	if (req->se->conn.proto_minor >= 9) {
		struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;

		if (arg->getattr_flags & FUSE_GETATTR_FH) {
			memset(&fi, 0, sizeof(fi));
			fi.fh = arg->fh;
			fip = &fi;
		}
	}

	if (req->se->op.getattr)
		req->se->op.getattr(req, nodeid, fip);
	else
		fuse_reply_err(req, ENOSYS);
}
1214
/*
 * SETATTR: convert the wire request into a struct stat plus a valid-
 * fields bitmask.  FATTR_FH is consumed here (turned into a
 * fuse_file_info) and masked out; the remaining bits are filtered to
 * the set the high-level API defines before calling the filesystem.
 */
static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;

	if (req->se->op.setattr) {
		struct fuse_file_info *fi = NULL;
		struct fuse_file_info fi_store;
		struct stat stbuf;
		memset(&stbuf, 0, sizeof(stbuf));
		convert_attr(arg, &stbuf);
		if (arg->valid & FATTR_FH) {
			/* File-handle flag is internal: strip it */
			arg->valid &= ~FATTR_FH;
			memset(&fi_store, 0, sizeof(fi_store));
			fi = &fi_store;
			fi->fh = arg->fh;
		}
		/* Keep only the attribute bits exposed by the API */
		arg->valid &=
			FUSE_SET_ATTR_MODE	|
			FUSE_SET_ATTR_UID	|
			FUSE_SET_ATTR_GID	|
			FUSE_SET_ATTR_SIZE	|
			FUSE_SET_ATTR_ATIME	|
			FUSE_SET_ATTR_MTIME	|
			FUSE_SET_ATTR_KILL_SUID |
			FUSE_SET_ATTR_KILL_SGID |
			FUSE_SET_ATTR_ATIME_NOW |
			FUSE_SET_ATTR_MTIME_NOW |
			FUSE_SET_ATTR_CTIME;

		req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
	} else
		fuse_reply_err(req, ENOSYS);
}
1248
1249static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1250{
1251 struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1252
1253 if (req->se->op.access)
1254 req->se->op.access(req, nodeid, arg->mask);
1255 else
1256 fuse_reply_err(req, ENOSYS);
1257}
1258
1259static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1260{
1261 (void) inarg;
1262
1263 if (req->se->op.readlink)
1264 req->se->op.readlink(req, nodeid);
1265 else
1266 fuse_reply_err(req, ENOSYS);
1267}
1268
1269static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1270{
1271 struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1272 char *name = PARAM(arg);
1273
1274 if (req->se->conn.proto_minor >= 12)
1275 req->ctx.umask = arg->umask;
1276 else
1277 name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1278
1279 if (req->se->op.mknod)
1280 req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1281 else
1282 fuse_reply_err(req, ENOSYS);
1283}
1284
1285static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1286{
1287 struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1288
1289 if (req->se->conn.proto_minor >= 12)
1290 req->ctx.umask = arg->umask;
1291
1292 if (req->se->op.mkdir)
1293 req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1294 else
1295 fuse_reply_err(req, ENOSYS);
1296}
1297
1298static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1299{
1300 char *name = (char *) inarg;
1301
1302 if (req->se->op.unlink)
1303 req->se->op.unlink(req, nodeid, name);
1304 else
1305 fuse_reply_err(req, ENOSYS);
1306}
1307
1308static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1309{
1310 char *name = (char *) inarg;
1311
1312 if (req->se->op.rmdir)
1313 req->se->op.rmdir(req, nodeid, name);
1314 else
1315 fuse_reply_err(req, ENOSYS);
1316}
1317
1318static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1319{
1320 char *name = (char *) inarg;
1321 char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1322
1323 if (req->se->op.symlink)
1324 req->se->op.symlink(req, linkname, nodeid, name);
1325 else
1326 fuse_reply_err(req, ENOSYS);
1327}
1328
1329static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1330{
1331 struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1332 char *oldname = PARAM(arg);
1333 char *newname = oldname + strlen(oldname) + 1;
1334
1335 if (req->se->op.rename)
1336 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1337 0);
1338 else
1339 fuse_reply_err(req, ENOSYS);
1340}
1341
1342static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1343{
1344 struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1345 char *oldname = PARAM(arg);
1346 char *newname = oldname + strlen(oldname) + 1;
1347
1348 if (req->se->op.rename)
1349 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1350 arg->flags);
1351 else
1352 fuse_reply_err(req, ENOSYS);
1353}
1354
1355static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1356{
1357 struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1358
1359 if (req->se->op.link)
1360 req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1361 else
1362 fuse_reply_err(req, ENOSYS);
1363}
1364
1365static void do_tmpfile(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1366{
1367 struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1368
1369 if (req->se->op.tmpfile) {
1370 struct fuse_file_info fi;
1371
1372 memset(&fi, 0, sizeof(fi));
1373 fi.flags = arg->flags;
1374
1375 if (req->se->conn.proto_minor >= 12)
1376 req->ctx.umask = arg->umask;
1377
1378 req->se->op.tmpfile(req, nodeid, arg->mode, &fi);
1379 } else
1380 fuse_reply_err(req, ENOSYS);
1381}
1382
1383static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1384{
1385 struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1386
1387 if (req->se->op.create) {
1388 struct fuse_file_info fi;
1389 char *name = PARAM(arg);
1390
1391 memset(&fi, 0, sizeof(fi));
1392 fi.flags = arg->flags;
1393
1394 if (req->se->conn.proto_minor >= 12)
1395 req->ctx.umask = arg->umask;
1396 else
1397 name = (char *) inarg + sizeof(struct fuse_open_in);
1398
1399 req->se->op.create(req, nodeid, name, arg->mode, &fi);
1400 } else
1401 fuse_reply_err(req, ENOSYS);
1402}
1403
1404static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1405{
1406 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1407 struct fuse_file_info fi;
1408
1409 memset(&fi, 0, sizeof(fi));
1410 fi.flags = arg->flags;
1411
1412 if (req->se->op.open)
1413 req->se->op.open(req, nodeid, &fi);
1414 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPEN_SUPPORT)
1415 fuse_reply_err(req, ENOSYS);
1416 else
1417 fuse_reply_open(req, &fi);
1418}
1419
1420static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1421{
1422 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1423
1424 if (req->se->op.read) {
1425 struct fuse_file_info fi;
1426
1427 memset(&fi, 0, sizeof(fi));
1428 fi.fh = arg->fh;
1429 if (req->se->conn.proto_minor >= 9) {
1430 fi.lock_owner = arg->lock_owner;
1431 fi.flags = arg->flags;
1432 }
1433 req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1434 } else
1435 fuse_reply_err(req, ENOSYS);
1436}
1437
1438static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1439{
1440 struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1441 struct fuse_file_info fi;
1442 char *param;
1443
1444 memset(&fi, 0, sizeof(fi));
1445 fi.fh = arg->fh;
1446 fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1447
1448 if (req->se->conn.proto_minor < 9) {
1449 param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1450 } else {
1451 fi.lock_owner = arg->lock_owner;
1452 fi.flags = arg->flags;
1453 param = PARAM(arg);
1454 }
1455
1456 if (req->se->op.write)
1457 req->se->op.write(req, nodeid, param, arg->size,
1458 arg->offset, &fi);
1459 else
1460 fuse_reply_err(req, ENOSYS);
1461}
1462
/* WRITE via the zero-copy path: wrap the incoming buffer (which may be
 * memory or a pipe fd) in a fuse_bufvec and hand it to op.write_buf.
 * NOTE(review): op.write_buf is called unconditionally — presumably the
 * dispatcher only routes here when the handler is set; confirm at the
 * call site. */
static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
			 const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};
	struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;

	if (se->conn.proto_minor < 9) {
		/* Compat layout: payload follows the short header, and the
		 * splice path (FUSE_BUF_IS_FD) is never used with it. */
		bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			FUSE_COMPAT_WRITE_IN_SIZE;
		assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
	} else {
		fi.lock_owner = arg->lock_owner;
		fi.flags = arg->flags;
		/* For an fd-backed buffer the payload stays in the pipe;
		 * only point .mem at it when the data is in memory. */
		if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
			bufv.buf[0].mem = PARAM(arg);

		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			sizeof(struct fuse_write_in);
	}
	/* Sanity check: the buffer must hold at least what the kernel
	 * claims it wrote. */
	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
		fuse_reply_err(req, EIO);
		goto out;
	}
	/* Trim to the exact write size (buffer may be over-allocated). */
	bufv.buf[0].size = arg->size;

	se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);

out:
	/* Need to reset the pipe if ->write_buf() didn't consume all data */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
1506
1507static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1508{
1509 struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1510 struct fuse_file_info fi;
1511
1512 memset(&fi, 0, sizeof(fi));
1513 fi.fh = arg->fh;
1514 fi.flush = 1;
1515 if (req->se->conn.proto_minor >= 7)
1516 fi.lock_owner = arg->lock_owner;
1517
1518 if (req->se->op.flush)
1519 req->se->op.flush(req, nodeid, &fi);
1520 else
1521 fuse_reply_err(req, ENOSYS);
1522}
1523
1524static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1525{
1526 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1527 struct fuse_file_info fi;
1528
1529 memset(&fi, 0, sizeof(fi));
1530 fi.flags = arg->flags;
1531 fi.fh = arg->fh;
1532 if (req->se->conn.proto_minor >= 8) {
1533 fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1534 fi.lock_owner = arg->lock_owner;
1535 }
1536 if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1537 fi.flock_release = 1;
1538 fi.lock_owner = arg->lock_owner;
1539 }
1540
1541 if (req->se->op.release)
1542 req->se->op.release(req, nodeid, &fi);
1543 else
1544 fuse_reply_err(req, 0);
1545}
1546
1547static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1548{
1549 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1550 struct fuse_file_info fi;
1551 int datasync = arg->fsync_flags & 1;
1552
1553 memset(&fi, 0, sizeof(fi));
1554 fi.fh = arg->fh;
1555
1556 if (req->se->op.fsync)
1557 req->se->op.fsync(req, nodeid, datasync, &fi);
1558 else
1559 fuse_reply_err(req, ENOSYS);
1560}
1561
1562static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1563{
1564 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1565 struct fuse_file_info fi;
1566
1567 memset(&fi, 0, sizeof(fi));
1568 fi.flags = arg->flags;
1569
1570 if (req->se->op.opendir)
1571 req->se->op.opendir(req, nodeid, &fi);
1572 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPENDIR_SUPPORT)
1573 fuse_reply_err(req, ENOSYS);
1574 else
1575 fuse_reply_open(req, &fi);
1576}
1577
1578static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1579{
1580 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1581 struct fuse_file_info fi;
1582
1583 memset(&fi, 0, sizeof(fi));
1584 fi.fh = arg->fh;
1585
1586 if (req->se->op.readdir)
1587 req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1588 else
1589 fuse_reply_err(req, ENOSYS);
1590}
1591
1592static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1593{
1594 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1595 struct fuse_file_info fi;
1596
1597 memset(&fi, 0, sizeof(fi));
1598 fi.fh = arg->fh;
1599
1600 if (req->se->op.readdirplus)
1601 req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1602 else
1603 fuse_reply_err(req, ENOSYS);
1604}
1605
1606static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1607{
1608 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1609 struct fuse_file_info fi;
1610
1611 memset(&fi, 0, sizeof(fi));
1612 fi.flags = arg->flags;
1613 fi.fh = arg->fh;
1614
1615 if (req->se->op.releasedir)
1616 req->se->op.releasedir(req, nodeid, &fi);
1617 else
1618 fuse_reply_err(req, 0);
1619}
1620
1621static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1622{
1623 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1624 struct fuse_file_info fi;
1625 int datasync = arg->fsync_flags & 1;
1626
1627 memset(&fi, 0, sizeof(fi));
1628 fi.fh = arg->fh;
1629
1630 if (req->se->op.fsyncdir)
1631 req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1632 else
1633 fuse_reply_err(req, ENOSYS);
1634}
1635
1636static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1637{
1638 (void) nodeid;
1639 (void) inarg;
1640
1641 if (req->se->op.statfs)
1642 req->se->op.statfs(req, nodeid);
1643 else {
1644 struct statvfs buf = {
1645 .f_namemax = 255,
1646 .f_bsize = 512,
1647 };
1648 fuse_reply_statfs(req, &buf);
1649 }
1650}
1651
1652static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1653{
1654 struct fuse_session *se = req->se;
1655 unsigned int xattr_ext = !!(se->conn.want_ext & FUSE_CAP_SETXATTR_EXT);
1656 struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1657 char *name = xattr_ext ? PARAM(arg) :
1658 (char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
1659 char *value = name + strlen(name) + 1;
1660
1661 /* XXX:The API should be extended to support extra_flags/setxattr_flags */
1662 if (req->se->op.setxattr)
1663 req->se->op.setxattr(req, nodeid, name, value, arg->size,
1664 arg->flags);
1665 else
1666 fuse_reply_err(req, ENOSYS);
1667}
1668
1669static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1670{
1671 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1672
1673 if (req->se->op.getxattr)
1674 req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1675 else
1676 fuse_reply_err(req, ENOSYS);
1677}
1678
1679static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1680{
1681 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1682
1683 if (req->se->op.listxattr)
1684 req->se->op.listxattr(req, nodeid, arg->size);
1685 else
1686 fuse_reply_err(req, ENOSYS);
1687}
1688
1689static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1690{
1691 char *name = (char *) inarg;
1692
1693 if (req->se->op.removexattr)
1694 req->se->op.removexattr(req, nodeid, name);
1695 else
1696 fuse_reply_err(req, ENOSYS);
1697}
1698
1699static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1700 struct flock *flock)
1701{
1702 memset(flock, 0, sizeof(struct flock));
1703 flock->l_type = fl->type;
1704 flock->l_whence = SEEK_SET;
1705 flock->l_start = fl->start;
1706 if (fl->end == OFFSET_MAX)
1707 flock->l_len = 0;
1708 else
1709 flock->l_len = fl->end - fl->start + 1;
1710 flock->l_pid = fl->pid;
1711}
1712
1713static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1714{
1715 struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1716 struct fuse_file_info fi;
1717 struct flock flock;
1718
1719 memset(&fi, 0, sizeof(fi));
1720 fi.fh = arg->fh;
1721 fi.lock_owner = arg->owner;
1722
1723 convert_fuse_file_lock(&arg->lk, &flock);
1724 if (req->se->op.getlk)
1725 req->se->op.getlk(req, nodeid, &fi, &flock);
1726 else
1727 fuse_reply_err(req, ENOSYS);
1728}
1729
1730static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1731 const void *inarg, int sleep)
1732{
1733 struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1734 struct fuse_file_info fi;
1735 struct flock flock;
1736
1737 memset(&fi, 0, sizeof(fi));
1738 fi.fh = arg->fh;
1739 fi.lock_owner = arg->owner;
1740
1741 if (arg->lk_flags & FUSE_LK_FLOCK) {
1742 int op = 0;
1743
1744 switch (arg->lk.type) {
1745 case F_RDLCK:
1746 op = LOCK_SH;
1747 break;
1748 case F_WRLCK:
1749 op = LOCK_EX;
1750 break;
1751 case F_UNLCK:
1752 op = LOCK_UN;
1753 break;
1754 }
1755 if (!sleep)
1756 op |= LOCK_NB;
1757
1758 if (req->se->op.flock)
1759 req->se->op.flock(req, nodeid, &fi, op);
1760 else
1761 fuse_reply_err(req, ENOSYS);
1762 } else {
1763 convert_fuse_file_lock(&arg->lk, &flock);
1764 if (req->se->op.setlk)
1765 req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1766 else
1767 fuse_reply_err(req, ENOSYS);
1768 }
1769}
1770
1771static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1772{
1773 do_setlk_common(req, nodeid, inarg, 0);
1774}
1775
1776static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1777{
1778 do_setlk_common(req, nodeid, inarg, 1);
1779}
1780
1781static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1782{
1783 struct fuse_req *curr;
1784
1785 for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1786 if (curr->unique == req->u.i.unique) {
1788 void *data;
1789
1790 curr->ref_cnt++;
1791 pthread_mutex_unlock(&se->lock);
1792
1793 /* Ugh, ugly locking */
1794 pthread_mutex_lock(&curr->lock);
1795 pthread_mutex_lock(&se->lock);
1796 curr->interrupted = 1;
1797 func = curr->u.ni.func;
1798 data = curr->u.ni.data;
1799 pthread_mutex_unlock(&se->lock);
1800 if (func)
1801 func(curr, data);
1802 pthread_mutex_unlock(&curr->lock);
1803
1804 pthread_mutex_lock(&se->lock);
1805 curr->ref_cnt--;
1806 if (!curr->ref_cnt) {
1807 destroy_req(curr);
1808 }
1809
1810 return 1;
1811 }
1812 }
1813 for (curr = se->interrupts.next; curr != &se->interrupts;
1814 curr = curr->next) {
1815 if (curr->u.i.unique == req->u.i.unique)
1816 return 1;
1817 }
1818 return 0;
1819}
1820
1821static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1822{
1823 struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1824 struct fuse_session *se = req->se;
1825
1826 (void) nodeid;
1827 if (se->debug)
1828 fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1829 (unsigned long long) arg->unique);
1830
1831 req->u.i.unique = arg->unique;
1832
1833 pthread_mutex_lock(&se->lock);
1834 if (find_interrupted(se, req)) {
1835 fuse_chan_put(req->ch);
1836 req->ch = NULL;
1837 destroy_req(req);
1838 } else
1839 list_add_req(req, &se->interrupts);
1840 pthread_mutex_unlock(&se->lock);
1841}
1842
/* Check whether 'req' has a pending interrupt queued on se->interrupts.
 * If so, mark it interrupted and free the queued interrupt request,
 * returning NULL.  Otherwise pop one leftover queued interrupt (if
 * any) and return it for the caller to re-process.
 * NOTE(review): appears to require se->lock to be held by the caller,
 * matching do_interrupt's locking — confirm at the call site. */
static struct fuse_req *check_interrupt(struct fuse_session *se,
					struct fuse_req *req)
{
	struct fuse_req *curr;

	for (curr = se->interrupts.next; curr != &se->interrupts;
	     curr = curr->next) {
		if (curr->u.i.unique == req->unique) {
			req->interrupted = 1;
			/* The queued interrupt is consumed: unlink and
			 * release its channel reference before freeing. */
			list_del_req(curr);
			fuse_chan_put(curr->ch);
			curr->ch = NULL;
			destroy_req(curr);
			return NULL;
		}
	}
	/* No match: hand back one queued interrupt (re-initialized as a
	 * standalone node) so the caller can retry it, or NULL if none. */
	curr = se->interrupts.next;
	if (curr != &se->interrupts) {
		list_del_req(curr);
		list_init_req(curr);
		return curr;
	} else
		return NULL;
}
1867
1868static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1869{
1870 struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1871
1872 if (req->se->op.bmap)
1873 req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1874 else
1875 fuse_reply_err(req, ENOSYS);
1876}
1877
1878static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1879{
1880 struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1881 unsigned int flags = arg->flags;
1882 void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1883 struct fuse_file_info fi;
1884
1885 if (flags & FUSE_IOCTL_DIR &&
1886 !(req->se->conn.want_ext & FUSE_CAP_IOCTL_DIR)) {
1887 fuse_reply_err(req, ENOTTY);
1888 return;
1889 }
1890
1891 memset(&fi, 0, sizeof(fi));
1892 fi.fh = arg->fh;
1893
1894 if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1895 !(flags & FUSE_IOCTL_32BIT)) {
1896 req->ioctl_64bit = 1;
1897 }
1898
1899 if (req->se->op.ioctl)
1900 req->se->op.ioctl(req, nodeid, arg->cmd,
1901 (void *)(uintptr_t)arg->arg, &fi, flags,
1902 in_buf, arg->in_size, arg->out_size);
1903 else
1904 fuse_reply_err(req, ENOSYS);
1905}
1906
/* Release a poll handle allocated by do_poll().  Safe to call with
 * NULL (free(NULL) is a no-op). */
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
{
	free(ph);
}
1911
1912static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1913{
1914 struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1915 struct fuse_file_info fi;
1916
1917 memset(&fi, 0, sizeof(fi));
1918 fi.fh = arg->fh;
1919 fi.poll_events = arg->events;
1920
1921 if (req->se->op.poll) {
1922 struct fuse_pollhandle *ph = NULL;
1923
1924 if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1925 ph = malloc(sizeof(struct fuse_pollhandle));
1926 if (ph == NULL) {
1927 fuse_reply_err(req, ENOMEM);
1928 return;
1929 }
1930 ph->kh = arg->kh;
1931 ph->se = req->se;
1932 }
1933
1934 req->se->op.poll(req, nodeid, &fi, ph);
1935 } else {
1936 fuse_reply_err(req, ENOSYS);
1937 }
1938}
1939
1940static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1941{
1942 struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1943 struct fuse_file_info fi;
1944
1945 memset(&fi, 0, sizeof(fi));
1946 fi.fh = arg->fh;
1947
1948 if (req->se->op.fallocate)
1949 req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1950 else
1951 fuse_reply_err(req, ENOSYS);
1952}
1953
1954static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1955{
1956 struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1957 struct fuse_file_info fi_in, fi_out;
1958
1959 memset(&fi_in, 0, sizeof(fi_in));
1960 fi_in.fh = arg->fh_in;
1961
1962 memset(&fi_out, 0, sizeof(fi_out));
1963 fi_out.fh = arg->fh_out;
1964
1965
1966 if (req->se->op.copy_file_range)
1967 req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1968 &fi_in, arg->nodeid_out,
1969 arg->off_out, &fi_out, arg->len,
1970 arg->flags);
1971 else
1972 fuse_reply_err(req, ENOSYS);
1973}
1974
1975static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1976{
1977 struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1978 struct fuse_file_info fi;
1979
1980 memset(&fi, 0, sizeof(fi));
1981 fi.fh = arg->fh;
1982
1983 if (req->se->op.lseek)
1984 req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1985 else
1986 fuse_reply_err(req, ENOSYS);
1987}
1988
1989static bool want_flags_valid(uint64_t capable, uint64_t want)
1990{
1991 uint64_t unknown_flags = want & (~capable);
1992 if (unknown_flags != 0) {
1993 fuse_log(FUSE_LOG_ERR,
1994 "fuse: unknown connection 'want' flags: 0x%08lx\n",
1995 unknown_flags);
1996 return false;
1997 }
1998 return true;
1999}
2000
2001/* Prevent bogus data races (bogus since "init" is called before
2002 * multi-threading becomes relevant */
2003static __attribute__((no_sanitize("thread")))
2004void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2005{
2006 struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
2007 struct fuse_init_out outarg;
2008 struct fuse_session *se = req->se;
2009 size_t bufsize = se->bufsize;
2010 size_t outargsize = sizeof(outarg);
2011 uint64_t inargflags = 0;
2012 uint64_t outargflags = 0;
2013 bool buf_reallocable = se->buf_reallocable;
2014 (void) nodeid;
2015 if (se->debug) {
2016 fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
2017 if (arg->major == 7 && arg->minor >= 6) {
2018 fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
2019 fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
2020 arg->max_readahead);
2021 }
2022 }
2023 se->conn.proto_major = arg->major;
2024 se->conn.proto_minor = arg->minor;
2025 se->conn.capable_ext = 0;
2026 se->conn.want_ext = 0;
2027
2028 memset(&outarg, 0, sizeof(outarg));
2029 outarg.major = FUSE_KERNEL_VERSION;
2030 outarg.minor = FUSE_KERNEL_MINOR_VERSION;
2031
2032 if (arg->major < 7) {
2033 fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
2034 arg->major, arg->minor);
2035 fuse_reply_err(req, EPROTO);
2036 return;
2037 }
2038
2039 if (arg->major > 7) {
2040 /* Wait for a second INIT request with a 7.X version */
2041 send_reply_ok(req, &outarg, sizeof(outarg));
2042 return;
2043 }
2044
2045 if (arg->minor >= 6) {
2046 if (arg->max_readahead < se->conn.max_readahead)
2047 se->conn.max_readahead = arg->max_readahead;
2048 inargflags = arg->flags;
2049 if (inargflags & FUSE_INIT_EXT)
2050 inargflags = inargflags | (uint64_t) arg->flags2 << 32;
2051 if (inargflags & FUSE_ASYNC_READ)
2052 se->conn.capable_ext |= FUSE_CAP_ASYNC_READ;
2053 if (inargflags & FUSE_POSIX_LOCKS)
2054 se->conn.capable_ext |= FUSE_CAP_POSIX_LOCKS;
2055 if (inargflags & FUSE_ATOMIC_O_TRUNC)
2056 se->conn.capable_ext |= FUSE_CAP_ATOMIC_O_TRUNC;
2057 if (inargflags & FUSE_EXPORT_SUPPORT)
2058 se->conn.capable_ext |= FUSE_CAP_EXPORT_SUPPORT;
2059 if (inargflags & FUSE_DONT_MASK)
2060 se->conn.capable_ext |= FUSE_CAP_DONT_MASK;
2061 if (inargflags & FUSE_FLOCK_LOCKS)
2062 se->conn.capable_ext |= FUSE_CAP_FLOCK_LOCKS;
2063 if (inargflags & FUSE_AUTO_INVAL_DATA)
2064 se->conn.capable_ext |= FUSE_CAP_AUTO_INVAL_DATA;
2065 if (inargflags & FUSE_DO_READDIRPLUS)
2066 se->conn.capable_ext |= FUSE_CAP_READDIRPLUS;
2067 if (inargflags & FUSE_READDIRPLUS_AUTO)
2068 se->conn.capable_ext |= FUSE_CAP_READDIRPLUS_AUTO;
2069 if (inargflags & FUSE_ASYNC_DIO)
2070 se->conn.capable_ext |= FUSE_CAP_ASYNC_DIO;
2071 if (inargflags & FUSE_WRITEBACK_CACHE)
2072 se->conn.capable_ext |= FUSE_CAP_WRITEBACK_CACHE;
2073 if (inargflags & FUSE_NO_OPEN_SUPPORT)
2074 se->conn.capable_ext |= FUSE_CAP_NO_OPEN_SUPPORT;
2075 if (inargflags & FUSE_PARALLEL_DIROPS)
2076 se->conn.capable_ext |= FUSE_CAP_PARALLEL_DIROPS;
2077 if (inargflags & FUSE_POSIX_ACL)
2078 se->conn.capable_ext |= FUSE_CAP_POSIX_ACL;
2079 if (inargflags & FUSE_HANDLE_KILLPRIV)
2080 se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV;
2081 if (inargflags & FUSE_HANDLE_KILLPRIV_V2)
2082 se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV_V2;
2083 if (inargflags & FUSE_CACHE_SYMLINKS)
2084 se->conn.capable_ext |= FUSE_CAP_CACHE_SYMLINKS;
2085 if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
2086 se->conn.capable_ext |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2087 if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2088 se->conn.capable_ext |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2089 if (inargflags & FUSE_SETXATTR_EXT)
2090 se->conn.capable_ext |= FUSE_CAP_SETXATTR_EXT;
2091 if (!(inargflags & FUSE_MAX_PAGES)) {
2092 size_t max_bufsize =
2093 FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2094 + FUSE_BUFFER_HEADER_SIZE;
2095 if (bufsize > max_bufsize) {
2096 bufsize = max_bufsize;
2097 }
2098 buf_reallocable = false;
2099 }
2100 if (inargflags & FUSE_DIRECT_IO_ALLOW_MMAP)
2101 se->conn.capable_ext |= FUSE_CAP_DIRECT_IO_ALLOW_MMAP;
2102 if (arg->minor >= 38 || (inargflags & FUSE_HAS_EXPIRE_ONLY))
2103 se->conn.capable_ext |= FUSE_CAP_EXPIRE_ONLY;
2104 if (inargflags & FUSE_PASSTHROUGH)
2105 se->conn.capable_ext |= FUSE_CAP_PASSTHROUGH;
2106 if (inargflags & FUSE_NO_EXPORT_SUPPORT)
2107 se->conn.capable_ext |= FUSE_CAP_NO_EXPORT_SUPPORT;
2108 } else {
2109 se->conn.max_readahead = 0;
2110 }
2111
2112 if (se->conn.proto_minor >= 14) {
2113#ifdef HAVE_SPLICE
2114#ifdef HAVE_VMSPLICE
2115 if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2116 se->conn.capable_ext |= FUSE_CAP_SPLICE_WRITE |
2118 }
2119#endif
2120 if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2121 se->conn.capable_ext |= FUSE_CAP_SPLICE_READ;
2122 }
2123#endif
2124 }
2125 if (se->conn.proto_minor >= 18)
2126 se->conn.capable_ext |= FUSE_CAP_IOCTL_DIR;
2127
2128 /* Default settings for modern filesystems.
2129 *
2130 * Most of these capabilities were disabled by default in
2131 * libfuse2 for backwards compatibility reasons. In libfuse3,
2132 * we can finally enable them by default (as long as they're
2133 * supported by the kernel).
2134 */
2135#define LL_SET_DEFAULT(cond, cap) \
2136 if ((cond)) \
2137 fuse_set_feature_flag(&se->conn, cap)
2138
2139 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2140 LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2141 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2142 LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2143 LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2144 LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2145 LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2147 LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2148 LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2149 LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2151
2152 /* This could safely become default, but libfuse needs an API extension
2153 * to support it
2154 * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
2155 */
2156
2157 se->conn.time_gran = 1;
2158
2159 se->got_init = 1;
2160 if (se->op.init) {
2161 uint64_t want_ext_default = se->conn.want_ext;
2162 uint32_t want_default = fuse_lower_32_bits(se->conn.want_ext);
2163 int rc;
2164
2165 // Apply the first 32 bits of capable_ext to capable
2166 se->conn.capable = fuse_lower_32_bits(se->conn.capable_ext);
2167 se->conn.want = want_default;
2168
2169 se->op.init(se->userdata, &se->conn);
2170
2171 /*
2172 * se->conn.want is 32-bit value and deprecated in favour of
2173 * se->conn.want_ext
2174 * Userspace might still use conn.want - we need to convert it
2175 */
2176 rc = convert_to_conn_want_ext(&se->conn, want_ext_default,
2177 want_default);
2178 if (rc != 0) {
2179 fuse_reply_err(req, EPROTO);
2180 se->error = -EPROTO;
2182 return;
2183 }
2184 }
2185
2186 if (!want_flags_valid(se->conn.capable_ext, se->conn.want_ext)) {
2187 fuse_reply_err(req, EPROTO);
2188 se->error = -EPROTO;
2190 return;
2191 }
2192
2193 unsigned max_read_mo = get_max_read(se->mo);
2194 if (se->conn.max_read != max_read_mo) {
2195 fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2196 "requested different maximum read size (%u vs %u)\n",
2197 se->conn.max_read, max_read_mo);
2198 fuse_reply_err(req, EPROTO);
2199 se->error = -EPROTO;
2201 return;
2202 }
2203
2204 if (bufsize < FUSE_MIN_READ_BUFFER) {
2205 fuse_log(FUSE_LOG_ERR,
2206 "fuse: warning: buffer size too small: %zu\n",
2207 bufsize);
2208 bufsize = FUSE_MIN_READ_BUFFER;
2209 }
2210
2211 if (buf_reallocable)
2212 bufsize = UINT_MAX;
2213 se->conn.max_write = MIN(se->conn.max_write, bufsize - FUSE_BUFFER_HEADER_SIZE);
2214 se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2215
2216 if (arg->flags & FUSE_MAX_PAGES) {
2217 outarg.flags |= FUSE_MAX_PAGES;
2218 outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2219 }
2220 outargflags = outarg.flags;
2221 /* Always enable big writes, this is superseded
2222 by the max_write option */
2223 outargflags |= FUSE_BIG_WRITES;
2224
2225 if (se->conn.want_ext & FUSE_CAP_ASYNC_READ)
2226 outargflags |= FUSE_ASYNC_READ;
2227 if (se->conn.want_ext & FUSE_CAP_POSIX_LOCKS)
2228 outargflags |= FUSE_POSIX_LOCKS;
2229 if (se->conn.want_ext & FUSE_CAP_ATOMIC_O_TRUNC)
2230 outargflags |= FUSE_ATOMIC_O_TRUNC;
2231 if (se->conn.want_ext & FUSE_CAP_EXPORT_SUPPORT)
2232 outargflags |= FUSE_EXPORT_SUPPORT;
2233 if (se->conn.want_ext & FUSE_CAP_DONT_MASK)
2234 outargflags |= FUSE_DONT_MASK;
2235 if (se->conn.want_ext & FUSE_CAP_FLOCK_LOCKS)
2236 outargflags |= FUSE_FLOCK_LOCKS;
2237 if (se->conn.want_ext & FUSE_CAP_AUTO_INVAL_DATA)
2238 outargflags |= FUSE_AUTO_INVAL_DATA;
2239 if (se->conn.want_ext & FUSE_CAP_READDIRPLUS)
2240 outargflags |= FUSE_DO_READDIRPLUS;
2241 if (se->conn.want_ext & FUSE_CAP_READDIRPLUS_AUTO)
2242 outargflags |= FUSE_READDIRPLUS_AUTO;
2243 if (se->conn.want_ext & FUSE_CAP_ASYNC_DIO)
2244 outargflags |= FUSE_ASYNC_DIO;
2245 if (se->conn.want_ext & FUSE_CAP_WRITEBACK_CACHE)
2246 outargflags |= FUSE_WRITEBACK_CACHE;
2247 if (se->conn.want_ext & FUSE_CAP_PARALLEL_DIROPS)
2248 outargflags |= FUSE_PARALLEL_DIROPS;
2249 if (se->conn.want_ext & FUSE_CAP_POSIX_ACL)
2250 outargflags |= FUSE_POSIX_ACL;
2251 if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV)
2252 outargflags |= FUSE_HANDLE_KILLPRIV;
2253 if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV_V2)
2254 outargflags |= FUSE_HANDLE_KILLPRIV_V2;
2255 if (se->conn.want_ext & FUSE_CAP_CACHE_SYMLINKS)
2256 outargflags |= FUSE_CACHE_SYMLINKS;
2257 if (se->conn.want_ext & FUSE_CAP_EXPLICIT_INVAL_DATA)
2258 outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2259 if (se->conn.want_ext & FUSE_CAP_SETXATTR_EXT)
2260 outargflags |= FUSE_SETXATTR_EXT;
2261 if (se->conn.want_ext & FUSE_CAP_DIRECT_IO_ALLOW_MMAP)
2262 outargflags |= FUSE_DIRECT_IO_ALLOW_MMAP;
2263 if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH) {
2264 outargflags |= FUSE_PASSTHROUGH;
2265 /*
2266 * outarg.max_stack_depth includes the fuse stack layer,
2267 * so it is one more than max_backing_stack_depth.
2268 */
2269 outarg.max_stack_depth = se->conn.max_backing_stack_depth + 1;
2270 }
2271 if (se->conn.want_ext & FUSE_CAP_NO_EXPORT_SUPPORT)
2272 outargflags |= FUSE_NO_EXPORT_SUPPORT;
2273
2274 if (inargflags & FUSE_INIT_EXT) {
2275 outargflags |= FUSE_INIT_EXT;
2276 outarg.flags2 = outargflags >> 32;
2277 }
2278
2279 outarg.flags = outargflags;
2280
2281 outarg.max_readahead = se->conn.max_readahead;
2282 outarg.max_write = se->conn.max_write;
2283 if (se->conn.proto_minor >= 13) {
2284 if (se->conn.max_background >= (1 << 16))
2285 se->conn.max_background = (1 << 16) - 1;
2286 if (se->conn.congestion_threshold > se->conn.max_background)
2287 se->conn.congestion_threshold = se->conn.max_background;
2288 if (!se->conn.congestion_threshold) {
2289 se->conn.congestion_threshold =
2290 se->conn.max_background * 3 / 4;
2291 }
2292
2293 outarg.max_background = se->conn.max_background;
2294 outarg.congestion_threshold = se->conn.congestion_threshold;
2295 }
2296 if (se->conn.proto_minor >= 23)
2297 outarg.time_gran = se->conn.time_gran;
2298
2299 if (se->debug) {
2300 fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2301 fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2302 fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2303 outarg.max_readahead);
2304 fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2305 fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2306 outarg.max_background);
2307 fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2308 outarg.congestion_threshold);
2309 fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2310 outarg.time_gran);
2311 if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH)
2312 fuse_log(FUSE_LOG_DEBUG, " max_stack_depth=%u\n",
2313 outarg.max_stack_depth);
2314 }
2315 if (arg->minor < 5)
2316 outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2317 else if (arg->minor < 23)
2318 outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2319
2320 send_reply_ok(req, &outarg, outargsize);
2321}
2322
2323static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2324{
2325 struct fuse_session *se = req->se;
2326
2327 (void) nodeid;
2328 (void) inarg;
2329
2330 se->got_destroy = 1;
2331 se->got_init = 0;
2332 if (se->op.destroy)
2333 se->op.destroy(se->userdata);
2334
2335 send_reply_ok(req, NULL, 0);
2336}
2337
2338static void list_del_nreq(struct fuse_notify_req *nreq)
2339{
2340 struct fuse_notify_req *prev = nreq->prev;
2341 struct fuse_notify_req *next = nreq->next;
2342 prev->next = next;
2343 next->prev = prev;
2344}
2345
2346static void list_add_nreq(struct fuse_notify_req *nreq,
2347 struct fuse_notify_req *next)
2348{
2349 struct fuse_notify_req *prev = next->prev;
2350 nreq->next = next;
2351 nreq->prev = prev;
2352 prev->next = nreq;
2353 next->prev = nreq;
2354}
2355
2356static void list_init_nreq(struct fuse_notify_req *nreq)
2357{
2358 nreq->next = nreq;
2359 nreq->prev = nreq;
2360}
2361
2362static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2363 const void *inarg, const struct fuse_buf *buf)
2364{
2365 struct fuse_session *se = req->se;
2366 struct fuse_notify_req *nreq;
2367 struct fuse_notify_req *head;
2368
2369 pthread_mutex_lock(&se->lock);
2370 head = &se->notify_list;
2371 for (nreq = head->next; nreq != head; nreq = nreq->next) {
2372 if (nreq->unique == req->unique) {
2373 list_del_nreq(nreq);
2374 break;
2375 }
2376 }
2377 pthread_mutex_unlock(&se->lock);
2378
2379 if (nreq != head)
2380 nreq->reply(nreq, req, nodeid, inarg, buf);
2381}
2382
2383static int send_notify_iov(struct fuse_session *se, int notify_code,
2384 struct iovec *iov, int count)
2385{
2386 struct fuse_out_header out;
2387
2388 if (!se->got_init)
2389 return -ENOTCONN;
2390
2391 out.unique = 0;
2392 out.error = notify_code;
2393 iov[0].iov_base = &out;
2394 iov[0].iov_len = sizeof(struct fuse_out_header);
2395
2396 return fuse_send_msg(se, NULL, iov, count);
2397}
2398
2399int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2400{
2401 if (ph != NULL) {
2402 struct fuse_notify_poll_wakeup_out outarg;
2403 struct iovec iov[2];
2404
2405 outarg.kh = ph->kh;
2406
2407 iov[1].iov_base = &outarg;
2408 iov[1].iov_len = sizeof(outarg);
2409
2410 return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2411 } else {
2412 return 0;
2413 }
2414}
2415
2416int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2417 off_t off, off_t len)
2418{
2419 struct fuse_notify_inval_inode_out outarg;
2420 struct iovec iov[2];
2421
2422 if (!se)
2423 return -EINVAL;
2424
2425 if (se->conn.proto_minor < 12)
2426 return -ENOSYS;
2427
2428 outarg.ino = ino;
2429 outarg.off = off;
2430 outarg.len = len;
2431
2432 iov[1].iov_base = &outarg;
2433 iov[1].iov_len = sizeof(outarg);
2434
2435 return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2436}
2437
2457static int fuse_lowlevel_notify_entry(struct fuse_session *se, fuse_ino_t parent,
2458 const char *name, size_t namelen,
2459 enum fuse_notify_entry_flags flags)
2460{
2461 struct fuse_notify_inval_entry_out outarg;
2462 struct iovec iov[3];
2463
2464 if (!se)
2465 return -EINVAL;
2466
2467 if (se->conn.proto_minor < 12)
2468 return -ENOSYS;
2469
2470 outarg.parent = parent;
2471 outarg.namelen = namelen;
2472 outarg.flags = 0;
2473 if (flags & FUSE_LL_EXPIRE_ONLY)
2474 outarg.flags |= FUSE_EXPIRE_ONLY;
2475
2476 iov[1].iov_base = &outarg;
2477 iov[1].iov_len = sizeof(outarg);
2478 iov[2].iov_base = (void *)name;
2479 iov[2].iov_len = namelen + 1;
2480
2481 return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2482}
2483
/* Public API: invalidate the dentry @name under @parent (drops both the
 * dentry and, via the kernel, the cached lookup). */
int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
				     const char *name, size_t namelen)
{
	return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_INVALIDATE);
}
2489
2490int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2491 const char *name, size_t namelen)
2492{
2493 if (!se)
2494 return -EINVAL;
2495
2496 if (!(se->conn.capable_ext & FUSE_CAP_EXPIRE_ONLY))
2497 return -ENOSYS;
2498
2499 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_EXPIRE_ONLY);
2500}
2501
2502
2503int fuse_lowlevel_notify_delete(struct fuse_session *se,
2504 fuse_ino_t parent, fuse_ino_t child,
2505 const char *name, size_t namelen)
2506{
2507 struct fuse_notify_delete_out outarg;
2508 struct iovec iov[3];
2509
2510 if (!se)
2511 return -EINVAL;
2512
2513 if (se->conn.proto_minor < 18)
2514 return -ENOSYS;
2515
2516 outarg.parent = parent;
2517 outarg.child = child;
2518 outarg.namelen = namelen;
2519 outarg.padding = 0;
2520
2521 iov[1].iov_base = &outarg;
2522 iov[1].iov_len = sizeof(outarg);
2523 iov[2].iov_base = (void *)name;
2524 iov[2].iov_len = namelen + 1;
2525
2526 return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2527}
2528
2529int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2530 off_t offset, struct fuse_bufvec *bufv,
2531 enum fuse_buf_copy_flags flags)
2532{
2533 struct fuse_out_header out;
2534 struct fuse_notify_store_out outarg;
2535 struct iovec iov[3];
2536 size_t size = fuse_buf_size(bufv);
2537 int res;
2538
2539 if (!se)
2540 return -EINVAL;
2541
2542 if (se->conn.proto_minor < 15)
2543 return -ENOSYS;
2544
2545 out.unique = 0;
2546 out.error = FUSE_NOTIFY_STORE;
2547
2548 outarg.nodeid = ino;
2549 outarg.offset = offset;
2550 outarg.size = size;
2551 outarg.padding = 0;
2552
2553 iov[0].iov_base = &out;
2554 iov[0].iov_len = sizeof(out);
2555 iov[1].iov_base = &outarg;
2556 iov[1].iov_len = sizeof(outarg);
2557
2558 res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2559 if (res > 0)
2560 res = -res;
2561
2562 return res;
2563}
2564
/* Bookkeeping for an outstanding NOTIFY_RETRIEVE: links the notify-list
 * entry to the caller-supplied cookie so the eventual NOTIFY_REPLY can
 * be handed to the filesystem's retrieve_reply() op. */
struct fuse_retrieve_req {
	struct fuse_notify_req nreq;	/* embedded list node + unique id */
	void *cookie;			/* opaque value from the caller */
};
2569
/* Completion callback for NOTIFY_RETRIEVE: the kernel has answered with
 * a NOTIFY_REPLY carrying the requested page-cache data.  Trim the
 * incoming buffer down to the payload, hand it to the filesystem's
 * retrieve_reply() op, then free the bookkeeping record. */
static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
				   fuse_req_t req, fuse_ino_t ino,
				   const void *inarg,
				   const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_retrieve_req *rreq =
		container_of(nreq, struct fuse_retrieve_req, nreq);
	const struct fuse_notify_retrieve_in *arg = inarg;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};

	/* In-memory buffers: skip directly past the argument struct.
	 * FD (pipe) buffers are consumed through the fd instead. */
	if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
		bufv.buf[0].mem = PARAM(arg);

	/* Strip the fuse_in_header + fuse_notify_retrieve_in preamble
	 * from the accounted size. */
	bufv.buf[0].size -= sizeof(struct fuse_in_header) +
		sizeof(struct fuse_notify_retrieve_in);

	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
		fuse_reply_none(req);
		goto out;
	}
	bufv.buf[0].size = arg->size;

	if (se->op.retrieve_reply) {
		se->op.retrieve_reply(req, rreq->cookie, ino,
				      arg->offset, &bufv);
	} else {
		fuse_reply_none(req);
	}
out:
	free(rreq);
	/* If the payload sat in a pipe and was not fully consumed, drain
	 * the pipe so it can be reused for the next request. */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
2608
/* Ask the kernel to send back up to @size bytes of cached data for
 * inode @ino starting at @offset.  The answer arrives asynchronously as
 * a NOTIFY_REPLY and is delivered via fuse_ll_retrieve_reply() together
 * with @cookie.  Requires protocol 7.15.
 * Returns 0 on success or a negative errno value. */
int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
				  size_t size, off_t offset, void *cookie)
{
	struct fuse_notify_retrieve_out outarg;
	struct iovec iov[2];
	struct fuse_retrieve_req *rreq;
	int err;

	if (!se)
		return -EINVAL;

	if (se->conn.proto_minor < 15)
		return -ENOSYS;

	rreq = malloc(sizeof(*rreq));
	if (rreq == NULL)
		return -ENOMEM;

	/* Register the pending request BEFORE sending the notification,
	 * so a fast kernel reply always finds it on the list. */
	pthread_mutex_lock(&se->lock);
	rreq->cookie = cookie;
	rreq->nreq.unique = se->notify_ctr++;
	rreq->nreq.reply = fuse_ll_retrieve_reply;
	list_add_nreq(&rreq->nreq, &se->notify_list);
	pthread_mutex_unlock(&se->lock);

	outarg.notify_unique = rreq->nreq.unique;
	outarg.nodeid = ino;
	outarg.offset = offset;
	outarg.size = size;
	outarg.padding = 0;

	iov[1].iov_base = &outarg;
	iov[1].iov_len = sizeof(outarg);

	err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
	if (err) {
		/* Send failed: no reply will ever come, so unregister
		 * and free the bookkeeping record here. */
		pthread_mutex_lock(&se->lock);
		list_del_nreq(&rreq->nreq);
		pthread_mutex_unlock(&se->lock);
		free(rreq);
	}

	return err;
}
2653
2655{
2656 return req->se->userdata;
2657}
2658
2660{
2661 return &req->ctx;
2662}
2663
2665 void *data)
2666{
2667 pthread_mutex_lock(&req->lock);
2668 pthread_mutex_lock(&req->se->lock);
2669 req->u.ni.func = func;
2670 req->u.ni.data = data;
2671 pthread_mutex_unlock(&req->se->lock);
2672 if (req->interrupted && func)
2673 func(req, data);
2674 pthread_mutex_unlock(&req->lock);
2675}
2676
2678{
2679 int interrupted;
2680
2681 pthread_mutex_lock(&req->se->lock);
2682 interrupted = req->interrupted;
2683 pthread_mutex_unlock(&req->se->lock);
2684
2685 return interrupted;
2686}
2687
/* Dispatch table: maps each FUSE opcode to its handler and a printable
 * name (used by opname() for debug logging).  Indexed directly by
 * opcode; gaps are NULL and rejected with ENOSYS at dispatch time. */
static struct {
	void (*func)(fuse_req_t, fuse_ino_t, const void *);
	const char *name;
} fuse_ll_ops[] = {
	[FUSE_LOOKUP]	   = { do_lookup,      "LOOKUP"	     },
	[FUSE_FORGET]	   = { do_forget,      "FORGET"	     },
	[FUSE_GETATTR]	   = { do_getattr,     "GETATTR"     },
	[FUSE_SETATTR]	   = { do_setattr,     "SETATTR"     },
	[FUSE_READLINK]	   = { do_readlink,    "READLINK"    },
	[FUSE_SYMLINK]	   = { do_symlink,     "SYMLINK"     },
	[FUSE_MKNOD]	   = { do_mknod,       "MKNOD"	     },
	[FUSE_MKDIR]	   = { do_mkdir,       "MKDIR"	     },
	[FUSE_UNLINK]	   = { do_unlink,      "UNLINK"	     },
	[FUSE_RMDIR]	   = { do_rmdir,       "RMDIR"	     },
	[FUSE_RENAME]	   = { do_rename,      "RENAME"	     },
	[FUSE_LINK]	   = { do_link,	       "LINK"	     },
	[FUSE_OPEN]	   = { do_open,	       "OPEN"	     },
	[FUSE_READ]	   = { do_read,	       "READ"	     },
	[FUSE_WRITE]	   = { do_write,       "WRITE"	     },
	[FUSE_STATFS]	   = { do_statfs,      "STATFS"	     },
	[FUSE_RELEASE]	   = { do_release,     "RELEASE"     },
	[FUSE_FSYNC]	   = { do_fsync,       "FSYNC"	     },
	[FUSE_SETXATTR]	   = { do_setxattr,    "SETXATTR"    },
	[FUSE_GETXATTR]	   = { do_getxattr,    "GETXATTR"    },
	[FUSE_LISTXATTR]   = { do_listxattr,   "LISTXATTR"   },
	[FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
	[FUSE_FLUSH]	   = { do_flush,       "FLUSH"	     },
	[FUSE_INIT]	   = { do_init,	       "INIT"	     },
	[FUSE_OPENDIR]	   = { do_opendir,     "OPENDIR"     },
	[FUSE_READDIR]	   = { do_readdir,     "READDIR"     },
	[FUSE_RELEASEDIR]  = { do_releasedir,  "RELEASEDIR"  },
	[FUSE_FSYNCDIR]	   = { do_fsyncdir,    "FSYNCDIR"    },
	[FUSE_GETLK]	   = { do_getlk,       "GETLK"	     },
	[FUSE_SETLK]	   = { do_setlk,       "SETLK"	     },
	[FUSE_SETLKW]	   = { do_setlkw,      "SETLKW"	     },
	[FUSE_ACCESS]	   = { do_access,      "ACCESS"	     },
	[FUSE_CREATE]	   = { do_create,      "CREATE"	     },
	[FUSE_TMPFILE]	   = { do_tmpfile,     "TMPFILE"     },
	[FUSE_INTERRUPT]   = { do_interrupt,   "INTERRUPT"   },
	[FUSE_BMAP]	   = { do_bmap,	       "BMAP"	     },
	[FUSE_IOCTL]	   = { do_ioctl,       "IOCTL"	     },
	[FUSE_POLL]	   = { do_poll,	       "POLL"	     },
	[FUSE_FALLOCATE]   = { do_fallocate,   "FALLOCATE"   },
	[FUSE_DESTROY]	   = { do_destroy,     "DESTROY"     },
	/* NOTIFY_REPLY is dispatched specially in
	 * fuse_session_process_buf_internal(); the non-NULL marker only
	 * keeps the ENOSYS check from rejecting it. */
	[FUSE_NOTIFY_REPLY] = { (void *) 1,    "NOTIFY_REPLY" },
	[FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
	[FUSE_READDIRPLUS] = { do_readdirplus,	"READDIRPLUS"},
	[FUSE_RENAME2]     = { do_rename2,      "RENAME2"    },
	[FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
	[FUSE_LSEEK]       = { do_lseek,       "LSEEK"	     },
	[CUSE_INIT]	   = { cuse_lowlevel_init, "CUSE_INIT"   },
};
2740
2741/*
2742 * For ABI compatibility we cannot allow higher values than CUSE_INIT.
2743 * Without ABI compatibility we could use the size of the array.
2744 * #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2745 */
2746#define FUSE_MAXOP (CUSE_INIT + 1)
2747
2748static const char *opname(enum fuse_opcode opcode)
2749{
2750 if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2751 return "???";
2752 else
2753 return fuse_ll_ops[opcode].name;
2754}
2755
2756static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2757 struct fuse_bufvec *src)
2758{
2759 ssize_t res = fuse_buf_copy(dst, src, 0);
2760 if (res < 0) {
2761 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2762 return res;
2763 }
2764 if ((size_t)res < fuse_buf_size(dst)) {
2765 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2766 return -1;
2767 }
2768 return 0;
2769}
2770
/* Public entry point: process one request buffer on the session's main
 * channel.  Thin wrapper over the internal variant with ch == NULL. */
void fuse_session_process_buf(struct fuse_session *se,
			      const struct fuse_buf *buf)
{
	fuse_session_process_buf_internal(se, buf, NULL);
}
2776
/* libfuse internal handler: decode one request from @buf (an in-memory
 * buffer or, with FUSE_BUF_IS_FD, a splice pipe), allocate a fuse_req,
 * run session-level checks (INIT ordering, allow_root, interrupts) and
 * dispatch to the handler in fuse_ll_ops.  @ch is the channel the
 * request arrived on, or NULL for the session's main fd. */
void fuse_session_process_buf_internal(struct fuse_session *se,
				       const struct fuse_buf *buf, struct fuse_chan *ch)
{
	const size_t write_header_size = sizeof(struct fuse_in_header) +
		sizeof(struct fuse_write_in);
	struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
	struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
	struct fuse_in_header *in;
	const void *inarg;
	struct fuse_req *req;
	void *mbuf = NULL;
	int err;
	int res;

	if (buf->flags & FUSE_BUF_IS_FD) {
		/* Request is in a pipe: copy just the in/write headers
		 * into a temporary buffer so they can be inspected. */
		if (buf->size < tmpbuf.buf[0].size)
			tmpbuf.buf[0].size = buf->size;

		mbuf = malloc(tmpbuf.buf[0].size);
		if (mbuf == NULL) {
			fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
			goto clear_pipe;
		}
		tmpbuf.buf[0].mem = mbuf;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		if (res < 0)
			goto clear_pipe;

		in = mbuf;
	} else {
		in = buf->mem;
	}

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG,
			"unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
			(unsigned long long) in->unique,
			opname((enum fuse_opcode) in->opcode), in->opcode,
			(unsigned long long) in->nodeid, buf->size, in->pid);
	}

	req = fuse_ll_alloc_req(se);
	if (req == NULL) {
		/* No memory for a request object: reply ENOMEM with a
		 * stack header, bypassing the req machinery entirely. */
		struct fuse_out_header out = {
			.unique = in->unique,
			.error = -ENOMEM,
		};
		struct iovec iov = {
			.iov_base = &out,
			.iov_len = sizeof(struct fuse_out_header),
		};

		fuse_send_msg(se, ch, &iov, 1);
		goto clear_pipe;
	}

	req->unique = in->unique;
	req->ctx.uid = in->uid;
	req->ctx.gid = in->gid;
	req->ctx.pid = in->pid;
	req->ch = ch ? fuse_chan_get(ch) : NULL;

	/* INIT must be the first request (CUSE_INIT for CUSE sessions)
	 * and must not be repeated afterwards. */
	err = EIO;
	if (!se->got_init) {
		enum fuse_opcode expected;

		expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
		if (in->opcode != expected)
			goto reply_err;
	} else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
		goto reply_err;

	err = EACCES;
	/* Implement -o allow_root */
	if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
		 in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
		 in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
		 in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
		 in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
		 in->opcode != FUSE_NOTIFY_REPLY &&
		 in->opcode != FUSE_READDIRPLUS)
		goto reply_err;

	err = ENOSYS;
	if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
		goto reply_err;
	/* Do not process interrupt request */
	if (se->conn.no_interrupt && in->opcode == FUSE_INTERRUPT) {
		if (se->debug)
			fuse_log(FUSE_LOG_DEBUG, "FUSE_INTERRUPT: reply to kernel to disable interrupt\n");
		goto reply_err;
	}
	if (!se->conn.no_interrupt && in->opcode != FUSE_INTERRUPT) {
		struct fuse_req *intr;
		pthread_mutex_lock(&se->lock);
		intr = check_interrupt(se, req);
		list_add_req(req, &se->list);
		pthread_mutex_unlock(&se->lock);
		if (intr)
			fuse_reply_err(intr, EAGAIN);
	}

	if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
	    (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
	    in->opcode != FUSE_NOTIFY_REPLY) {
		void *newmbuf;

		/* The handler needs the full request contiguous in
		 * memory: drain the rest of the pipe into mbuf. */
		err = ENOMEM;
		newmbuf = realloc(mbuf, buf->size);
		if (newmbuf == NULL)
			goto reply_err;
		mbuf = newmbuf;

		tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
		tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		err = -res;
		if (res < 0)
			goto reply_err;

		in = mbuf;
	}

	/* Request arguments immediately follow the fuse_in_header. */
	inarg = (void *) &in[1];
	if (in->opcode == FUSE_WRITE && se->op.write_buf)
		do_write_buf(req, in->nodeid, inarg, buf);
	else if (in->opcode == FUSE_NOTIFY_REPLY)
		do_notify_reply(req, in->nodeid, inarg, buf);
	else
		fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);

out_free:
	free(mbuf);
	return;

reply_err:
	fuse_reply_err(req, err);
clear_pipe:
	/* Error before dispatch: any unread payload left in the pipe
	 * must be drained so the pipe is reusable. */
	if (buf->flags & FUSE_BUF_IS_FD)
		fuse_ll_clear_pipe(se);
	goto out_free;
}
2922
2923#define LL_OPTION(n,o,v) \
2924 { n, offsetof(struct fuse_session, o), v }
2925
2926static const struct fuse_opt fuse_ll_opts[] = {
2927 LL_OPTION("debug", debug, 1),
2928 LL_OPTION("-d", debug, 1),
2929 LL_OPTION("--debug", debug, 1),
2930 LL_OPTION("allow_root", deny_others, 1),
2932};
2933
2935{
2936 printf("using FUSE kernel interface version %i.%i\n",
2937 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2938 fuse_mount_version();
2939}
2940
2942{
2943 /* These are not all options, but the ones that are
2944 potentially of interest to an end-user */
2945 printf(
2946" -o allow_other allow access by all users\n"
2947" -o allow_root allow access by root\n"
2948" -o auto_unmount auto unmount on process termination\n");
2949}
2950
2951void fuse_session_destroy(struct fuse_session *se)
2952{
2953 struct fuse_ll_pipe *llp;
2954
2955 if (se->got_init && !se->got_destroy) {
2956 if (se->op.destroy)
2957 se->op.destroy(se->userdata);
2958 }
2959 llp = pthread_getspecific(se->pipe_key);
2960 if (llp != NULL)
2961 fuse_ll_pipe_free(llp);
2962 pthread_key_delete(se->pipe_key);
2963 pthread_mutex_destroy(&se->lock);
2964 free(se->cuse_data);
2965 if (se->fd != -1)
2966 close(se->fd);
2967 if (se->io != NULL)
2968 free(se->io);
2969 destroy_mount_opts(se->mo);
2970 free(se);
2971}
2972
2973
/* pthread_key destructor: release a thread's splice pipe when the
 * thread exits. */
static void fuse_ll_pipe_destructor(void *data)
{
	fuse_ll_pipe_free((struct fuse_ll_pipe *)data);
}
2979
/* Free a request buffer previously handed out by buf_alloc().
 * NOTE(review): the pointer arithmetic assumes buf->mem came from
 * buf_alloc() with internal=true (offset into a page-aligned
 * allocation); a plain malloc'ed external buffer must not be passed
 * here — confirm callers. */
void fuse_buf_free(struct fuse_buf *buf)
{
	if (buf->mem == NULL)
		return;

	size_t write_header_sz =
		sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in);

	/* buf_alloc() shifted the returned pointer forward so the WRITE
	 * payload lands page-aligned; undo that shift to recover the
	 * raw allocation before freeing. */
	char *ptr = (char *)buf->mem - pagesize + write_header_sz;
	free(ptr);
	buf->mem = NULL;
}
2992
2993/*
2994 * This is used to allocate buffers that hold fuse requests
2995 */
2996static void *buf_alloc(size_t size, bool internal)
2997{
2998 /*
2999 * For libfuse internal caller add in alignment. That cannot be done
3000 * for an external caller, as it is not guaranteed that the external
3001 * caller frees the raw pointer.
3002 */
3003 if (internal) {
3004 size_t write_header_sz = sizeof(struct fuse_in_header) +
3005 sizeof(struct fuse_write_in);
3006 size_t new_size = ROUND_UP(size + write_header_sz, pagesize);
3007
3008 char *buf = aligned_alloc(pagesize, new_size);
3009 if (buf == NULL)
3010 return NULL;
3011
3012 buf += pagesize - write_header_sz;
3013
3014 return buf;
3015 } else {
3016 return malloc(size);
3017 }
3018}
3019
3020/*
3021 *@param internal true if called from libfuse internal code
3022 */
3023static int _fuse_session_receive_buf(struct fuse_session *se,
3024 struct fuse_buf *buf, struct fuse_chan *ch,
3025 bool internal)
3026{
3027 int err;
3028 ssize_t res;
3029 size_t bufsize = se->bufsize;
3030#ifdef HAVE_SPLICE
3031 struct fuse_ll_pipe *llp;
3032 struct fuse_buf tmpbuf;
3033
3034 if (se->conn.proto_minor < 14 ||
3035 !(se->conn.want_ext & FUSE_CAP_SPLICE_READ))
3036 goto fallback;
3037
3038 llp = fuse_ll_get_pipe(se);
3039 if (llp == NULL)
3040 goto fallback;
3041
3042 if (llp->size < bufsize) {
3043 if (llp->can_grow) {
3044 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
3045 if (res == -1) {
3046 llp->can_grow = 0;
3047 res = grow_pipe_to_max(llp->pipe[0]);
3048 if (res > 0)
3049 llp->size = res;
3050 goto fallback;
3051 }
3052 llp->size = res;
3053 }
3054 if (llp->size < bufsize)
3055 goto fallback;
3056 }
3057
3058 if (se->io != NULL && se->io->splice_receive != NULL) {
3059 res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
3060 llp->pipe[1], NULL, bufsize, 0,
3061 se->userdata);
3062 } else {
3063 res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
3064 bufsize, 0);
3065 }
3066 err = errno;
3067
3068 if (fuse_session_exited(se))
3069 return 0;
3070
3071 if (res == -1) {
3072 if (err == ENODEV) {
3073 /* Filesystem was unmounted, or connection was aborted
3074 via /sys/fs/fuse/connections */
3076 return 0;
3077 }
3078 if (err != EINTR && err != EAGAIN)
3079 perror("fuse: splice from device");
3080 return -err;
3081 }
3082
3083 if (res < sizeof(struct fuse_in_header)) {
3084 fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
3085 return -EIO;
3086 }
3087
3088 tmpbuf = (struct fuse_buf){
3089 .size = res,
3090 .flags = FUSE_BUF_IS_FD,
3091 .fd = llp->pipe[0],
3092 };
3093
3094 /*
3095 * Don't bother with zero copy for small requests.
3096 * fuse_loop_mt() needs to check for FORGET so this more than
3097 * just an optimization.
3098 */
3099 if (res < sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in) +
3100 pagesize) {
3101 struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
3102 struct fuse_bufvec dst = { .count = 1 };
3103
3104 if (!buf->mem) {
3105 buf->mem = buf_alloc(se->bufsize, internal);
3106 if (!buf->mem) {
3107 fuse_log(
3108 FUSE_LOG_ERR,
3109 "fuse: failed to allocate read buffer\n");
3110 return -ENOMEM;
3111 }
3112 buf->mem_size = se->bufsize;
3113 if (internal)
3114 se->buf_reallocable = true;
3115 }
3116 buf->size = se->bufsize;
3117 buf->flags = 0;
3118 dst.buf[0] = *buf;
3119
3120 res = fuse_buf_copy(&dst, &src, 0);
3121 if (res < 0) {
3122 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
3123 strerror(-res));
3124 fuse_ll_clear_pipe(se);
3125 return res;
3126 }
3127 if (res < tmpbuf.size) {
3128 fuse_log(FUSE_LOG_ERR,
3129 "fuse: copy from pipe: short read\n");
3130 fuse_ll_clear_pipe(se);
3131 return -EIO;
3132 }
3133 assert(res == tmpbuf.size);
3134
3135 } else {
3136 /* Don't overwrite buf->mem, as that would cause a leak */
3137 buf->fd = tmpbuf.fd;
3138 buf->flags = tmpbuf.flags;
3139 }
3140 buf->size = tmpbuf.size;
3141
3142 return res;
3143
3144fallback:
3145#endif
3146 if (!buf->mem) {
3147 buf->mem = buf_alloc(se->bufsize, internal);
3148 if (!buf->mem) {
3149 fuse_log(FUSE_LOG_ERR,
3150 "fuse: failed to allocate read buffer\n");
3151 return -ENOMEM;
3152 }
3153 buf->mem_size = se->bufsize;
3154 if (internal)
3155 se->buf_reallocable = true;
3156 }
3157
3158restart:
3159 if (se->buf_reallocable)
3160 bufsize = buf->mem_size;
3161 if (se->io != NULL) {
3162 /* se->io->read is never NULL if se->io is not NULL as
3163 specified by fuse_session_custom_io()*/
3164 res = se->io->read(ch ? ch->fd : se->fd, buf->mem, bufsize,
3165 se->userdata);
3166 } else {
3167 res = read(ch ? ch->fd : se->fd, buf->mem, bufsize);
3168 }
3169 err = errno;
3170
3171 if (fuse_session_exited(se))
3172 return 0;
3173 if (res == -1) {
3174 if (err == EINVAL && se->buf_reallocable &&
3175 se->bufsize > buf->mem_size) {
3176 void *newbuf = buf_alloc(se->bufsize, internal);
3177 if (!newbuf) {
3178 fuse_log(
3179 FUSE_LOG_ERR,
3180 "fuse: failed to (re)allocate read buffer\n");
3181 return -ENOMEM;
3182 }
3183 fuse_buf_free(buf);
3184 buf->mem = newbuf;
3185 buf->mem_size = se->bufsize;
3186 se->buf_reallocable = true;
3187 goto restart;
3188 }
3189
3190 /* ENOENT means the operation was interrupted, it's safe
3191 to restart */
3192 if (err == ENOENT)
3193 goto restart;
3194
3195 if (err == ENODEV) {
3196 /* Filesystem was unmounted, or connection was aborted
3197 via /sys/fs/fuse/connections */
3199 return 0;
3200 }
3201 /* Errors occurring during normal operation: EINTR (read
3202 interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
3203 umounted) */
3204 if (err != EINTR && err != EAGAIN)
3205 perror("fuse: reading device");
3206 return -err;
3207 }
3208 if ((size_t)res < sizeof(struct fuse_in_header)) {
3209 fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
3210 return -EIO;
3211 }
3212
3213 buf->size = res;
3214
3215 return res;
3216}
3217
/* Public API: receive one request on the session's main channel.
 * internal=false, so the library never reallocates the caller's buffer. */
int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
{
	return _fuse_session_receive_buf(se, buf, NULL, false);
}
3222
/* libfuse internal handler: like fuse_session_receive_buf() but on a
 * specific channel and with buffer reallocation allowed. */
int fuse_session_receive_buf_internal(struct fuse_session *se,
				      struct fuse_buf *buf,
				      struct fuse_chan *ch)
{
	return _fuse_session_receive_buf(se, buf, ch, true);
}
3230
/* Create a new low-level session: parse session options from @args,
 * translate allow_root, parse mount options, and initialize request /
 * interrupt / notify lists plus locking.  @op_size allows callers built
 * against older headers; @version records the caller's compile-time
 * libfuse version.  Returns NULL on any failure (errors are logged). */
struct fuse_session *
fuse_session_new_versioned(struct fuse_args *args,
			   const struct fuse_lowlevel_ops *op, size_t op_size,
			   struct libfuse_version *version, void *userdata)
{
	int err;
	struct fuse_session *se;
	struct mount_opts *mo;

	/* Caller compiled against a newer header: only copy the part of
	 * the ops table this library knows about. */
	if (sizeof(struct fuse_lowlevel_ops) < op_size) {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
		op_size = sizeof(struct fuse_lowlevel_ops);
	}

	if (args->argc == 0) {
		fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
		return NULL;
	}

	se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
	if (se == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
		goto out1;
	}
	se->fd = -1;
	se->conn.max_write = FUSE_DEFAULT_MAX_PAGES_LIMIT * getpagesize();
	se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
	se->conn.max_readahead = UINT_MAX;

	/* Parse options */
	if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
		goto out2;
	if(se->deny_others) {
		/* Allowing access only by root is done by instructing
		 * kernel to allow access by everyone, and then restricting
		 * access to root and mountpoint owner in libfuse.
		 */
		// We may be adding the option a second time, but
		// that doesn't hurt.
		if(fuse_opt_add_arg(args, "-oallow_other") == -1)
			goto out2;
	}
	mo = parse_mount_opts(args);
	if (mo == NULL)
		goto out3;

	/* After option parsing only argv[0] should remain; anything else
	 * is an unrecognized option. */
	if(args->argc == 1 &&
	   args->argv[0][0] == '-') {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
			"will be ignored\n");
	} else if (args->argc != 1) {
		int i;
		fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
		for(i = 1; i < args->argc-1; i++)
			fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
		fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
		goto out4;
	}

	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);

	list_init_req(&se->list);
	list_init_req(&se->interrupts);
	list_init_nreq(&se->notify_list);
	se->notify_ctr = 1;
	pthread_mutex_init(&se->lock, NULL);

	err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
	if (err) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
			strerror(err));
		goto out5;
	}

	memcpy(&se->op, op, op_size);
	se->owner = getuid();
	se->userdata = userdata;

	se->mo = mo;

	/* Fuse server application should pass the version it was compiled
	 * against and pass it. If a libfuse version accidentally introduces an
	 * ABI incompatibility, it might be possible to 'fix' that at run time,
	 * by checking the version numbers.
	 */
	se->version = *version;

	return se;

out5:
	pthread_mutex_destroy(&se->lock);
out4:
	fuse_opt_free_args(args);
out3:
	if (mo != NULL)
		destroy_mount_opts(mo);
out2:
	free(se);
out1:
	return NULL;
}
3333
3334struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3335 const struct fuse_lowlevel_ops *op,
3336 size_t op_size, void *userdata);
3337struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3338 const struct fuse_lowlevel_ops *op,
3339 size_t op_size,
3340 void *userdata)
3341{
3342 /* unknown version */
3343 struct libfuse_version version = { 0 };
3344
3345 return fuse_session_new_versioned(args, op, op_size, &version,
3346 userdata);
3347}
3348
3349FUSE_SYMVER("fuse_session_custom_io_317", "fuse_session_custom_io@@FUSE_3.17")
3350int fuse_session_custom_io_317(struct fuse_session *se,
3351 const struct fuse_custom_io *io, size_t op_size, int fd)
3352{
3353 if (sizeof(struct fuse_custom_io) < op_size) {
3354 fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
3355 op_size = sizeof(struct fuse_custom_io);
3356 }
3357
3358 if (fd < 0) {
3359 fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3360 "fuse_session_custom_io()\n", fd);
3361 return -EBADF;
3362 }
3363 if (io == NULL) {
3364 fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3365 "fuse_session_custom_io()\n");
3366 return -EINVAL;
3367 } else if (io->read == NULL || io->writev == NULL) {
3368 /* If the user provides their own file descriptor, we can't
3369 guarantee that the default behavior of the io operations made
3370 in libfuse will function properly. Therefore, we enforce the
3371 user to implement these io operations when using custom io. */
3372 fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3373 "implement both io->read() and io->writev\n");
3374 return -EINVAL;
3375 }
3376
3377 se->io = calloc(1, sizeof(struct fuse_custom_io));
3378 if (se->io == NULL) {
3379 fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3380 "Error: %s\n", strerror(errno));
3381 return -errno;
3382 }
3383
3384 se->fd = fd;
3385 memcpy(se->io, io, op_size);
3386 return 0;
3387}
3388
/* ABI-compat wrapper (FUSE_3.0 symbol version): the 3.0-era struct
 * fuse_custom_io ends just before the clone_fd member, so only that
 * prefix of @io is copied. */
int fuse_session_custom_io_30(struct fuse_session *se,
			const struct fuse_custom_io *io, int fd);
FUSE_SYMVER("fuse_session_custom_io_30", "fuse_session_custom_io@FUSE_3.0")
int fuse_session_custom_io_30(struct fuse_session *se,
			const struct fuse_custom_io *io, int fd)
{
	return fuse_session_custom_io_317(se, io,
			offsetof(struct fuse_custom_io, clone_fd), fd);
}
3398
/* Mount the session at @mountpoint and store the resulting /dev/fuse
 * fd in se->fd.  Also supports the "/dev/fd/N" convention where the
 * caller has already opened /dev/fuse and the parent performs the
 * mount.  Returns 0 on success, -1 on failure. */
int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
{
	int fd;

	if (mountpoint == NULL) {
		fuse_log(FUSE_LOG_ERR, "Invalid null-ptr mountpoint!\n");
		return -1;
	}

	/*
	 * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
	 * would ensue.
	 */
	do {
		fd = open("/dev/null", O_RDWR);
		if (fd > 2)
			close(fd);
	} while (fd >= 0 && fd <= 2);

	/*
	 * To allow FUSE daemons to run without privileges, the caller may open
	 * /dev/fuse before launching the file system and pass on the file
	 * descriptor by specifying /dev/fd/N as the mount point. Note that the
	 * parent process takes care of performing the mount in this case.
	 */
	fd = fuse_mnt_parse_fuse_fd(mountpoint);
	if (fd != -1) {
		/* F_GETFD doubles as a cheap validity check for the fd. */
		if (fcntl(fd, F_GETFD) == -1) {
			fuse_log(FUSE_LOG_ERR,
				"fuse: Invalid file descriptor /dev/fd/%u\n",
				fd);
			return -1;
		}
		se->fd = fd;
		return 0;
	}

	/* Open channel */
	fd = fuse_kern_mount(mountpoint, se->mo);
	if (fd == -1)
		return -1;
	se->fd = fd;

	/* Save mountpoint */
	se->mountpoint = strdup(mountpoint);
	if (se->mountpoint == NULL)
		goto error_out;

	return 0;

error_out:
	fuse_kern_unmount(mountpoint, fd);
	return -1;
}
3453
/* Return the /dev/fuse (or custom I/O) file descriptor of @se. */
int fuse_session_fd(struct fuse_session *se)
{
	return se->fd;
}
3458
3459void fuse_session_unmount(struct fuse_session *se)
3460{
3461 if (se->mountpoint != NULL) {
3462 fuse_kern_unmount(se->mountpoint, se->fd);
3463 se->fd = -1;
3464 free(se->mountpoint);
3465 se->mountpoint = NULL;
3466 }
3467}
3468
3469#ifdef linux
3470int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3471{
3472 char *buf;
3473 size_t bufsize = 1024;
3474 char path[128];
3475 int ret;
3476 int fd;
3477 unsigned long pid = req->ctx.pid;
3478 char *s;
3479
3480 sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3481
3482retry:
3483 buf = malloc(bufsize);
3484 if (buf == NULL)
3485 return -ENOMEM;
3486
3487 ret = -EIO;
3488 fd = open(path, O_RDONLY);
3489 if (fd == -1)
3490 goto out_free;
3491
3492 ret = read(fd, buf, bufsize);
3493 close(fd);
3494 if (ret < 0) {
3495 ret = -EIO;
3496 goto out_free;
3497 }
3498
3499 if ((size_t)ret == bufsize) {
3500 free(buf);
3501 bufsize *= 4;
3502 goto retry;
3503 }
3504
3505 buf[ret] = '\0';
3506 ret = -EIO;
3507 s = strstr(buf, "\nGroups:");
3508 if (s == NULL)
3509 goto out_free;
3510
3511 s += 8;
3512 ret = 0;
3513 while (1) {
3514 char *end;
3515 unsigned long val = strtoul(s, &end, 0);
3516 if (end == s)
3517 break;
3518
3519 s = end;
3520 if (ret < size)
3521 list[ret] = val;
3522 ret++;
3523 }
3524
3525out_free:
3526 free(buf);
3527 return ret;
3528}
3529#else /* linux */
3530/*
3531 * This is currently not implemented on other than Linux...
3532 */
int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
{
	/* Supplementary-group lookup relies on Linux procfs; report
	 * "not supported" on every other platform. */
	(void) req; (void) size; (void) list;
	return -ENOSYS;
}
3538#endif
3539
/* Ask the session loop(s) to terminate.
 * Prevent spurious data race warning - we don't care
 * about races for this flag */
__attribute__((no_sanitize_thread))
void fuse_session_exit(struct fuse_session *se)
{
	se->exited = 1;
}
3547
/* Clear the exit flag and sticky error so the session loop can run
 * again after fuse_session_exit(). */
__attribute__((no_sanitize_thread))
void fuse_session_reset(struct fuse_session *se)
{
	se->exited = 0;
	se->error = 0;
}
3554
/* Return nonzero once fuse_session_exit() has been called (and not
 * reset); polled by the session loops after each read/dispatch. */
__attribute__((no_sanitize_thread))
int fuse_session_exited(struct fuse_session *se)
{
	return se->exited;
}
#define FUSE_CAP_IOCTL_DIR
#define FUSE_CAP_DONT_MASK
#define FUSE_CAP_HANDLE_KILLPRIV
#define FUSE_CAP_AUTO_INVAL_DATA
#define FUSE_CAP_HANDLE_KILLPRIV_V2
#define FUSE_CAP_SPLICE_READ
#define FUSE_CAP_PARALLEL_DIROPS
size_t fuse_buf_size(const struct fuse_bufvec *bufv)
Definition buffer.c:22
#define FUSE_CAP_WRITEBACK_CACHE
#define FUSE_CAP_EXPIRE_ONLY
#define FUSE_CAP_ATOMIC_O_TRUNC
#define FUSE_CAP_ASYNC_READ
#define FUSE_CAP_SPLICE_WRITE
#define FUSE_CAP_CACHE_SYMLINKS
#define FUSE_CAP_POSIX_ACL
@ FUSE_BUF_IS_FD
#define FUSE_CAP_EXPORT_SUPPORT
#define FUSE_CAP_POSIX_LOCKS
#define FUSE_CAP_EXPLICIT_INVAL_DATA
#define FUSE_CAP_READDIRPLUS_AUTO
ssize_t fuse_buf_copy(struct fuse_bufvec *dst, struct fuse_bufvec *src, enum fuse_buf_copy_flags flags)
Definition buffer.c:284
#define FUSE_CAP_NO_OPENDIR_SUPPORT
#define FUSE_CAP_ASYNC_DIO
#define FUSE_CAP_PASSTHROUGH
#define FUSE_CAP_DIRECT_IO_ALLOW_MMAP
#define FUSE_CAP_NO_OPEN_SUPPORT
#define FUSE_CAP_READDIRPLUS
fuse_buf_copy_flags
@ FUSE_BUF_SPLICE_NONBLOCK
@ FUSE_BUF_FORCE_SPLICE
@ FUSE_BUF_NO_SPLICE
@ FUSE_BUF_SPLICE_MOVE
#define FUSE_CAP_SETXATTR_EXT
#define FUSE_CAP_SPLICE_MOVE
#define FUSE_CAP_NO_EXPORT_SUPPORT
#define FUSE_CAP_FLOCK_LOCKS
void fuse_log(enum fuse_log_level level, const char *fmt,...)
Definition fuse_log.c:77
void fuse_session_destroy(struct fuse_session *se)
fuse_notify_entry_flags
int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags)
int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *fi)
void fuse_session_exit(struct fuse_session *se)
void(* fuse_interrupt_func_t)(fuse_req_t req, void *data)
int fuse_reply_poll(fuse_req_t req, unsigned revents)
int fuse_reply_err(fuse_req_t req, int err)
void * fuse_req_userdata(fuse_req_t req)
int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
struct fuse_req * fuse_req_t
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize, const char *name, const struct fuse_entry_param *e, off_t off)
int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov, int count)
int fuse_lowlevel_notify_delete(struct fuse_session *se, fuse_ino_t parent, fuse_ino_t child, const char *name, size_t namelen)
void fuse_session_process_buf(struct fuse_session *se, const struct fuse_buf *buf)
int fuse_session_exited(struct fuse_session *se)
int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino, size_t size, off_t offset, void *cookie)
int fuse_reply_readlink(fuse_req_t req, const char *link)
int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
void fuse_reply_none(fuse_req_t req)
int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent, const char *name, size_t namelen)
int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, size_t in_count, const struct iovec *out_iov, size_t out_count)
void fuse_lowlevel_help(void)
int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino, off_t off, off_t len)
int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
int fuse_reply_write(fuse_req_t req, size_t count)
int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent, const char *name, size_t namelen)
void fuse_session_reset(struct fuse_session *se)
int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e, const struct fuse_file_info *fi)
int fuse_reply_lseek(fuse_req_t req, off_t off)
void fuse_lowlevel_version(void)
uint64_t fuse_ino_t
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize, const char *name, const struct stat *stbuf, off_t off)
int fuse_reply_attr(fuse_req_t req, const struct stat *attr, double attr_timeout)
int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
int fuse_passthrough_open(fuse_req_t req, int fd)
int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino, off_t offset, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags)
int fuse_reply_xattr(fuse_req_t req, size_t count)
int fuse_opt_add_arg(struct fuse_args *args, const char *arg)
Definition fuse_opt.c:55
void fuse_opt_free_args(struct fuse_args *args)
Definition fuse_opt.c:34
int fuse_opt_parse(struct fuse_args *args, void *data, const struct fuse_opt opts[], fuse_opt_proc_t proc)
Definition fuse_opt.c:398
#define FUSE_OPT_END
Definition fuse_opt.h:104
@ FUSE_BUF_IS_FD
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
const struct fuse_ctx * fuse_req_ctx(fuse_req_t req)
struct fuse_req * fuse_req_t
int fuse_session_fd(struct fuse_session *se)
int fuse_req_interrupted(fuse_req_t req)
int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
void fuse_session_unmount(struct fuse_session *se)
int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func, void *data)
uint64_t fuse_ino_t
char ** argv
Definition fuse_opt.h:114
enum fuse_buf_flags flags
size_t mem_size
void * mem
size_t size
struct fuse_buf buf[1]
double entry_timeout
fuse_ino_t ino
uint64_t generation
double attr_timeout
struct stat attr
uint64_t lock_owner
uint32_t writepage
Definition fuse_common.h:68
uint32_t poll_events
uint32_t cache_readdir
Definition fuse_common.h:97
uint32_t nonseekable
Definition fuse_common.h:86
int32_t backing_id
uint32_t parallel_direct_writes
uint32_t noflush
uint32_t flush
Definition fuse_common.h:82
uint32_t direct_io
Definition fuse_common.h:71
uint32_t keep_cache
Definition fuse_common.h:77