libfuse
fuse_lowlevel.c
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4
5 Implementation of (most of) the low-level FUSE API. The session loop
6 functions are implemented in separate files.
7
8 This program can be distributed under the terms of the GNU LGPLv2.
9 See the file COPYING.LIB
10*/
11
12#define _GNU_SOURCE
13
14#include "fuse_config.h"
15#include "fuse_i.h"
16#include "fuse_kernel.h"
17#include "fuse_opt.h"
18#include "fuse_misc.h"
19#include "mount_util.h"
20
21#include <stdio.h>
22#include <stdlib.h>
23#include <stddef.h>
24#include <string.h>
25#include <unistd.h>
26#include <limits.h>
27#include <errno.h>
28#include <assert.h>
29#include <sys/file.h>
30
31#ifndef F_LINUX_SPECIFIC_BASE
32#define F_LINUX_SPECIFIC_BASE 1024
33#endif
34#ifndef F_SETPIPE_SZ
35#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
36#endif
37
38
39#define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
40#define OFFSET_MAX 0x7fffffffffffffffLL
41
42#define container_of(ptr, type, member) ({ \
43 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
44 (type *)( (char *)__mptr - offsetof(type,member) );})
45
46struct fuse_pollhandle {
47 uint64_t kh;
48 struct fuse_session *se;
49};
50
51static size_t pagesize;
52
53static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
54{
55 pagesize = getpagesize();
56}
57
58static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
59{
60 attr->ino = stbuf->st_ino;
61 attr->mode = stbuf->st_mode;
62 attr->nlink = stbuf->st_nlink;
63 attr->uid = stbuf->st_uid;
64 attr->gid = stbuf->st_gid;
65 attr->rdev = stbuf->st_rdev;
66 attr->size = stbuf->st_size;
67 attr->blksize = stbuf->st_blksize;
68 attr->blocks = stbuf->st_blocks;
69 attr->atime = stbuf->st_atime;
70 attr->mtime = stbuf->st_mtime;
71 attr->ctime = stbuf->st_ctime;
72 attr->atimensec = ST_ATIM_NSEC(stbuf);
73 attr->mtimensec = ST_MTIM_NSEC(stbuf);
74 attr->ctimensec = ST_CTIM_NSEC(stbuf);
75}
76
77static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
78{
79 stbuf->st_mode = attr->mode;
80 stbuf->st_uid = attr->uid;
81 stbuf->st_gid = attr->gid;
82 stbuf->st_size = attr->size;
83 stbuf->st_atime = attr->atime;
84 stbuf->st_mtime = attr->mtime;
85 stbuf->st_ctime = attr->ctime;
86 ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
87 ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
88 ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
89}
90
91static size_t iov_length(const struct iovec *iov, size_t count)
92{
93 size_t seg;
94 size_t ret = 0;
95
96 for (seg = 0; seg < count; seg++)
97 ret += iov[seg].iov_len;
98 return ret;
99}
100
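/* Request bookkeeping: requests are linked into circular, doubly-linked
 * lists embedded in struct fuse_req (se->list for in-flight requests,
 * se->interrupts for pending INTERRUPTs). The helpers below implement
 * the usual intrusive-list operations. */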
101static void list_init_req(struct fuse_req *req)
102{
103 req->next = req;
104 req->prev = req;
105}
106
107static void list_del_req(struct fuse_req *req)
108{
109 struct fuse_req *prev = req->prev;
110 struct fuse_req *next = req->next;
111 prev->next = next;
112 next->prev = prev;
113}
114
115static void list_add_req(struct fuse_req *req, struct fuse_req *next)
116{
117 struct fuse_req *prev = next->prev;
118 req->next = next;
119 req->prev = prev;
120 prev->next = req;
121 next->prev = req;
122}
123
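/* Request lifetime is reference counted via req->ctr: fuse_free_req()
 * clears any interrupt callback, unlinks the request from the session
 * list, drops the channel reference and destroys the request once the
 * count reaches zero. */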
124static void destroy_req(fuse_req_t req)
125{
126 assert(req->ch == NULL);
127 pthread_mutex_destroy(&req->lock);
128 free(req);
129}
130
131void fuse_free_req(fuse_req_t req)
132{
133 int ctr;
134 struct fuse_session *se = req->se;
135
136 pthread_mutex_lock(&se->lock);
137 req->u.ni.func = NULL;
138 req->u.ni.data = NULL;
139 list_del_req(req);
140 ctr = --req->ctr;
141 fuse_chan_put(req->ch);
142 req->ch = NULL;
143 pthread_mutex_unlock(&se->lock);
144 if (!ctr)
145 destroy_req(req);
146}
147
148static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
149{
150 struct fuse_req *req;
151
152 req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
153 if (req == NULL) {
154 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
155 } else {
156 req->se = se;
157 req->ctr = 1;
158 list_init_req(req);
159 pthread_mutex_init(&req->lock, NULL);
160 }
161
162 return req;
163}
164
165/* Send data. If *ch* is NULL, send via session master fd */
166static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
167 struct iovec *iov, int count)
168{
169 struct fuse_out_header *out = iov[0].iov_base;
170
171 assert(se != NULL);
172 out->len = iov_length(iov, count);
173 if (se->debug) {
174 if (out->unique == 0) {
175 fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
176 out->error, out->len);
177 } else if (out->error) {
178 fuse_log(FUSE_LOG_DEBUG,
179 " unique: %llu, error: %i (%s), outsize: %i\n",
180 (unsigned long long) out->unique, out->error,
181 strerror(-out->error), out->len);
182 } else {
183 fuse_log(FUSE_LOG_DEBUG,
184 " unique: %llu, success, outsize: %i\n",
185 (unsigned long long) out->unique, out->len);
186 }
187 }
188
189 ssize_t res;
190 if (se->io != NULL)
191 /* se->io->writev is never NULL if se->io is not NULL as
192 specified by fuse_session_custom_io() */
193 res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
194 se->userdata);
195 else
196 res = writev(ch ? ch->fd : se->fd, iov, count);
197
198 int err = errno;
199
200 if (res == -1) {
201 /* ENOENT means the operation was interrupted */
202 if (!fuse_session_exited(se) && err != ENOENT)
203 perror("fuse: writing device");
204 return -err;
205 }
206
207 return 0;
208}
209
210
211int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
212 int count)
213{
214 struct fuse_out_header out;
215
216#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
217 const char *str = strerrordesc_np(error * -1);
218 if ((str == NULL && error != 0) || error > 0) {
219#else
220 if (error <= -1000 || error > 0) {
221#endif
222 fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
223 error = -ERANGE;
224 }
225
226 out.unique = req->unique;
227 out.error = error;
228
229 iov[0].iov_base = &out;
230 iov[0].iov_len = sizeof(struct fuse_out_header);
231
232 return fuse_send_msg(req->se, req->ch, iov, count);
233}
234
235static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
236 int count)
237{
238 int res;
239
240 res = fuse_send_reply_iov_nofree(req, error, iov, count);
241 fuse_free_req(req);
242 return res;
243}
244
245static int send_reply(fuse_req_t req, int error, const void *arg,
246 size_t argsize)
247{
248 struct iovec iov[2];
249 int count = 1;
250 if (argsize) {
251 iov[1].iov_base = (void *) arg;
252 iov[1].iov_len = argsize;
253 count++;
254 }
255 return send_reply_iov(req, error, iov, count);
256}
257
258int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
259{
260 int res;
261 struct iovec *padded_iov;
262
263 padded_iov = malloc((count + 1) * sizeof(struct iovec));
264 if (padded_iov == NULL)
265 return fuse_reply_err(req, ENOMEM);
266
267 memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
268 count++;
269
270 res = send_reply_iov(req, 0, padded_iov, count);
271 free(padded_iov);
272
273 return res;
274}
275
276
277/* `buf` is allowed to be empty so that the proper size may be
278 allocated by the caller */
279size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
280 const char *name, const struct stat *stbuf, off_t off)
281{
282 (void)req;
283 size_t namelen;
284 size_t entlen;
285 size_t entlen_padded;
286 struct fuse_dirent *dirent;
287
288 namelen = strlen(name);
289 entlen = FUSE_NAME_OFFSET + namelen;
290 entlen_padded = FUSE_DIRENT_ALIGN(entlen);
291
292 if ((buf == NULL) || (entlen_padded > bufsize))
293 return entlen_padded;
294
295 dirent = (struct fuse_dirent*) buf;
296 dirent->ino = stbuf->st_ino;
297 dirent->off = off;
298 dirent->namelen = namelen;
299 dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
300 memcpy(dirent->name, name, namelen);
301 memset(dirent->name + namelen, 0, entlen_padded - entlen);
302
303 return entlen_padded;
304}
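/* A minimal usage sketch of the two-pass pattern described above, for a
 * readdir handler (the buffer management and the names buf/used/next_off
 * are hypothetical): call once with a NULL buffer to learn the padded
 * entry size, then again with enough room to store the entry:
 *
 *	size_t entsize = fuse_add_direntry(req, NULL, 0, name, NULL, 0);
 *	buf = realloc(buf, used + entsize);
 *	fuse_add_direntry(req, buf + used, entsize, name, &stbuf, next_off);
 *	used += entsize;
 */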
305
306static void convert_statfs(const struct statvfs *stbuf,
307 struct fuse_kstatfs *kstatfs)
308{
309 kstatfs->bsize = stbuf->f_bsize;
310 kstatfs->frsize = stbuf->f_frsize;
311 kstatfs->blocks = stbuf->f_blocks;
312 kstatfs->bfree = stbuf->f_bfree;
313 kstatfs->bavail = stbuf->f_bavail;
314 kstatfs->files = stbuf->f_files;
315 kstatfs->ffree = stbuf->f_ffree;
316 kstatfs->namelen = stbuf->f_namemax;
317}
318
319static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
320{
321 return send_reply(req, 0, arg, argsize);
322}
323
324int fuse_reply_err(fuse_req_t req, int err)
325{
326 return send_reply(req, -err, NULL, 0);
327}
328
329void fuse_reply_none(fuse_req_t req)
330{
331 fuse_free_req(req);
332}
333
334static unsigned long calc_timeout_sec(double t)
335{
336 if (t > (double) ULONG_MAX)
337 return ULONG_MAX;
338 else if (t < 0.0)
339 return 0;
340 else
341 return (unsigned long) t;
342}
343
344static unsigned int calc_timeout_nsec(double t)
345{
346 double f = t - (double) calc_timeout_sec(t);
347 if (f < 0.0)
348 return 0;
349 else if (f >= 0.999999999)
350 return 999999999;
351 else
352 return (unsigned int) (f * 1.0e9);
353}
354
355static void fill_entry(struct fuse_entry_out *arg,
356 const struct fuse_entry_param *e)
357{
358 arg->nodeid = e->ino;
359 arg->generation = e->generation;
360 arg->entry_valid = calc_timeout_sec(e->entry_timeout);
361 arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
362 arg->attr_valid = calc_timeout_sec(e->attr_timeout);
363 arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
364 convert_stat(&e->attr, &arg->attr);
365}
366
367/* `buf` is allowed to be empty so that the proper size may be
368 allocated by the caller */
369size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
370 const char *name,
371 const struct fuse_entry_param *e, off_t off)
372{
373 (void)req;
374 size_t namelen;
375 size_t entlen;
376 size_t entlen_padded;
377
378 namelen = strlen(name);
379 entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
380 entlen_padded = FUSE_DIRENT_ALIGN(entlen);
381 if ((buf == NULL) || (entlen_padded > bufsize))
382 return entlen_padded;
383
384 struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
385 memset(&dp->entry_out, 0, sizeof(dp->entry_out));
386 fill_entry(&dp->entry_out, e);
387
388 struct fuse_dirent *dirent = &dp->dirent;
389 dirent->ino = e->attr.st_ino;
390 dirent->off = off;
391 dirent->namelen = namelen;
392 dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
393 memcpy(dirent->name, name, namelen);
394 memset(dirent->name + namelen, 0, entlen_padded - entlen);
395
396 return entlen_padded;
397}
398
399static void fill_open(struct fuse_open_out *arg,
400 const struct fuse_file_info *f)
401{
402 arg->fh = f->fh;
403 if (f->direct_io)
404 arg->open_flags |= FOPEN_DIRECT_IO;
405 if (f->keep_cache)
406 arg->open_flags |= FOPEN_KEEP_CACHE;
407 if (f->cache_readdir)
408 arg->open_flags |= FOPEN_CACHE_DIR;
409 if (f->nonseekable)
410 arg->open_flags |= FOPEN_NONSEEKABLE;
411 if (f->noflush)
412 arg->open_flags |= FOPEN_NOFLUSH;
413 if (f->parallel_direct_writes)
414 arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
415}
416
417int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
418{
419 struct fuse_entry_out arg;
420 size_t size = req->se->conn.proto_minor < 9 ?
421 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
422
423 /* Before ABI 7.4, e->ino == 0 was invalid; only ENOENT meant a
424 negative entry */
425 if (!e->ino && req->se->conn.proto_minor < 4)
426 return fuse_reply_err(req, ENOENT);
427
428 memset(&arg, 0, sizeof(arg));
429 fill_entry(&arg, e);
430 return send_reply_ok(req, &arg, size);
431}
432
433int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
434 const struct fuse_file_info *f)
435{
436 char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
437 size_t entrysize = req->se->conn.proto_minor < 9 ?
438 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
439 struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
440 struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
441
442 memset(buf, 0, sizeof(buf));
443 fill_entry(earg, e);
444 fill_open(oarg, f);
445 return send_reply_ok(req, buf,
446 entrysize + sizeof(struct fuse_open_out));
447}
448
449int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
450 double attr_timeout)
451{
452 struct fuse_attr_out arg;
453 size_t size = req->se->conn.proto_minor < 9 ?
454 FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
455
456 memset(&arg, 0, sizeof(arg));
457 arg.attr_valid = calc_timeout_sec(attr_timeout);
458 arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
459 convert_stat(attr, &arg.attr);
460
461 return send_reply_ok(req, &arg, size);
462}
463
464int fuse_reply_readlink(fuse_req_t req, const char *linkname)
465{
466 return send_reply_ok(req, linkname, strlen(linkname));
467}
468
469int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
470{
471 struct fuse_open_out arg;
472
473 memset(&arg, 0, sizeof(arg));
474 fill_open(&arg, f);
475 return send_reply_ok(req, &arg, sizeof(arg));
476}
477
478int fuse_reply_write(fuse_req_t req, size_t count)
479{
480 struct fuse_write_out arg;
481
482 memset(&arg, 0, sizeof(arg));
483 arg.size = count;
484
485 return send_reply_ok(req, &arg, sizeof(arg));
486}
487
488int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
489{
490 return send_reply_ok(req, buf, size);
491}
492
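/* Fallback data path: flatten the buffer vector into one page-aligned
 * allocation (unless it is already a single in-memory buffer) and send
 * it with a plain writev() through fuse_send_msg(). */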
493static int fuse_send_data_iov_fallback(struct fuse_session *se,
494 struct fuse_chan *ch,
495 struct iovec *iov, int iov_count,
496 struct fuse_bufvec *buf,
497 size_t len)
498{
499 struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
500 void *mbuf;
501 int res;
502
503 /* Optimize common case */
504 if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
505 !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
506 /* FIXME: also avoid memory copy if there are multiple buffers
507 but none of them contain an fd */
508
509 iov[iov_count].iov_base = buf->buf[0].mem;
510 iov[iov_count].iov_len = len;
511 iov_count++;
512 return fuse_send_msg(se, ch, iov, iov_count);
513 }
514
515 res = posix_memalign(&mbuf, pagesize, len);
516 if (res != 0)
517 return res;
518
519 mem_buf.buf[0].mem = mbuf;
520 res = fuse_buf_copy(&mem_buf, buf, 0);
521 if (res < 0) {
522 free(mbuf);
523 return -res;
524 }
525 len = res;
526
527 iov[iov_count].iov_base = mbuf;
528 iov[iov_count].iov_len = len;
529 iov_count++;
530 res = fuse_send_msg(se, ch, iov, iov_count);
531 free(mbuf);
532
533 return res;
534}
535
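/* Per-thread pipe used by the splice()-based reply path; it is stored in
 * thread-specific data under se->pipe_key and torn down with
 * fuse_ll_pipe_free(). */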
536struct fuse_ll_pipe {
537 size_t size;
538 int can_grow;
539 int pipe[2];
540};
541
542static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
543{
544 close(llp->pipe[0]);
545 close(llp->pipe[1]);
546 free(llp);
547}
548
549#ifdef HAVE_SPLICE
550#if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
551static int fuse_pipe(int fds[2])
552{
553 int rv = pipe(fds);
554
555 if (rv == -1)
556 return rv;
557
558 if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
559 fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
560 fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
561 fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
562 close(fds[0]);
563 close(fds[1]);
564 rv = -1;
565 }
566 return rv;
567}
568#else
569static int fuse_pipe(int fds[2])
570{
571 return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
572}
573#endif
574
575static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
576{
577 struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
578 if (llp == NULL) {
579 int res;
580
581 llp = malloc(sizeof(struct fuse_ll_pipe));
582 if (llp == NULL)
583 return NULL;
584
585 res = fuse_pipe(llp->pipe);
586 if (res == -1) {
587 free(llp);
588 return NULL;
589 }
590
591 /*
592 * The default size is 16 pages on Linux
593 */
594 llp->size = pagesize * 16;
595 llp->can_grow = 1;
596
597 pthread_setspecific(se->pipe_key, llp);
598 }
599
600 return llp;
601}
602#endif
603
604static void fuse_ll_clear_pipe(struct fuse_session *se)
605{
606 struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
607 if (llp) {
608 pthread_setspecific(se->pipe_key, NULL);
609 fuse_ll_pipe_free(llp);
610 }
611}
612
613#if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
614static int read_back(int fd, char *buf, size_t len)
615{
616 int res;
617
618 res = read(fd, buf, len);
619 if (res == -1) {
620 fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
621 return -EIO;
622 }
623 if (res != len) {
624 fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
625 return -EIO;
626 }
627 return 0;
628}
629
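/* Try to enlarge the pipe to the system-wide limit read from
 * /proc/sys/fs/pipe-max-size; returns the new size or a negative errno. */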
630static int grow_pipe_to_max(int pipefd)
631{
632 int max;
633 int res;
634 int maxfd;
635 char buf[32];
636
637 maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
638 if (maxfd < 0)
639 return -errno;
640
641 res = read(maxfd, buf, sizeof(buf) - 1);
642 if (res < 0) {
643 int saved_errno;
644
645 saved_errno = errno;
646 close(maxfd);
647 return -saved_errno;
648 }
649 close(maxfd);
650 buf[res] = '\0';
651
652 max = atoi(buf);
653 res = fcntl(pipefd, F_SETPIPE_SZ, max);
654 if (res < 0)
655 return -errno;
656 return max;
657}
658
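/* Zero-copy reply path: vmsplice() the reply headers into the per-thread
 * pipe, let fuse_buf_copy() splice the payload in behind them, then
 * splice() everything to the device fd. Falls back to the memory-copy
 * path whenever splicing is unsupported, unprofitable (payload smaller
 * than two pages) or fails part-way through. */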
659static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
660 struct iovec *iov, int iov_count,
661 struct fuse_bufvec *buf, unsigned int flags)
662{
663 int res;
664 size_t len = fuse_buf_size(buf);
665 struct fuse_out_header *out = iov[0].iov_base;
666 struct fuse_ll_pipe *llp;
667 int splice_flags;
668 size_t pipesize;
669 size_t total_buf_size;
670 size_t idx;
671 size_t headerlen;
672 struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
673
674 if (se->broken_splice_nonblock)
675 goto fallback;
676
677 if (flags & FUSE_BUF_NO_SPLICE)
678 goto fallback;
679
680 total_buf_size = 0;
681 for (idx = buf->idx; idx < buf->count; idx++) {
682 total_buf_size += buf->buf[idx].size;
683 if (idx == buf->idx)
684 total_buf_size -= buf->off;
685 }
686 if (total_buf_size < 2 * pagesize)
687 goto fallback;
688
689 if (se->conn.proto_minor < 14 ||
690 !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
691 goto fallback;
692
693 llp = fuse_ll_get_pipe(se);
694 if (llp == NULL)
695 goto fallback;
696
697
698 headerlen = iov_length(iov, iov_count);
699
700 out->len = headerlen + len;
701
702 /*
703 * Heuristic for the required pipe size; does not work if the
704 * source contains fragments smaller than the page size
705 */
706 pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
707
708 if (llp->size < pipesize) {
709 if (llp->can_grow) {
710 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
711 if (res == -1) {
712 res = grow_pipe_to_max(llp->pipe[0]);
713 if (res > 0)
714 llp->size = res;
715 llp->can_grow = 0;
716 goto fallback;
717 }
718 llp->size = res;
719 }
720 if (llp->size < pipesize)
721 goto fallback;
722 }
723
724
725 res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
726 if (res == -1)
727 goto fallback;
728
729 if (res != headerlen) {
730 res = -EIO;
731 fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
732 headerlen);
733 goto clear_pipe;
734 }
735
736 pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
737 pipe_buf.buf[0].fd = llp->pipe[1];
738
739 res = fuse_buf_copy(&pipe_buf, buf,
740 FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
741 if (res < 0) {
742 if (res == -EAGAIN || res == -EINVAL) {
743 /*
744 * Should only get EAGAIN on kernels with
745 * broken SPLICE_F_NONBLOCK support (<=
746 * 2.6.35) where this error or a short read is
747 * returned even if the pipe itself is not
748 * full
749 *
750 * EINVAL might mean that splice can't handle
751 * this combination of input and output.
752 */
753 if (res == -EAGAIN)
754 se->broken_splice_nonblock = 1;
755
756 pthread_setspecific(se->pipe_key, NULL);
757 fuse_ll_pipe_free(llp);
758 goto fallback;
759 }
760 res = -res;
761 goto clear_pipe;
762 }
763
764 if (res != 0 && res < len) {
765 struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
766 void *mbuf;
767 size_t now_len = res;
768 /*
769 * For regular files a short count is either
770 * 1) due to EOF, or
771 * 2) because of broken SPLICE_F_NONBLOCK (see above)
772 *
773 * For other inputs it's possible that we overflowed
774 * the pipe because of small buffer fragments.
775 */
776
777 res = posix_memalign(&mbuf, pagesize, len);
778 if (res != 0)
779 goto clear_pipe;
780
781 mem_buf.buf[0].mem = mbuf;
782 mem_buf.off = now_len;
783 res = fuse_buf_copy(&mem_buf, buf, 0);
784 if (res > 0) {
785 char *tmpbuf;
786 size_t extra_len = res;
787 /*
788 * Trickiest case: got more data. Need to get
789 * back the data from the pipe and then fall
790 * back to regular write.
791 */
792 tmpbuf = malloc(headerlen);
793 if (tmpbuf == NULL) {
794 free(mbuf);
795 res = ENOMEM;
796 goto clear_pipe;
797 }
798 res = read_back(llp->pipe[0], tmpbuf, headerlen);
799 free(tmpbuf);
800 if (res != 0) {
801 free(mbuf);
802 goto clear_pipe;
803 }
804 res = read_back(llp->pipe[0], mbuf, now_len);
805 if (res != 0) {
806 free(mbuf);
807 goto clear_pipe;
808 }
809 len = now_len + extra_len;
810 iov[iov_count].iov_base = mbuf;
811 iov[iov_count].iov_len = len;
812 iov_count++;
813 res = fuse_send_msg(se, ch, iov, iov_count);
814 free(mbuf);
815 return res;
816 }
817 free(mbuf);
818 res = now_len;
819 }
820 len = res;
821 out->len = headerlen + len;
822
823 if (se->debug) {
824 fuse_log(FUSE_LOG_DEBUG,
825 " unique: %llu, success, outsize: %i (splice)\n",
826 (unsigned long long) out->unique, out->len);
827 }
828
829 splice_flags = 0;
830 if ((flags & FUSE_BUF_SPLICE_MOVE) &&
831 (se->conn.want & FUSE_CAP_SPLICE_MOVE))
832 splice_flags |= SPLICE_F_MOVE;
833
834 if (se->io != NULL && se->io->splice_send != NULL) {
835 res = se->io->splice_send(llp->pipe[0], NULL,
836 ch ? ch->fd : se->fd, NULL, out->len,
837 splice_flags, se->userdata);
838 } else {
839 res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
840 out->len, splice_flags);
841 }
842 if (res == -1) {
843 res = -errno;
844 perror("fuse: splice from pipe");
845 goto clear_pipe;
846 }
847 if (res != out->len) {
848 res = -EIO;
849 fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
850 res, out->len);
851 goto clear_pipe;
852 }
853 return 0;
854
855clear_pipe:
856 fuse_ll_clear_pipe(se);
857 return res;
858
859fallback:
860 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
861}
862#else
863static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
864 struct iovec *iov, int iov_count,
865 struct fuse_bufvec *buf, unsigned int flags)
866{
867 size_t len = fuse_buf_size(buf);
868 (void) flags;
869
870 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
871}
872#endif
873
874int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
875 enum fuse_buf_copy_flags flags)
876{
877 struct iovec iov[2];
878 struct fuse_out_header out;
879 int res;
880
881 iov[0].iov_base = &out;
882 iov[0].iov_len = sizeof(struct fuse_out_header);
883
884 out.unique = req->unique;
885 out.error = 0;
886
887 res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
888 if (res <= 0) {
889 fuse_free_req(req);
890 return res;
891 } else {
892 return fuse_reply_err(req, res);
893 }
894}
895
896int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
897{
898 struct fuse_statfs_out arg;
899 size_t size = req->se->conn.proto_minor < 4 ?
900 FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
901
902 memset(&arg, 0, sizeof(arg));
903 convert_statfs(stbuf, &arg.st);
904
905 return send_reply_ok(req, &arg, size);
906}
907
908int fuse_reply_xattr(fuse_req_t req, size_t count)
909{
910 struct fuse_getxattr_out arg;
911
912 memset(&arg, 0, sizeof(arg));
913 arg.size = count;
914
915 return send_reply_ok(req, &arg, sizeof(arg));
916}
917
918int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
919{
920 struct fuse_lk_out arg;
921
922 memset(&arg, 0, sizeof(arg));
923 arg.lk.type = lock->l_type;
924 if (lock->l_type != F_UNLCK) {
925 arg.lk.start = lock->l_start;
926 if (lock->l_len == 0)
927 arg.lk.end = OFFSET_MAX;
928 else
929 arg.lk.end = lock->l_start + lock->l_len - 1;
930 }
931 arg.lk.pid = lock->l_pid;
932 return send_reply_ok(req, &arg, sizeof(arg));
933}
934
935int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
936{
937 struct fuse_bmap_out arg;
938
939 memset(&arg, 0, sizeof(arg));
940 arg.block = idx;
941
942 return send_reply_ok(req, &arg, sizeof(arg));
943}
944
945static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
946 size_t count)
947{
948 struct fuse_ioctl_iovec *fiov;
949 size_t i;
950
951 fiov = malloc(sizeof(fiov[0]) * count);
952 if (!fiov)
953 return NULL;
954
955 for (i = 0; i < count; i++) {
956 fiov[i].base = (uintptr_t) iov[i].iov_base;
957 fiov[i].len = iov[i].iov_len;
958 }
959
960 return fiov;
961}
962
963int fuse_reply_ioctl_retry(fuse_req_t req,
964 const struct iovec *in_iov, size_t in_count,
965 const struct iovec *out_iov, size_t out_count)
966{
967 struct fuse_ioctl_out arg;
968 struct fuse_ioctl_iovec *in_fiov = NULL;
969 struct fuse_ioctl_iovec *out_fiov = NULL;
970 struct iovec iov[4];
971 size_t count = 1;
972 int res;
973
974 memset(&arg, 0, sizeof(arg));
975 arg.flags |= FUSE_IOCTL_RETRY;
976 arg.in_iovs = in_count;
977 arg.out_iovs = out_count;
978 iov[count].iov_base = &arg;
979 iov[count].iov_len = sizeof(arg);
980 count++;
981
982 if (req->se->conn.proto_minor < 16) {
983 if (in_count) {
984 iov[count].iov_base = (void *)in_iov;
985 iov[count].iov_len = sizeof(in_iov[0]) * in_count;
986 count++;
987 }
988
989 if (out_count) {
990 iov[count].iov_base = (void *)out_iov;
991 iov[count].iov_len = sizeof(out_iov[0]) * out_count;
992 count++;
993 }
994 } else {
995 /* Can't handle non-compat 64bit ioctls on 32bit */
996 if (sizeof(void *) == 4 && req->ioctl_64bit) {
997 res = fuse_reply_err(req, EINVAL);
998 goto out;
999 }
1000
1001 if (in_count) {
1002 in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
1003 if (!in_fiov)
1004 goto enomem;
1005
1006 iov[count].iov_base = (void *)in_fiov;
1007 iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1008 count++;
1009 }
1010 if (out_count) {
1011 out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1012 if (!out_fiov)
1013 goto enomem;
1014
1015 iov[count].iov_base = (void *)out_fiov;
1016 iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1017 count++;
1018 }
1019 }
1020
1021 res = send_reply_iov(req, 0, iov, count);
1022out:
1023 free(in_fiov);
1024 free(out_fiov);
1025
1026 return res;
1027
1028enomem:
1029 res = fuse_reply_err(req, ENOMEM);
1030 goto out;
1031}
1032
1033int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1034{
1035 struct fuse_ioctl_out arg;
1036 struct iovec iov[3];
1037 size_t count = 1;
1038
1039 memset(&arg, 0, sizeof(arg));
1040 arg.result = result;
1041 iov[count].iov_base = &arg;
1042 iov[count].iov_len = sizeof(arg);
1043 count++;
1044
1045 if (size) {
1046 iov[count].iov_base = (char *) buf;
1047 iov[count].iov_len = size;
1048 count++;
1049 }
1050
1051 return send_reply_iov(req, 0, iov, count);
1052}
1053
1054int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1055 int count)
1056{
1057 struct iovec *padded_iov;
1058 struct fuse_ioctl_out arg;
1059 int res;
1060
1061 padded_iov = malloc((count + 2) * sizeof(struct iovec));
1062 if (padded_iov == NULL)
1063 return fuse_reply_err(req, ENOMEM);
1064
1065 memset(&arg, 0, sizeof(arg));
1066 arg.result = result;
1067 padded_iov[1].iov_base = &arg;
1068 padded_iov[1].iov_len = sizeof(arg);
1069
1070 memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1071
1072 res = send_reply_iov(req, 0, padded_iov, count + 2);
1073 free(padded_iov);
1074
1075 return res;
1076}
1077
1078int fuse_reply_poll(fuse_req_t req, unsigned revents)
1079{
1080 struct fuse_poll_out arg;
1081
1082 memset(&arg, 0, sizeof(arg));
1083 arg.revents = revents;
1084
1085 return send_reply_ok(req, &arg, sizeof(arg));
1086}
1087
1088int fuse_reply_lseek(fuse_req_t req, off_t off)
1089{
1090 struct fuse_lseek_out arg;
1091
1092 memset(&arg, 0, sizeof(arg));
1093 arg.offset = off;
1094
1095 return send_reply_ok(req, &arg, sizeof(arg));
1096}
1097
1098static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1099{
1100 char *name = (char *) inarg;
1101
1102 if (req->se->op.lookup)
1103 req->se->op.lookup(req, nodeid, name);
1104 else
1105 fuse_reply_err(req, ENOSYS);
1106}
1107
1108static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1109{
1110 struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1111
1112 if (req->se->op.forget)
1113 req->se->op.forget(req, nodeid, arg->nlookup);
1114 else
1115 fuse_reply_none(req);
1116}
1117
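/* FUSE_BATCH_FORGET: hand the whole batch to forget_multi() if the
 * filesystem provides it (the handler then replies with fuse_reply_none
 * itself); otherwise call forget() once per entry using throw-away dummy
 * requests, replying only with fuse_reply_none(). */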
1118static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
1119 const void *inarg)
1120{
1121 struct fuse_batch_forget_in *arg = (void *) inarg;
1122 struct fuse_forget_one *param = (void *) PARAM(arg);
1123 unsigned int i;
1124
1125 (void) nodeid;
1126
1127 if (req->se->op.forget_multi) {
1128 req->se->op.forget_multi(req, arg->count,
1129 (struct fuse_forget_data *) param);
1130 } else if (req->se->op.forget) {
1131 for (i = 0; i < arg->count; i++) {
1132 struct fuse_forget_one *forget = &param[i];
1133 struct fuse_req *dummy_req;
1134
1135 dummy_req = fuse_ll_alloc_req(req->se);
1136 if (dummy_req == NULL)
1137 break;
1138
1139 dummy_req->unique = req->unique;
1140 dummy_req->ctx = req->ctx;
1141 dummy_req->ch = NULL;
1142
1143 req->se->op.forget(dummy_req, forget->nodeid,
1144 forget->nlookup);
1145 }
1146 fuse_reply_none(req);
1147 } else {
1148 fuse_reply_none(req);
1149 }
1150}
1151
1152static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1153{
1154 struct fuse_file_info *fip = NULL;
1155 struct fuse_file_info fi;
1156
1157 if (req->se->conn.proto_minor >= 9) {
1158 struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1159
1160 if (arg->getattr_flags & FUSE_GETATTR_FH) {
1161 memset(&fi, 0, sizeof(fi));
1162 fi.fh = arg->fh;
1163 fip = &fi;
1164 }
1165 }
1166
1167 if (req->se->op.getattr)
1168 req->se->op.getattr(req, nodeid, fip);
1169 else
1170 fuse_reply_err(req, ENOSYS);
1171}
1172
1173static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1174{
1175 struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1176
1177 if (req->se->op.setattr) {
1178 struct fuse_file_info *fi = NULL;
1179 struct fuse_file_info fi_store;
1180 struct stat stbuf;
1181 memset(&stbuf, 0, sizeof(stbuf));
1182 convert_attr(arg, &stbuf);
1183 if (arg->valid & FATTR_FH) {
1184 arg->valid &= ~FATTR_FH;
1185 memset(&fi_store, 0, sizeof(fi_store));
1186 fi = &fi_store;
1187 fi->fh = arg->fh;
1188 }
1189 arg->valid &=
1190 FUSE_SET_ATTR_MODE |
1191 FUSE_SET_ATTR_UID |
1192 FUSE_SET_ATTR_GID |
1193 FUSE_SET_ATTR_SIZE |
1194 FUSE_SET_ATTR_ATIME |
1195 FUSE_SET_ATTR_MTIME |
1196 FUSE_SET_ATTR_KILL_SUID |
1197 FUSE_SET_ATTR_KILL_SGID |
1198 FUSE_SET_ATTR_ATIME_NOW |
1199 FUSE_SET_ATTR_MTIME_NOW |
1200 FUSE_SET_ATTR_CTIME;
1201
1202 req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1203 } else
1204 fuse_reply_err(req, ENOSYS);
1205}
1206
1207static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1208{
1209 struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1210
1211 if (req->se->op.access)
1212 req->se->op.access(req, nodeid, arg->mask);
1213 else
1214 fuse_reply_err(req, ENOSYS);
1215}
1216
1217static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1218{
1219 (void) inarg;
1220
1221 if (req->se->op.readlink)
1222 req->se->op.readlink(req, nodeid);
1223 else
1224 fuse_reply_err(req, ENOSYS);
1225}
1226
1227static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1228{
1229 struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1230 char *name = PARAM(arg);
1231
1232 if (req->se->conn.proto_minor >= 12)
1233 req->ctx.umask = arg->umask;
1234 else
1235 name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1236
1237 if (req->se->op.mknod)
1238 req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1239 else
1240 fuse_reply_err(req, ENOSYS);
1241}
1242
1243static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1244{
1245 struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1246
1247 if (req->se->conn.proto_minor >= 12)
1248 req->ctx.umask = arg->umask;
1249
1250 if (req->se->op.mkdir)
1251 req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1252 else
1253 fuse_reply_err(req, ENOSYS);
1254}
1255
1256static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1257{
1258 char *name = (char *) inarg;
1259
1260 if (req->se->op.unlink)
1261 req->se->op.unlink(req, nodeid, name);
1262 else
1263 fuse_reply_err(req, ENOSYS);
1264}
1265
1266static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1267{
1268 char *name = (char *) inarg;
1269
1270 if (req->se->op.rmdir)
1271 req->se->op.rmdir(req, nodeid, name);
1272 else
1273 fuse_reply_err(req, ENOSYS);
1274}
1275
1276static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1277{
1278 char *name = (char *) inarg;
1279 char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1280
1281 if (req->se->op.symlink)
1282 req->se->op.symlink(req, linkname, nodeid, name);
1283 else
1284 fuse_reply_err(req, ENOSYS);
1285}
1286
1287static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1288{
1289 struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1290 char *oldname = PARAM(arg);
1291 char *newname = oldname + strlen(oldname) + 1;
1292
1293 if (req->se->op.rename)
1294 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1295 0);
1296 else
1297 fuse_reply_err(req, ENOSYS);
1298}
1299
1300static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1301{
1302 struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1303 char *oldname = PARAM(arg);
1304 char *newname = oldname + strlen(oldname) + 1;
1305
1306 if (req->se->op.rename)
1307 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1308 arg->flags);
1309 else
1310 fuse_reply_err(req, ENOSYS);
1311}
1312
1313static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1314{
1315 struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1316
1317 if (req->se->op.link)
1318 req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1319 else
1320 fuse_reply_err(req, ENOSYS);
1321}
1322
1323static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1324{
1325 struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1326
1327 if (req->se->op.create) {
1328 struct fuse_file_info fi;
1329 char *name = PARAM(arg);
1330
1331 memset(&fi, 0, sizeof(fi));
1332 fi.flags = arg->flags;
1333
1334 if (req->se->conn.proto_minor >= 12)
1335 req->ctx.umask = arg->umask;
1336 else
1337 name = (char *) inarg + sizeof(struct fuse_open_in);
1338
1339 req->se->op.create(req, nodeid, name, arg->mode, &fi);
1340 } else
1341 fuse_reply_err(req, ENOSYS);
1342}
1343
1344static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1345{
1346 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1347 struct fuse_file_info fi;
1348
1349 memset(&fi, 0, sizeof(fi));
1350 fi.flags = arg->flags;
1351
1352 if (req->se->op.open)
1353 req->se->op.open(req, nodeid, &fi);
1354 else
1355 fuse_reply_open(req, &fi);
1356}
1357
1358static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1359{
1360 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1361
1362 if (req->se->op.read) {
1363 struct fuse_file_info fi;
1364
1365 memset(&fi, 0, sizeof(fi));
1366 fi.fh = arg->fh;
1367 if (req->se->conn.proto_minor >= 9) {
1368 fi.lock_owner = arg->lock_owner;
1369 fi.flags = arg->flags;
1370 }
1371 req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1372 } else
1373 fuse_reply_err(req, ENOSYS);
1374}
1375
1376static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1377{
1378 struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1379 struct fuse_file_info fi;
1380 char *param;
1381
1382 memset(&fi, 0, sizeof(fi));
1383 fi.fh = arg->fh;
1384 fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1385
1386 if (req->se->conn.proto_minor < 9) {
1387 param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1388 } else {
1389 fi.lock_owner = arg->lock_owner;
1390 fi.flags = arg->flags;
1391 param = PARAM(arg);
1392 }
1393
1394 if (req->se->op.write)
1395 req->se->op.write(req, nodeid, param, arg->size,
1396 arg->offset, &fi);
1397 else
1398 fuse_reply_err(req, ENOSYS);
1399}
1400
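/* Write path used when the filesystem implements write_buf(): the payload
 * either follows the header in memory or, with FUSE_CAP_SPLICE_READ, is
 * still sitting in a kernel pipe (FUSE_BUF_IS_FD). The buffer vector is
 * trimmed to exactly arg->size before being handed to the filesystem. */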
1401static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
1402 const struct fuse_buf *ibuf)
1403{
1404 struct fuse_session *se = req->se;
1405 struct fuse_bufvec bufv = {
1406 .buf[0] = *ibuf,
1407 .count = 1,
1408 };
1409 struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1410 struct fuse_file_info fi;
1411
1412 memset(&fi, 0, sizeof(fi));
1413 fi.fh = arg->fh;
1414 fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1415
1416 if (se->conn.proto_minor < 9) {
1417 bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1418 bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1419 FUSE_COMPAT_WRITE_IN_SIZE;
1420 assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
1421 } else {
1422 fi.lock_owner = arg->lock_owner;
1423 fi.flags = arg->flags;
1424 if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
1425 bufv.buf[0].mem = PARAM(arg);
1426
1427 bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1428 sizeof(struct fuse_write_in);
1429 }
1430 if (bufv.buf[0].size < arg->size) {
1431 fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
1432 fuse_reply_err(req, EIO);
1433 goto out;
1434 }
1435 bufv.buf[0].size = arg->size;
1436
1437 se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);
1438
1439out:
1440 /* Need to reset the pipe if ->write_buf() didn't consume all data */
1441 if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
1442 fuse_ll_clear_pipe(se);
1443}
1444
1445static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1446{
1447 struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1448 struct fuse_file_info fi;
1449
1450 memset(&fi, 0, sizeof(fi));
1451 fi.fh = arg->fh;
1452 fi.flush = 1;
1453 if (req->se->conn.proto_minor >= 7)
1454 fi.lock_owner = arg->lock_owner;
1455
1456 if (req->se->op.flush)
1457 req->se->op.flush(req, nodeid, &fi);
1458 else
1459 fuse_reply_err(req, ENOSYS);
1460}
1461
1462static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1463{
1464 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1465 struct fuse_file_info fi;
1466
1467 memset(&fi, 0, sizeof(fi));
1468 fi.flags = arg->flags;
1469 fi.fh = arg->fh;
1470 if (req->se->conn.proto_minor >= 8) {
1471 fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1472 fi.lock_owner = arg->lock_owner;
1473 }
1474 if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1475 fi.flock_release = 1;
1476 fi.lock_owner = arg->lock_owner;
1477 }
1478
1479 if (req->se->op.release)
1480 req->se->op.release(req, nodeid, &fi);
1481 else
1482 fuse_reply_err(req, 0);
1483}
1484
1485static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1486{
1487 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1488 struct fuse_file_info fi;
1489 int datasync = arg->fsync_flags & 1;
1490
1491 memset(&fi, 0, sizeof(fi));
1492 fi.fh = arg->fh;
1493
1494 if (req->se->op.fsync)
1495 req->se->op.fsync(req, nodeid, datasync, &fi);
1496 else
1497 fuse_reply_err(req, ENOSYS);
1498}
1499
1500static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1501{
1502 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1503 struct fuse_file_info fi;
1504
1505 memset(&fi, 0, sizeof(fi));
1506 fi.flags = arg->flags;
1507
1508 if (req->se->op.opendir)
1509 req->se->op.opendir(req, nodeid, &fi);
1510 else
1511 fuse_reply_open(req, &fi);
1512}
1513
1514static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1515{
1516 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1517 struct fuse_file_info fi;
1518
1519 memset(&fi, 0, sizeof(fi));
1520 fi.fh = arg->fh;
1521
1522 if (req->se->op.readdir)
1523 req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1524 else
1525 fuse_reply_err(req, ENOSYS);
1526}
1527
1528static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1529{
1530 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1531 struct fuse_file_info fi;
1532
1533 memset(&fi, 0, sizeof(fi));
1534 fi.fh = arg->fh;
1535
1536 if (req->se->op.readdirplus)
1537 req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1538 else
1539 fuse_reply_err(req, ENOSYS);
1540}
1541
1542static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1543{
1544 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1545 struct fuse_file_info fi;
1546
1547 memset(&fi, 0, sizeof(fi));
1548 fi.flags = arg->flags;
1549 fi.fh = arg->fh;
1550
1551 if (req->se->op.releasedir)
1552 req->se->op.releasedir(req, nodeid, &fi);
1553 else
1554 fuse_reply_err(req, 0);
1555}
1556
1557static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1558{
1559 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1560 struct fuse_file_info fi;
1561 int datasync = arg->fsync_flags & 1;
1562
1563 memset(&fi, 0, sizeof(fi));
1564 fi.fh = arg->fh;
1565
1566 if (req->se->op.fsyncdir)
1567 req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1568 else
1569 fuse_reply_err(req, ENOSYS);
1570}
1571
1572static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1573{
1574 (void) nodeid;
1575 (void) inarg;
1576
1577 if (req->se->op.statfs)
1578 req->se->op.statfs(req, nodeid);
1579 else {
1580 struct statvfs buf = {
1581 .f_namemax = 255,
1582 .f_bsize = 512,
1583 };
1584 fuse_reply_statfs(req, &buf);
1585 }
1586}
1587
1588static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1589{
1590 struct fuse_session *se = req->se;
1591 unsigned int xattr_ext = !!(se->conn.want & FUSE_CAP_SETXATTR_EXT);
1592 struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1593 char *name = xattr_ext ? PARAM(arg) :
1594 (char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
1595 char *value = name + strlen(name) + 1;
1596
1597 /* XXX: The API should be extended to support extra_flags/setxattr_flags */
1598 if (req->se->op.setxattr)
1599 req->se->op.setxattr(req, nodeid, name, value, arg->size,
1600 arg->flags);
1601 else
1602 fuse_reply_err(req, ENOSYS);
1603}
1604
1605static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1606{
1607 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1608
1609 if (req->se->op.getxattr)
1610 req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1611 else
1612 fuse_reply_err(req, ENOSYS);
1613}
1614
1615static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1616{
1617 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1618
1619 if (req->se->op.listxattr)
1620 req->se->op.listxattr(req, nodeid, arg->size);
1621 else
1622 fuse_reply_err(req, ENOSYS);
1623}
1624
1625static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1626{
1627 char *name = (char *) inarg;
1628
1629 if (req->se->op.removexattr)
1630 req->se->op.removexattr(req, nodeid, name);
1631 else
1632 fuse_reply_err(req, ENOSYS);
1633}
1634
1635static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1636 struct flock *flock)
1637{
1638 memset(flock, 0, sizeof(struct flock));
1639 flock->l_type = fl->type;
1640 flock->l_whence = SEEK_SET;
1641 flock->l_start = fl->start;
1642 if (fl->end == OFFSET_MAX)
1643 flock->l_len = 0;
1644 else
1645 flock->l_len = fl->end - fl->start + 1;
1646 flock->l_pid = fl->pid;
1647}
1648
1649static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1650{
1651 struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1652 struct fuse_file_info fi;
1653 struct flock flock;
1654
1655 memset(&fi, 0, sizeof(fi));
1656 fi.fh = arg->fh;
1657 fi.lock_owner = arg->owner;
1658
1659 convert_fuse_file_lock(&arg->lk, &flock);
1660 if (req->se->op.getlk)
1661 req->se->op.getlk(req, nodeid, &fi, &flock);
1662 else
1663 fuse_reply_err(req, ENOSYS);
1664}
1665
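/* Shared implementation of FUSE_SETLK/FUSE_SETLKW: dispatches BSD flock
 * requests (FUSE_LK_FLOCK) to op.flock with LOCK_SH/LOCK_EX/LOCK_UN and
 * everything else to op.setlk as a POSIX lock, with *sleep* selecting
 * the blocking variant. */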
1666static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1667 const void *inarg, int sleep)
1668{
1669 struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1670 struct fuse_file_info fi;
1671 struct flock flock;
1672
1673 memset(&fi, 0, sizeof(fi));
1674 fi.fh = arg->fh;
1675 fi.lock_owner = arg->owner;
1676
1677 if (arg->lk_flags & FUSE_LK_FLOCK) {
1678 int op = 0;
1679
1680 switch (arg->lk.type) {
1681 case F_RDLCK:
1682 op = LOCK_SH;
1683 break;
1684 case F_WRLCK:
1685 op = LOCK_EX;
1686 break;
1687 case F_UNLCK:
1688 op = LOCK_UN;
1689 break;
1690 }
1691 if (!sleep)
1692 op |= LOCK_NB;
1693
1694 if (req->se->op.flock)
1695 req->se->op.flock(req, nodeid, &fi, op);
1696 else
1697 fuse_reply_err(req, ENOSYS);
1698 } else {
1699 convert_fuse_file_lock(&arg->lk, &flock);
1700 if (req->se->op.setlk)
1701 req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1702 else
1703 fuse_reply_err(req, ENOSYS);
1704 }
1705}
1706
1707static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1708{
1709 do_setlk_common(req, nodeid, inarg, 0);
1710}
1711
1712static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1713{
1714 do_setlk_common(req, nodeid, inarg, 1);
1715}
1716
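/* Look up the in-flight request targeted by an INTERRUPT. If found, mark
 * it interrupted and invoke the interrupt callback registered in
 * req->u.ni. Returns nonzero when the target was found (or an identical
 * interrupt is already queued), zero if the caller should queue it on
 * se->interrupts. */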
1717static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1718{
1719 struct fuse_req *curr;
1720
1721 for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1722 if (curr->unique == req->u.i.unique) {
1723 fuse_interrupt_func_t func;
1724 void *data;
1725
1726 curr->ctr++;
1727 pthread_mutex_unlock(&se->lock);
1728
1729 /* Ugh, ugly locking */
1730 pthread_mutex_lock(&curr->lock);
1731 pthread_mutex_lock(&se->lock);
1732 curr->interrupted = 1;
1733 func = curr->u.ni.func;
1734 data = curr->u.ni.data;
1735 pthread_mutex_unlock(&se->lock);
1736 if (func)
1737 func(curr, data);
1738 pthread_mutex_unlock(&curr->lock);
1739
1740 pthread_mutex_lock(&se->lock);
1741 curr->ctr--;
1742 if (!curr->ctr) {
1743 fuse_chan_put(req->ch);
1744 req->ch = NULL;
1745 destroy_req(curr);
1746 }
1747
1748 return 1;
1749 }
1750 }
1751 for (curr = se->interrupts.next; curr != &se->interrupts;
1752 curr = curr->next) {
1753 if (curr->u.i.unique == req->u.i.unique)
1754 return 1;
1755 }
1756 return 0;
1757}
1758
1759static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1760{
1761 struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1762 struct fuse_session *se = req->se;
1763
1764 (void) nodeid;
1765 if (se->debug)
1766 fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1767 (unsigned long long) arg->unique);
1768
1769 req->u.i.unique = arg->unique;
1770
1771 pthread_mutex_lock(&se->lock);
1772 if (find_interrupted(se, req)) {
1773 fuse_chan_put(req->ch);
1774 req->ch = NULL;
1775 destroy_req(req);
1776 } else
1777 list_add_req(req, &se->interrupts);
1778 pthread_mutex_unlock(&se->lock);
1779}
1780
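/* Called before dispatching a request: if a queued INTERRUPT matches it,
 * mark the request as interrupted and drop the queued entry; otherwise
 * detach and return one pending interrupt for the caller to deal with. */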
1781static struct fuse_req *check_interrupt(struct fuse_session *se,
1782 struct fuse_req *req)
1783{
1784 struct fuse_req *curr;
1785
1786 for (curr = se->interrupts.next; curr != &se->interrupts;
1787 curr = curr->next) {
1788 if (curr->u.i.unique == req->unique) {
1789 req->interrupted = 1;
1790 list_del_req(curr);
1791 fuse_chan_put(curr->ch);
1792 curr->ch = NULL;
1793 destroy_req(curr);
1794 return NULL;
1795 }
1796 }
1797 curr = se->interrupts.next;
1798 if (curr != &se->interrupts) {
1799 list_del_req(curr);
1800 list_init_req(curr);
1801 return curr;
1802 } else
1803 return NULL;
1804}
1805
1806static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1807{
1808 struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1809
1810 if (req->se->op.bmap)
1811 req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1812 else
1813 fuse_reply_err(req, ENOSYS);
1814}
1815
1816static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1817{
1818 struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1819 unsigned int flags = arg->flags;
1820 void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1821 struct fuse_file_info fi;
1822
1823 if (flags & FUSE_IOCTL_DIR &&
1824 !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
1825 fuse_reply_err(req, ENOTTY);
1826 return;
1827 }
1828
1829 memset(&fi, 0, sizeof(fi));
1830 fi.fh = arg->fh;
1831
1832 if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1833 !(flags & FUSE_IOCTL_32BIT)) {
1834 req->ioctl_64bit = 1;
1835 }
1836
1837 if (req->se->op.ioctl)
1838 req->se->op.ioctl(req, nodeid, arg->cmd,
1839 (void *)(uintptr_t)arg->arg, &fi, flags,
1840 in_buf, arg->in_size, arg->out_size);
1841 else
1842 fuse_reply_err(req, ENOSYS);
1843}
1844
1845void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1846{
1847 free(ph);
1848}
1849
1850static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1851{
1852 struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1853 struct fuse_file_info fi;
1854
1855 memset(&fi, 0, sizeof(fi));
1856 fi.fh = arg->fh;
1857 fi.poll_events = arg->events;
1858
1859 if (req->se->op.poll) {
1860 struct fuse_pollhandle *ph = NULL;
1861
1862 if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1863 ph = malloc(sizeof(struct fuse_pollhandle));
1864 if (ph == NULL) {
1865 fuse_reply_err(req, ENOMEM);
1866 return;
1867 }
1868 ph->kh = arg->kh;
1869 ph->se = req->se;
1870 }
1871
1872 req->se->op.poll(req, nodeid, &fi, ph);
1873 } else {
1874 fuse_reply_err(req, ENOSYS);
1875 }
1876}
1877
1878static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1879{
1880 struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1881 struct fuse_file_info fi;
1882
1883 memset(&fi, 0, sizeof(fi));
1884 fi.fh = arg->fh;
1885
1886 if (req->se->op.fallocate)
1887 req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1888 else
1889 fuse_reply_err(req, ENOSYS);
1890}
1891
1892static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1893{
1894 struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1895 struct fuse_file_info fi_in, fi_out;
1896
1897 memset(&fi_in, 0, sizeof(fi_in));
1898 fi_in.fh = arg->fh_in;
1899
1900 memset(&fi_out, 0, sizeof(fi_out));
1901 fi_out.fh = arg->fh_out;
1902
1903
1904 if (req->se->op.copy_file_range)
1905 req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1906 &fi_in, arg->nodeid_out,
1907 arg->off_out, &fi_out, arg->len,
1908 arg->flags);
1909 else
1910 fuse_reply_err(req, ENOSYS);
1911}
1912
1913static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1914{
1915 struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1916 struct fuse_file_info fi;
1917
1918 memset(&fi, 0, sizeof(fi));
1919 fi.fh = arg->fh;
1920
1921 if (req->se->op.lseek)
1922 req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1923 else
1924 fuse_reply_err(req, ENOSYS);
1925}
1926
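/* FUSE_INIT: negotiate protocol version and capability flags with the
 * kernel, clamp read/write buffer sizes, give the filesystem a chance to
 * adjust se->conn via op.init(), and send the fuse_init_out reply. */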
1927/* Prevent bogus data races (bogus since "init" is called before
1928 * multi-threading becomes relevant) */
1929static __attribute__((no_sanitize("thread")))
1930void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1931{
1932 struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
1933 struct fuse_init_out outarg;
1934 struct fuse_session *se = req->se;
1935 size_t bufsize = se->bufsize;
1936 size_t outargsize = sizeof(outarg);
1937 uint64_t inargflags = 0;
1938 uint64_t outargflags = 0;
1939 (void) nodeid;
1940 if (se->debug) {
1941 fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
1942 if (arg->major == 7 && arg->minor >= 6) {
1943 fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
1944 fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
1945 arg->max_readahead);
1946 }
1947 }
1948 se->conn.proto_major = arg->major;
1949 se->conn.proto_minor = arg->minor;
1950 se->conn.capable = 0;
1951 se->conn.want = 0;
1952
1953 memset(&outarg, 0, sizeof(outarg));
1954 outarg.major = FUSE_KERNEL_VERSION;
1955 outarg.minor = FUSE_KERNEL_MINOR_VERSION;
1956
1957 if (arg->major < 7) {
1958 fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
1959 arg->major, arg->minor);
1960 fuse_reply_err(req, EPROTO);
1961 return;
1962 }
1963
1964 if (arg->major > 7) {
1965 /* Wait for a second INIT request with a 7.X version */
1966 send_reply_ok(req, &outarg, sizeof(outarg));
1967 return;
1968 }
1969
1970 if (arg->minor >= 6) {
1971 if (arg->max_readahead < se->conn.max_readahead)
1972 se->conn.max_readahead = arg->max_readahead;
1973 inargflags = arg->flags;
1974 if (inargflags & FUSE_INIT_EXT)
1975 inargflags = inargflags | (uint64_t) arg->flags2 << 32;
1976 if (inargflags & FUSE_ASYNC_READ)
1977 se->conn.capable |= FUSE_CAP_ASYNC_READ;
1978 if (inargflags & FUSE_POSIX_LOCKS)
1979 se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
1980 if (inargflags & FUSE_ATOMIC_O_TRUNC)
1981 se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
1982 if (inargflags & FUSE_EXPORT_SUPPORT)
1983 se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
1984 if (inargflags & FUSE_DONT_MASK)
1985 se->conn.capable |= FUSE_CAP_DONT_MASK;
1986 if (inargflags & FUSE_FLOCK_LOCKS)
1987 se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
1988 if (inargflags & FUSE_AUTO_INVAL_DATA)
1989 se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
1990 if (inargflags & FUSE_DO_READDIRPLUS)
1991 se->conn.capable |= FUSE_CAP_READDIRPLUS;
1992 if (inargflags & FUSE_READDIRPLUS_AUTO)
1993 se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
1994 if (inargflags & FUSE_ASYNC_DIO)
1995 se->conn.capable |= FUSE_CAP_ASYNC_DIO;
1996 if (inargflags & FUSE_WRITEBACK_CACHE)
1997 se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
1998 if (inargflags & FUSE_NO_OPEN_SUPPORT)
1999 se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
2000 if (inargflags & FUSE_PARALLEL_DIROPS)
2001 se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
2002 if (inargflags & FUSE_POSIX_ACL)
2003 se->conn.capable |= FUSE_CAP_POSIX_ACL;
2004 if (inargflags & FUSE_HANDLE_KILLPRIV)
2005 se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
2006 if (inargflags & FUSE_CACHE_SYMLINKS)
2007 se->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
2008 if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
2009 se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2010 if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2011 se->conn.capable |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2012 if (inargflags & FUSE_SETXATTR_EXT)
2013 se->conn.capable |= FUSE_CAP_SETXATTR_EXT;
2014 if (!(inargflags & FUSE_MAX_PAGES)) {
2015 size_t max_bufsize =
2016 FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2017 + FUSE_BUFFER_HEADER_SIZE;
2018 if (bufsize > max_bufsize) {
2019 bufsize = max_bufsize;
2020 }
2021 }
2022 if (arg->minor >= 38)
2023 se->conn.capable |= FUSE_CAP_EXPIRE_ONLY;
2024 } else {
2025 se->conn.max_readahead = 0;
2026 }
2027
2028 if (se->conn.proto_minor >= 14) {
2029#ifdef HAVE_SPLICE
2030#ifdef HAVE_VMSPLICE
2031 if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2032 se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
2033 }
2034#endif
2035 if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2036 se->conn.capable |= FUSE_CAP_SPLICE_READ;
2037 }
2038#endif
2039 }
2040 if (se->conn.proto_minor >= 18)
2041 se->conn.capable |= FUSE_CAP_IOCTL_DIR;
2042
2043 /* Default settings for modern filesystems.
2044 *
2045 * Most of these capabilities were disabled by default in
2046 * libfuse2 for backwards compatibility reasons. In libfuse3,
2047 * we can finally enable them by default (as long as they're
2048 * supported by the kernel).
2049 */
2050#define LL_SET_DEFAULT(cond, cap) \
2051 if ((cond) && (se->conn.capable & (cap))) \
2052 se->conn.want |= (cap)
2053 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2054 LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
2055 LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2056 LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
2057 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2058 LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2059 LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2060 LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2061 LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2062 FUSE_CAP_POSIX_LOCKS);
2063 LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2064 LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2065 LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2066 FUSE_CAP_READDIRPLUS_AUTO);
2067
2068 /* This could safely become default, but libfuse needs an API extension
2069 * to support it
2070 * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
2071 */
2072
2073 se->conn.time_gran = 1;
2074
2075 if (bufsize < FUSE_MIN_READ_BUFFER) {
2076 fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
2077 bufsize);
2078 bufsize = FUSE_MIN_READ_BUFFER;
2079 }
2080 se->bufsize = bufsize;
2081
2082 if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
2083 se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2084
2085 se->got_init = 1;
2086 if (se->op.init)
2087 se->op.init(se->userdata, &se->conn);
2088
2089 if (se->conn.want & (~se->conn.capable)) {
2090 fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
2091 "0x%x that are not supported by kernel, aborting.\n",
2092 se->conn.want & (~se->conn.capable));
2093 fuse_reply_err(req, EPROTO);
2094 se->error = -EPROTO;
2095 fuse_session_exit(se);
2096 return;
2097 }
2098
2099 unsigned max_read_mo = get_max_read(se->mo);
2100 if (se->conn.max_read != max_read_mo) {
2101 fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2102 "requested different maximum read size (%u vs %u)\n",
2103 se->conn.max_read, max_read_mo);
2104 fuse_reply_err(req, EPROTO);
2105 se->error = -EPROTO;
2106 fuse_session_exit(se);
2107 return;
2108 }
2109
2110 if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2111 se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2112 }
2113 if (arg->flags & FUSE_MAX_PAGES) {
2114 outarg.flags |= FUSE_MAX_PAGES;
2115 outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2116 }
2117 outargflags = outarg.flags;
2118 /* Always enable big writes, this is superseded
2119 by the max_write option */
2120 outargflags |= FUSE_BIG_WRITES;
2121
2122 if (se->conn.want & FUSE_CAP_ASYNC_READ)
2123 outargflags |= FUSE_ASYNC_READ;
2124 if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
2125 outargflags |= FUSE_POSIX_LOCKS;
2126 if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
2127 outargflags |= FUSE_ATOMIC_O_TRUNC;
2128 if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
2129 outargflags |= FUSE_EXPORT_SUPPORT;
2130 if (se->conn.want & FUSE_CAP_DONT_MASK)
2131 outargflags |= FUSE_DONT_MASK;
2132 if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
2133 outargflags |= FUSE_FLOCK_LOCKS;
2134 if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
2135 outargflags |= FUSE_AUTO_INVAL_DATA;
2136 if (se->conn.want & FUSE_CAP_READDIRPLUS)
2137 outargflags |= FUSE_DO_READDIRPLUS;
2138 if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
2139 outargflags |= FUSE_READDIRPLUS_AUTO;
2140 if (se->conn.want & FUSE_CAP_ASYNC_DIO)
2141 outargflags |= FUSE_ASYNC_DIO;
2142 if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
2143 outargflags |= FUSE_WRITEBACK_CACHE;
2144 if (se->conn.want & FUSE_CAP_POSIX_ACL)
2145 outargflags |= FUSE_POSIX_ACL;
2146 if (se->conn.want & FUSE_CAP_CACHE_SYMLINKS)
2147 outargflags |= FUSE_CACHE_SYMLINKS;
2148 if (se->conn.want & FUSE_CAP_EXPLICIT_INVAL_DATA)
2149 outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2150 if (se->conn.want & FUSE_CAP_SETXATTR_EXT)
2151 outargflags |= FUSE_SETXATTR_EXT;
2152
2153 if (inargflags & FUSE_INIT_EXT) {
2154 outargflags |= FUSE_INIT_EXT;
2155 outarg.flags2 = outargflags >> 32;
2156 }
2157
2158 outarg.flags = outargflags;
2159
2160 outarg.max_readahead = se->conn.max_readahead;
2161 outarg.max_write = se->conn.max_write;
2162 if (se->conn.proto_minor >= 13) {
2163 if (se->conn.max_background >= (1 << 16))
2164 se->conn.max_background = (1 << 16) - 1;
2165 if (se->conn.congestion_threshold > se->conn.max_background)
2166 se->conn.congestion_threshold = se->conn.max_background;
2167 if (!se->conn.congestion_threshold) {
2168 se->conn.congestion_threshold =
2169 se->conn.max_background * 3 / 4;
2170 }
2171
2172 outarg.max_background = se->conn.max_background;
2173 outarg.congestion_threshold = se->conn.congestion_threshold;
2174 }
2175 if (se->conn.proto_minor >= 23)
2176 outarg.time_gran = se->conn.time_gran;
2177
2178 if (se->debug) {
2179 fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2180 fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2181 fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2182 outarg.max_readahead);
2183 fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2184 fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2185 outarg.max_background);
2186 fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2187 outarg.congestion_threshold);
2188 fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2189 outarg.time_gran);
2190 }
2191 if (arg->minor < 5)
2192 outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2193 else if (arg->minor < 23)
2194 outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2195
2196 send_reply_ok(req, &outarg, outargsize);
2197}
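
/* Illustrative sketch (not part of the original file): do_init() above hands
 * the negotiated capabilities to the filesystem through se->op.init(). A
 * low-level filesystem would typically only add bits to conn->want that the
 * kernel actually offers in conn->capable, since do_init() aborts with
 * EPROTO when want contains unsupported bits. The callback name my_ll_init
 * is hypothetical.
 *
 *   static void my_ll_init(void *userdata, struct fuse_conn_info *conn)
 *   {
 *       (void) userdata;
 *       // Opt into writeback caching only if the kernel advertised it
 *       if (conn->capable & FUSE_CAP_WRITEBACK_CACHE)
 *           conn->want |= FUSE_CAP_WRITEBACK_CACHE;
 *   }
 */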
2198
2199static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2200{
2201 struct fuse_session *se = req->se;
2202
2203 (void) nodeid;
2204 (void) inarg;
2205
2206 se->got_destroy = 1;
2207 if (se->op.destroy)
2208 se->op.destroy(se->userdata);
2209
2210 send_reply_ok(req, NULL, 0);
2211}
2212
2213static void list_del_nreq(struct fuse_notify_req *nreq)
2214{
2215 struct fuse_notify_req *prev = nreq->prev;
2216 struct fuse_notify_req *next = nreq->next;
2217 prev->next = next;
2218 next->prev = prev;
2219}
2220
2221static void list_add_nreq(struct fuse_notify_req *nreq,
2222 struct fuse_notify_req *next)
2223{
2224 struct fuse_notify_req *prev = next->prev;
2225 nreq->next = next;
2226 nreq->prev = prev;
2227 prev->next = nreq;
2228 next->prev = nreq;
2229}
2230
2231static void list_init_nreq(struct fuse_notify_req *nreq)
2232{
2233 nreq->next = nreq;
2234 nreq->prev = nreq;
2235}
2236
2237static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2238 const void *inarg, const struct fuse_buf *buf)
2239{
2240 struct fuse_session *se = req->se;
2241 struct fuse_notify_req *nreq;
2242 struct fuse_notify_req *head;
2243
2244 pthread_mutex_lock(&se->lock);
2245 head = &se->notify_list;
2246 for (nreq = head->next; nreq != head; nreq = nreq->next) {
2247 if (nreq->unique == req->unique) {
2248 list_del_nreq(nreq);
2249 break;
2250 }
2251 }
2252 pthread_mutex_unlock(&se->lock);
2253
2254 if (nreq != head)
2255 nreq->reply(nreq, req, nodeid, inarg, buf);
2256}
2257
2258static int send_notify_iov(struct fuse_session *se, int notify_code,
2259 struct iovec *iov, int count)
2260{
2261 struct fuse_out_header out;
2262
2263 if (!se->got_init)
2264 return -ENOTCONN;
2265
2266 out.unique = 0;
2267 out.error = notify_code;
2268 iov[0].iov_base = &out;
2269 iov[0].iov_len = sizeof(struct fuse_out_header);
2270
2271 return fuse_send_msg(se, NULL, iov, count);
2272}
2273
2274int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2275{
2276 if (ph != NULL) {
2277 struct fuse_notify_poll_wakeup_out outarg;
2278 struct iovec iov[2];
2279
2280 outarg.kh = ph->kh;
2281
2282 iov[1].iov_base = &outarg;
2283 iov[1].iov_len = sizeof(outarg);
2284
2285 return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2286 } else {
2287 return 0;
2288 }
2289}
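
/* Illustrative sketch (not part of the original file): a filesystem normally
 * saves the fuse_pollhandle passed to its poll() handler and later wakes the
 * kernel through fuse_lowlevel_notify_poll() when I/O becomes possible. The
 * names my_poll and saved_ph are hypothetical; keeping only a single handle
 * is a simplification.
 *
 *   static struct fuse_pollhandle *saved_ph;
 *
 *   static void my_poll(fuse_req_t req, fuse_ino_t ino,
 *                       struct fuse_file_info *fi, struct fuse_pollhandle *ph)
 *   {
 *       (void) ino; (void) fi;
 *       if (ph != NULL) {
 *           if (saved_ph != NULL)
 *               fuse_pollhandle_destroy(saved_ph);
 *           saved_ph = ph;            // keep for a later wakeup
 *       }
 *       fuse_reply_poll(req, 0);      // nothing ready right now
 *   }
 *
 *   // When data becomes available, the filesystem would call:
 *   //     fuse_lowlevel_notify_poll(saved_ph);
 */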
2290
2291int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2292 off_t off, off_t len)
2293{
2294 struct fuse_notify_inval_inode_out outarg;
2295 struct iovec iov[2];
2296
2297 if (!se)
2298 return -EINVAL;
2299
2300 if (se->conn.proto_minor < 12)
2301 return -ENOSYS;
2302
2303 outarg.ino = ino;
2304 outarg.off = off;
2305 outarg.len = len;
2306
2307 iov[1].iov_base = &outarg;
2308 iov[1].iov_len = sizeof(outarg);
2309
2310 return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2311}
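
/* Illustrative sketch (not part of the original file): when file contents
 * change behind the kernel's back (for instance on a network share), the
 * filesystem can drop the kernel's cached data for the affected range. The
 * variables se, ino, off and len describing that out-of-band change are
 * assumptions.
 *
 *   int err = fuse_lowlevel_notify_inval_inode(se, ino, off, len);
 *   if (err != 0)
 *       fuse_log(FUSE_LOG_ERR, "inval_inode: %s\n", strerror(-err));
 */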
2312
2332static int fuse_lowlevel_notify_entry(struct fuse_session *se, fuse_ino_t parent,
2333 const char *name, size_t namelen,
2334 enum fuse_notify_entry_flags flags)
2335{
2336 struct fuse_notify_inval_entry_out outarg;
2337 struct iovec iov[3];
2338
2339 if (!se)
2340 return -EINVAL;
2341
2342 if (se->conn.proto_minor < 12)
2343 return -ENOSYS;
2344
2345 outarg.parent = parent;
2346 outarg.namelen = namelen;
2347 outarg.flags = 0;
2348 if (flags & FUSE_LL_EXPIRE_ONLY)
2349 outarg.flags |= FUSE_EXPIRE_ONLY;
2350
2351 iov[1].iov_base = &outarg;
2352 iov[1].iov_len = sizeof(outarg);
2353 iov[2].iov_base = (void *)name;
2354 iov[2].iov_len = namelen + 1;
2355
2356 return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2357}
2358
2359int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2360 const char *name, size_t namelen)
2361{
2362 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_INVALIDATE);
2363}
2364
2365int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2366 const char *name, size_t namelen)
2367{
2368 if (!se)
2369 return -EINVAL;
2370
2371 if (!(se->conn.capable & FUSE_CAP_EXPIRE_ONLY))
2372 return -ENOSYS;
2373
2374 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_EXPIRE_ONLY);
2375}
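
/* Illustrative sketch (not part of the original file): both calls above go
 * through fuse_lowlevel_notify_entry(); invalidation forcibly drops the
 * dentry, while expiration (FUSE_EXPIRE_ONLY) only marks it for revalidation
 * on the next lookup and requires FUSE_CAP_EXPIRE_ONLY. The variables se,
 * parent and name are assumptions.
 *
 *   int err = fuse_lowlevel_notify_expire_entry(se, parent, name,
 *                                               strlen(name));
 *   if (err == -ENOSYS)   // kernel does not support expire-only
 *       err = fuse_lowlevel_notify_inval_entry(se, parent, name,
 *                                              strlen(name));
 */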
2376
2377
2378int fuse_lowlevel_notify_delete(struct fuse_session *se,
2379 fuse_ino_t parent, fuse_ino_t child,
2380 const char *name, size_t namelen)
2381{
2382 struct fuse_notify_delete_out outarg;
2383 struct iovec iov[3];
2384
2385 if (!se)
2386 return -EINVAL;
2387
2388 if (se->conn.proto_minor < 18)
2389 return -ENOSYS;
2390
2391 outarg.parent = parent;
2392 outarg.child = child;
2393 outarg.namelen = namelen;
2394 outarg.padding = 0;
2395
2396 iov[1].iov_base = &outarg;
2397 iov[1].iov_len = sizeof(outarg);
2398 iov[2].iov_base = (void *)name;
2399 iov[2].iov_len = namelen + 1;
2400
2401 return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2402}
2403
2404int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2405 off_t offset, struct fuse_bufvec *bufv,
2406 enum fuse_buf_copy_flags flags)
2407{
2408 struct fuse_out_header out;
2409 struct fuse_notify_store_out outarg;
2410 struct iovec iov[3];
2411 size_t size = fuse_buf_size(bufv);
2412 int res;
2413
2414 if (!se)
2415 return -EINVAL;
2416
2417 if (se->conn.proto_minor < 15)
2418 return -ENOSYS;
2419
2420 out.unique = 0;
2421 out.error = FUSE_NOTIFY_STORE;
2422
2423 outarg.nodeid = ino;
2424 outarg.offset = offset;
2425 outarg.size = size;
2426 outarg.padding = 0;
2427
2428 iov[0].iov_base = &out;
2429 iov[0].iov_len = sizeof(out);
2430 iov[1].iov_base = &outarg;
2431 iov[1].iov_len = sizeof(outarg);
2432
2433 res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2434 if (res > 0)
2435 res = -res;
2436
2437 return res;
2438}
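
/* Illustrative sketch (not part of the original file): notify_store pushes
 * data into the kernel's cache for an inode without waiting for a read
 * request. The variables se, ino, offset, data and len describing the region
 * being pushed are assumptions.
 *
 *   struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(len);
 *   bufv.buf[0].mem = data;
 *
 *   int err = fuse_lowlevel_notify_store(se, ino, offset, &bufv, 0);
 *   if (err != 0)
 *       fuse_log(FUSE_LOG_ERR, "notify_store: %s\n", strerror(-err));
 */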
2439
2440struct fuse_retrieve_req {
2441 struct fuse_notify_req nreq;
2442 void *cookie;
2443};
2444
2445static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2446 fuse_req_t req, fuse_ino_t ino,
2447 const void *inarg,
2448 const struct fuse_buf *ibuf)
2449{
2450 struct fuse_session *se = req->se;
2451 struct fuse_retrieve_req *rreq =
2452 container_of(nreq, struct fuse_retrieve_req, nreq);
2453 const struct fuse_notify_retrieve_in *arg = inarg;
2454 struct fuse_bufvec bufv = {
2455 .buf[0] = *ibuf,
2456 .count = 1,
2457 };
2458
2459 if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2460 bufv.buf[0].mem = PARAM(arg);
2461
2462 bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2463 sizeof(struct fuse_notify_retrieve_in);
2464
2465 if (bufv.buf[0].size < arg->size) {
2466 fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2467 fuse_reply_none(req);
2468 goto out;
2469 }
2470 bufv.buf[0].size = arg->size;
2471
2472 if (se->op.retrieve_reply) {
2473 se->op.retrieve_reply(req, rreq->cookie, ino,
2474 arg->offset, &bufv);
2475 } else {
2476 fuse_reply_none(req);
2477 }
2478out:
2479 free(rreq);
2480 if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2481 fuse_ll_clear_pipe(se);
2482}
2483
2484int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2485 size_t size, off_t offset, void *cookie)
2486{
2487 struct fuse_notify_retrieve_out outarg;
2488 struct iovec iov[2];
2489 struct fuse_retrieve_req *rreq;
2490 int err;
2491
2492 if (!se)
2493 return -EINVAL;
2494
2495 if (se->conn.proto_minor < 15)
2496 return -ENOSYS;
2497
2498 rreq = malloc(sizeof(*rreq));
2499 if (rreq == NULL)
2500 return -ENOMEM;
2501
2502 pthread_mutex_lock(&se->lock);
2503 rreq->cookie = cookie;
2504 rreq->nreq.unique = se->notify_ctr++;
2505 rreq->nreq.reply = fuse_ll_retrieve_reply;
2506 list_add_nreq(&rreq->nreq, &se->notify_list);
2507 pthread_mutex_unlock(&se->lock);
2508
2509 outarg.notify_unique = rreq->nreq.unique;
2510 outarg.nodeid = ino;
2511 outarg.offset = offset;
2512 outarg.size = size;
2513 outarg.padding = 0;
2514
2515 iov[1].iov_base = &outarg;
2516 iov[1].iov_len = sizeof(outarg);
2517
2518 err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2519 if (err) {
2520 pthread_mutex_lock(&se->lock);
2521 list_del_nreq(&rreq->nreq);
2522 pthread_mutex_unlock(&se->lock);
2523 free(rreq);
2524 }
2525
2526 return err;
2527}
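
/* Illustrative sketch (not part of the original file): the data requested
 * with fuse_lowlevel_notify_retrieve() comes back through the retrieve_reply
 * operation, matched via the cookie. The callback name my_retrieve_reply is
 * hypothetical; finishing the request with fuse_reply_none() mirrors what
 * fuse_ll_retrieve_reply() above does when no handler is installed.
 *
 *   static void my_retrieve_reply(fuse_req_t req, void *cookie,
 *                                 fuse_ino_t ino, off_t offset,
 *                                 struct fuse_bufvec *bufv)
 *   {
 *       (void) cookie; (void) ino; (void) offset;
 *       // Consume the retrieved pages, e.g. copy them out with fuse_buf_copy()
 *       fuse_log(FUSE_LOG_DEBUG, "retrieved %zu bytes\n", fuse_buf_size(bufv));
 *       fuse_reply_none(req);
 *   }
 *
 *   // Somewhere in the filesystem:
 *   //     fuse_lowlevel_notify_retrieve(se, ino, size, offset, my_cookie);
 */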
2528
2529void *fuse_req_userdata(fuse_req_t req)
2530{
2531 return req->se->userdata;
2532}
2533
2534const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2535{
2536 return &req->ctx;
2537}
2538
2539void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2540 void *data)
2541{
2542 pthread_mutex_lock(&req->lock);
2543 pthread_mutex_lock(&req->se->lock);
2544 req->u.ni.func = func;
2545 req->u.ni.data = data;
2546 pthread_mutex_unlock(&req->se->lock);
2547 if (req->interrupted && func)
2548 func(req, data);
2549 pthread_mutex_unlock(&req->lock);
2550}
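
/* Illustrative sketch (not part of the original file): a long-running
 * request can register an interrupt callback so that a FUSE_INTERRUPT
 * arriving later (or one that already arrived, see the req->interrupted
 * check above) cancels the work. The names my_interrupt, my_slow_op and
 * more_work_to_do are hypothetical.
 *
 *   static void my_interrupt(fuse_req_t req, void *data)
 *   {
 *       (void) req;
 *       *(volatile int *) data = 1;   // tell the worker to stop
 *   }
 *
 *   static void my_slow_op(fuse_req_t req)
 *   {
 *       volatile int interrupted = 0;
 *
 *       fuse_req_interrupt_func(req, my_interrupt, (void *) &interrupted);
 *       while (!interrupted && more_work_to_do()) {
 *           // ... do a bounded chunk of work ...
 *       }
 *       if (interrupted)
 *           fuse_reply_err(req, EINTR);
 *       // ... otherwise send the normal reply ...
 *   }
 */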
2551
2552int fuse_req_interrupted(fuse_req_t req)
2553{
2554 int interrupted;
2555
2556 pthread_mutex_lock(&req->se->lock);
2557 interrupted = req->interrupted;
2558 pthread_mutex_unlock(&req->se->lock);
2559
2560 return interrupted;
2561}
2562
2563static struct {
2564 void (*func)(fuse_req_t, fuse_ino_t, const void *);
2565 const char *name;
2566} fuse_ll_ops[] = {
2567 [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2568 [FUSE_FORGET] = { do_forget, "FORGET" },
2569 [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2570 [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2571 [FUSE_READLINK] = { do_readlink, "READLINK" },
2572 [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2573 [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2574 [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2575 [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2576 [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2577 [FUSE_RENAME] = { do_rename, "RENAME" },
2578 [FUSE_LINK] = { do_link, "LINK" },
2579 [FUSE_OPEN] = { do_open, "OPEN" },
2580 [FUSE_READ] = { do_read, "READ" },
2581 [FUSE_WRITE] = { do_write, "WRITE" },
2582 [FUSE_STATFS] = { do_statfs, "STATFS" },
2583 [FUSE_RELEASE] = { do_release, "RELEASE" },
2584 [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2585 [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2586 [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2587 [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2588 [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2589 [FUSE_FLUSH] = { do_flush, "FLUSH" },
2590 [FUSE_INIT] = { do_init, "INIT" },
2591 [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2592 [FUSE_READDIR] = { do_readdir, "READDIR" },
2593 [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2594 [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2595 [FUSE_GETLK] = { do_getlk, "GETLK" },
2596 [FUSE_SETLK] = { do_setlk, "SETLK" },
2597 [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2598 [FUSE_ACCESS] = { do_access, "ACCESS" },
2599 [FUSE_CREATE] = { do_create, "CREATE" },
2600 [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2601 [FUSE_BMAP] = { do_bmap, "BMAP" },
2602 [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2603 [FUSE_POLL] = { do_poll, "POLL" },
2604 [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2605 [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2606 [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" },
2607 [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2608 [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2609 [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2610 [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2611 [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2612 [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2613};
2614
2615#define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2616
2617static const char *opname(enum fuse_opcode opcode)
2618{
2619 if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2620 return "???";
2621 else
2622 return fuse_ll_ops[opcode].name;
2623}
2624
2625static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2626 struct fuse_bufvec *src)
2627{
2628 ssize_t res = fuse_buf_copy(dst, src, 0);
2629 if (res < 0) {
2630 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2631 return res;
2632 }
2633 if ((size_t)res < fuse_buf_size(dst)) {
2634 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2635 return -1;
2636 }
2637 return 0;
2638}
2639
2640void fuse_session_process_buf(struct fuse_session *se,
2641 const struct fuse_buf *buf)
2642{
2643 fuse_session_process_buf_int(se, buf, NULL);
2644}
2645
2646void fuse_session_process_buf_int(struct fuse_session *se,
2647 const struct fuse_buf *buf, struct fuse_chan *ch)
2648{
2649 const size_t write_header_size = sizeof(struct fuse_in_header) +
2650 sizeof(struct fuse_write_in);
2651 struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2652 struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2653 struct fuse_in_header *in;
2654 const void *inarg;
2655 struct fuse_req *req;
2656 void *mbuf = NULL;
2657 int err;
2658 int res;
2659
2660 if (buf->flags & FUSE_BUF_IS_FD) {
2661 if (buf->size < tmpbuf.buf[0].size)
2662 tmpbuf.buf[0].size = buf->size;
2663
2664 mbuf = malloc(tmpbuf.buf[0].size);
2665 if (mbuf == NULL) {
2666 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2667 goto clear_pipe;
2668 }
2669 tmpbuf.buf[0].mem = mbuf;
2670
2671 res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2672 if (res < 0)
2673 goto clear_pipe;
2674
2675 in = mbuf;
2676 } else {
2677 in = buf->mem;
2678 }
2679
2680 if (se->debug) {
2681 fuse_log(FUSE_LOG_DEBUG,
2682 "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2683 (unsigned long long) in->unique,
2684 opname((enum fuse_opcode) in->opcode), in->opcode,
2685 (unsigned long long) in->nodeid, buf->size, in->pid);
2686 }
2687
2688 req = fuse_ll_alloc_req(se);
2689 if (req == NULL) {
2690 struct fuse_out_header out = {
2691 .unique = in->unique,
2692 .error = -ENOMEM,
2693 };
2694 struct iovec iov = {
2695 .iov_base = &out,
2696 .iov_len = sizeof(struct fuse_out_header),
2697 };
2698
2699 fuse_send_msg(se, ch, &iov, 1);
2700 goto clear_pipe;
2701 }
2702
2703 req->unique = in->unique;
2704 req->ctx.uid = in->uid;
2705 req->ctx.gid = in->gid;
2706 req->ctx.pid = in->pid;
2707 req->ch = ch ? fuse_chan_get(ch) : NULL;
2708
2709 err = EIO;
2710 if (!se->got_init) {
2711 enum fuse_opcode expected;
2712
2713 expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2714 if (in->opcode != expected)
2715 goto reply_err;
2716 } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2717 goto reply_err;
2718
2719 err = EACCES;
2720 /* Implement -o allow_root */
2721 if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2722 in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2723 in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2724 in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2725 in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2726 in->opcode != FUSE_NOTIFY_REPLY &&
2727 in->opcode != FUSE_READDIRPLUS)
2728 goto reply_err;
2729
2730 err = ENOSYS;
2731 if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
2732 goto reply_err;
2733 if (in->opcode != FUSE_INTERRUPT) {
2734 struct fuse_req *intr;
2735 pthread_mutex_lock(&se->lock);
2736 intr = check_interrupt(se, req);
2737 list_add_req(req, &se->list);
2738 pthread_mutex_unlock(&se->lock);
2739 if (intr)
2740 fuse_reply_err(intr, EAGAIN);
2741 }
2742
2743 if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
2744 (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
2745 in->opcode != FUSE_NOTIFY_REPLY) {
2746 void *newmbuf;
2747
2748 err = ENOMEM;
2749 newmbuf = realloc(mbuf, buf->size);
2750 if (newmbuf == NULL)
2751 goto reply_err;
2752 mbuf = newmbuf;
2753
2754 tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
2755 tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
2756
2757 res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2758 err = -res;
2759 if (res < 0)
2760 goto reply_err;
2761
2762 in = mbuf;
2763 }
2764
2765 inarg = (void *) &in[1];
2766 if (in->opcode == FUSE_WRITE && se->op.write_buf)
2767 do_write_buf(req, in->nodeid, inarg, buf);
2768 else if (in->opcode == FUSE_NOTIFY_REPLY)
2769 do_notify_reply(req, in->nodeid, inarg, buf);
2770 else
2771 fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
2772
2773out_free:
2774 free(mbuf);
2775 return;
2776
2777reply_err:
2778 fuse_reply_err(req, err);
2779clear_pipe:
2780 if (buf->flags & FUSE_BUF_IS_FD)
2781 fuse_ll_clear_pipe(se);
2782 goto out_free;
2783}
2784
2785#define LL_OPTION(n,o,v) \
2786 { n, offsetof(struct fuse_session, o), v }
2787
2788static const struct fuse_opt fuse_ll_opts[] = {
2789 LL_OPTION("debug", debug, 1),
2790 LL_OPTION("-d", debug, 1),
2791 LL_OPTION("--debug", debug, 1),
2792 LL_OPTION("allow_root", deny_others, 1),
2793 FUSE_OPT_END
2794};
2795
2796void fuse_lowlevel_version(void)
2797{
2798 printf("using FUSE kernel interface version %i.%i\n",
2799 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2800 fuse_mount_version();
2801}
2802
2803void fuse_lowlevel_help(void)
2804{
2805 /* These are not all options, but the ones that are
2806 potentially of interest to an end-user */
2807 printf(
2808" -o allow_other allow access by all users\n"
2809" -o allow_root allow access by root\n"
2810" -o auto_unmount auto unmount on process termination\n");
2811}
2812
2813void fuse_session_destroy(struct fuse_session *se)
2814{
2815 struct fuse_ll_pipe *llp;
2816
2817 if (se->got_init && !se->got_destroy) {
2818 if (se->op.destroy)
2819 se->op.destroy(se->userdata);
2820 }
2821 llp = pthread_getspecific(se->pipe_key);
2822 if (llp != NULL)
2823 fuse_ll_pipe_free(llp);
2824 pthread_key_delete(se->pipe_key);
2825 pthread_mutex_destroy(&se->lock);
2826 free(se->cuse_data);
2827 if (se->fd != -1)
2828 close(se->fd);
2829 if (se->io != NULL)
2830 free(se->io);
2831 destroy_mount_opts(se->mo);
2832 free(se);
2833}
2834
2835
2836static void fuse_ll_pipe_destructor(void *data)
2837{
2838 struct fuse_ll_pipe *llp = data;
2839 fuse_ll_pipe_free(llp);
2840}
2841
2842int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
2843{
2844 return fuse_session_receive_buf_int(se, buf, NULL);
2845}
2846
2847int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
2848 struct fuse_chan *ch)
2849{
2850 int err;
2851 ssize_t res;
2852#ifdef HAVE_SPLICE
2853 size_t bufsize = se->bufsize;
2854 struct fuse_ll_pipe *llp;
2855 struct fuse_buf tmpbuf;
2856
2857 if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
2858 goto fallback;
2859
2860 llp = fuse_ll_get_pipe(se);
2861 if (llp == NULL)
2862 goto fallback;
2863
2864 if (llp->size < bufsize) {
2865 if (llp->can_grow) {
2866 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
2867 if (res == -1) {
2868 llp->can_grow = 0;
2869 res = grow_pipe_to_max(llp->pipe[0]);
2870 if (res > 0)
2871 llp->size = res;
2872 goto fallback;
2873 }
2874 llp->size = res;
2875 }
2876 if (llp->size < bufsize)
2877 goto fallback;
2878 }
2879
2880 if (se->io != NULL && se->io->splice_receive != NULL) {
2881 res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
2882 llp->pipe[1], NULL, bufsize, 0,
2883 se->userdata);
2884 } else {
2885 res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
2886 bufsize, 0);
2887 }
2888 err = errno;
2889
2890 if (fuse_session_exited(se))
2891 return 0;
2892
2893 if (res == -1) {
2894 if (err == ENODEV) {
2895 /* Filesystem was unmounted, or connection was aborted
2896 via /sys/fs/fuse/connections */
2897 fuse_session_exit(se);
2898 return 0;
2899 }
2900 if (err != EINTR && err != EAGAIN)
2901 perror("fuse: splice from device");
2902 return -err;
2903 }
2904
2905 if (res < sizeof(struct fuse_in_header)) {
2906 fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
2907 return -EIO;
2908 }
2909
2910 tmpbuf = (struct fuse_buf) {
2911 .size = res,
2912 .flags = FUSE_BUF_IS_FD,
2913 .fd = llp->pipe[0],
2914 };
2915
2916 /*
2917 * Don't bother with zero copy for small requests.
2918 * fuse_loop_mt() needs to check for FORGET so this is more than
2919 * just an optimization.
2920 */
2921 if (res < sizeof(struct fuse_in_header) +
2922 sizeof(struct fuse_write_in) + pagesize) {
2923 struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
2924 struct fuse_bufvec dst = { .count = 1 };
2925
2926 if (!buf->mem) {
2927 buf->mem = malloc(se->bufsize);
2928 if (!buf->mem) {
2929 fuse_log(FUSE_LOG_ERR,
2930 "fuse: failed to allocate read buffer\n");
2931 return -ENOMEM;
2932 }
2933 }
2934 buf->size = se->bufsize;
2935 buf->flags = 0;
2936 dst.buf[0] = *buf;
2937
2938 res = fuse_buf_copy(&dst, &src, 0);
2939 if (res < 0) {
2940 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
2941 strerror(-res));
2942 fuse_ll_clear_pipe(se);
2943 return res;
2944 }
2945 if (res < tmpbuf.size) {
2946 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2947 fuse_ll_clear_pipe(se);
2948 return -EIO;
2949 }
2950 assert(res == tmpbuf.size);
2951
2952 } else {
2953 /* Don't overwrite buf->mem, as that would cause a leak */
2954 buf->fd = tmpbuf.fd;
2955 buf->flags = tmpbuf.flags;
2956 }
2957 buf->size = tmpbuf.size;
2958
2959 return res;
2960
2961fallback:
2962#endif
2963 if (!buf->mem) {
2964 buf->mem = malloc(se->bufsize);
2965 if (!buf->mem) {
2966 fuse_log(FUSE_LOG_ERR,
2967 "fuse: failed to allocate read buffer\n");
2968 return -ENOMEM;
2969 }
2970 }
2971
2972restart:
2973 if (se->io != NULL) {
2974 /* se->io->read is never NULL if se->io is not NULL as
2975 specified by fuse_session_custom_io() */
2976 res = se->io->read(ch ? ch->fd : se->fd, buf->mem, se->bufsize,
2977 se->userdata);
2978 } else {
2979 res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
2980 }
2981 err = errno;
2982
2983 if (fuse_session_exited(se))
2984 return 0;
2985 if (res == -1) {
2986 /* ENOENT means the operation was interrupted, it's safe
2987 to restart */
2988 if (err == ENOENT)
2989 goto restart;
2990
2991 if (err == ENODEV) {
2992 /* Filesystem was unmounted, or connection was aborted
2993 via /sys/fs/fuse/connections */
2994 fuse_session_exit(se);
2995 return 0;
2996 }
2997 /* Errors occurring during normal operation: EINTR (read
2998 interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
2999 umounted) */
3000 if (err != EINTR && err != EAGAIN)
3001 perror("fuse: reading device");
3002 return -err;
3003 }
3004 if ((size_t) res < sizeof(struct fuse_in_header)) {
3005 fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
3006 return -EIO;
3007 }
3008
3009 buf->size = res;
3010
3011 return res;
3012}
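
/* Illustrative sketch (not part of the original file): the receive/process
 * pair above is what the session loops (implemented in separate files)
 * drive. A minimal single-threaded loop, roughly what fuse_session_loop()
 * does, could look like this:
 *
 *   struct fuse_buf fbuf = { .mem = NULL };
 *   int res = 0;
 *
 *   while (!fuse_session_exited(se)) {
 *       res = fuse_session_receive_buf(se, &fbuf);
 *       if (res == -EINTR)
 *           continue;          // interrupted read, just retry
 *       if (res <= 0)
 *           break;             // error, or unmount (res == 0)
 *       fuse_session_process_buf(se, &fbuf);
 *   }
 *   free(fbuf.mem);
 */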
3013
3014struct fuse_session *fuse_session_new(struct fuse_args *args,
3015 const struct fuse_lowlevel_ops *op,
3016 size_t op_size, void *userdata)
3017{
3018 int err;
3019 struct fuse_session *se;
3020 struct mount_opts *mo;
3021
3022 if (sizeof(struct fuse_lowlevel_ops) < op_size) {
3023 fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
3024 op_size = sizeof(struct fuse_lowlevel_ops);
3025 }
3026
3027 if (args->argc == 0) {
3028 fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
3029 return NULL;
3030 }
3031
3032 se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
3033 if (se == NULL) {
3034 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
3035 goto out1;
3036 }
3037 se->fd = -1;
3038 se->conn.max_write = UINT_MAX;
3039 se->conn.max_readahead = UINT_MAX;
3040
3041 /* Parse options */
3042 if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
3043 goto out2;
3044 if(se->deny_others) {
3045 /* Allowing access only by root is done by instructing
3046 * kernel to allow access by everyone, and then restricting
3047 * access to root and mountpoint owner in libfuse.
3048 */
3049 // We may be adding the option a second time, but
3050 // that doesn't hurt.
3051 if(fuse_opt_add_arg(args, "-oallow_other") == -1)
3052 goto out2;
3053 }
3054 mo = parse_mount_opts(args);
3055 if (mo == NULL)
3056 goto out3;
3057
3058 if(args->argc == 1 &&
3059 args->argv[0][0] == '-') {
3060 fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
3061 "will be ignored\n");
3062 } else if (args->argc != 1) {
3063 int i;
3064 fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
3065 for(i = 1; i < args->argc-1; i++)
3066 fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
3067 fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
3068 goto out4;
3069 }
3070
3071 if (se->debug)
3072 fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
3073
3074 se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
3075 FUSE_BUFFER_HEADER_SIZE;
3076
3077 list_init_req(&se->list);
3078 list_init_req(&se->interrupts);
3079 list_init_nreq(&se->notify_list);
3080 se->notify_ctr = 1;
3081 pthread_mutex_init(&se->lock, NULL);
3082
3083 err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
3084 if (err) {
3085 fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
3086 strerror(err));
3087 goto out5;
3088 }
3089
3090 memcpy(&se->op, op, op_size);
3091 se->owner = getuid();
3092 se->userdata = userdata;
3093
3094 se->mo = mo;
3095 return se;
3096
3097out5:
3098 pthread_mutex_destroy(&se->lock);
3099out4:
3100 fuse_opt_free_args(args);
3101out3:
3102 if (mo != NULL)
3103 destroy_mount_opts(mo);
3104out2:
3105 free(se);
3106out1:
3107 return NULL;
3108}
3109
3110int fuse_session_custom_io(struct fuse_session *se, const struct fuse_custom_io *io,
3111 int fd)
3112{
3113 if (fd < 0) {
3114 fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3115 "fuse_session_custom_io()\n", fd);
3116 return -EBADF;
3117 }
3118 if (io == NULL) {
3119 fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3120 "fuse_session_custom_io()\n");
3121 return -EINVAL;
3122 } else if (io->read == NULL || io->writev == NULL) {
3123 /* If the user provides their own file descriptor, we can't
3124 guarantee that libfuse's default io operations will behave
3125 correctly on it. Therefore, we require the user to implement
3126 these io operations when using custom io. */
3127 fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3128 "implement both io->read() and io->writev\n");
3129 return -EINVAL;
3130 }
3131
3132 se->io = malloc(sizeof(struct fuse_custom_io));
3133 if (se->io == NULL) {
3134 fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3135 "Error: %s\n", strerror(errno));
3136 return -errno;
3137 }
3138
3139 se->fd = fd;
3140 *se->io = *io;
3141 return 0;
3142}
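
/* Illustrative sketch (not part of the original file): a custom I/O layer
 * only has to provide read() and writev(); the splice hooks may stay NULL,
 * in which case the splice capabilities are simply not offered (see
 * do_init() above). The wrapper names are hypothetical and the writev
 * prototype is assumed to match struct fuse_custom_io in fuse_lowlevel.h;
 * both wrappers follow the read(2)/writev(2) convention of returning -1
 * with errno set on failure.
 *
 *   static ssize_t my_read(int fd, void *buf, size_t buf_len, void *userdata)
 *   {
 *       (void) userdata;
 *       return read(fd, buf, buf_len);
 *   }
 *
 *   static ssize_t my_writev(int fd, struct iovec *iov, int count,
 *                            void *userdata)
 *   {
 *       (void) userdata;
 *       return writev(fd, iov, count);    // needs <sys/uio.h>
 *   }
 *
 *   static const struct fuse_custom_io my_io = {
 *       .read = my_read,
 *       .writev = my_writev,
 *   };
 *
 *   // fuse_session_custom_io(se, &my_io, fd);
 */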
3143
3144int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
3145{
3146 int fd;
3147
3148 /*
3149 * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
3150 * would ensue.
3151 */
3152 do {
3153 fd = open("/dev/null", O_RDWR);
3154 if (fd > 2)
3155 close(fd);
3156 } while (fd >= 0 && fd <= 2);
3157
3158 /*
3159 * To allow FUSE daemons to run without privileges, the caller may open
3160 * /dev/fuse before launching the file system and pass on the file
3161 * descriptor by specifying /dev/fd/N as the mount point. Note that the
3162 * parent process takes care of performing the mount in this case.
3163 */
3164 fd = fuse_mnt_parse_fuse_fd(mountpoint);
3165 if (fd != -1) {
3166 if (fcntl(fd, F_GETFD) == -1) {
3167 fuse_log(FUSE_LOG_ERR,
3168 "fuse: Invalid file descriptor /dev/fd/%u\n",
3169 fd);
3170 return -1;
3171 }
3172 se->fd = fd;
3173 return 0;
3174 }
3175
3176 /* Open channel */
3177 fd = fuse_kern_mount(mountpoint, se->mo);
3178 if (fd == -1)
3179 return -1;
3180 se->fd = fd;
3181
3182 /* Save mountpoint */
3183 se->mountpoint = strdup(mountpoint);
3184 if (se->mountpoint == NULL)
3185 goto error_out;
3186
3187 return 0;
3188
3189error_out:
3190 fuse_kern_unmount(mountpoint, fd);
3191 return -1;
3192}
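
/* Illustrative sketch (not part of the original file): typical low-level
 * daemon setup around fuse_session_new() and fuse_session_mount(). Error
 * reporting is trimmed, my_ops and mountpoint are assumptions, and
 * fuse_session_loop() is implemented in the separate session-loop sources.
 *
 *   struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
 *   struct fuse_session *se;
 *   int ret = 1;
 *
 *   se = fuse_session_new(&args, &my_ops, sizeof(my_ops), NULL);
 *   if (se != NULL && fuse_session_mount(se, mountpoint) == 0) {
 *       ret = fuse_session_loop(se);      // single-threaded dispatch
 *       fuse_session_unmount(se);
 *   }
 *   if (se != NULL)
 *       fuse_session_destroy(se);
 *   fuse_opt_free_args(&args);
 */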
3193
3194int fuse_session_fd(struct fuse_session *se)
3195{
3196 return se->fd;
3197}
3198
3199void fuse_session_unmount(struct fuse_session *se)
3200{
3201 if (se->mountpoint != NULL) {
3202 fuse_kern_unmount(se->mountpoint, se->fd);
3203 se->fd = -1;
3204 free(se->mountpoint);
3205 se->mountpoint = NULL;
3206 }
3207}
3208
3209#ifdef linux
3210int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3211{
3212 char *buf;
3213 size_t bufsize = 1024;
3214 char path[128];
3215 int ret;
3216 int fd;
3217 unsigned long pid = req->ctx.pid;
3218 char *s;
3219
3220 sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3221
3222retry:
3223 buf = malloc(bufsize);
3224 if (buf == NULL)
3225 return -ENOMEM;
3226
3227 ret = -EIO;
3228 fd = open(path, O_RDONLY);
3229 if (fd == -1)
3230 goto out_free;
3231
3232 ret = read(fd, buf, bufsize);
3233 close(fd);
3234 if (ret < 0) {
3235 ret = -EIO;
3236 goto out_free;
3237 }
3238
3239 if ((size_t)ret == bufsize) {
3240 free(buf);
3241 bufsize *= 4;
3242 goto retry;
3243 }
3244
3245 ret = -EIO;
3246 s = strstr(buf, "\nGroups:");
3247 if (s == NULL)
3248 goto out_free;
3249
3250 s += 8;
3251 ret = 0;
3252 while (1) {
3253 char *end;
3254 unsigned long val = strtoul(s, &end, 0);
3255 if (end == s)
3256 break;
3257
3258 s = end;
3259 if (ret < size)
3260 list[ret] = val;
3261 ret++;
3262 }
3263
3264out_free:
3265 free(buf);
3266 return ret;
3267}
3268#else /* linux */
3269/*
3270 * This is currently not implemented on other than Linux...
3271 */
3272int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3273{
3274 (void) req; (void) size; (void) list;
3275 return -ENOSYS;
3276}
3277#endif
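
/* Illustrative sketch (not part of the original file): like getgroups(2),
 * fuse_req_getgroups() returns the total number of supplementary groups,
 * which may exceed the array that was passed in, so callers retry with a
 * larger buffer. The buffer sizes below are arbitrary.
 *
 *   gid_t small[32];
 *   int n = fuse_req_getgroups(req, 32, small);
 *   if (n > 32) {
 *       gid_t *big = malloc(n * sizeof(gid_t));
 *       if (big != NULL) {
 *           n = fuse_req_getgroups(req, n, big);
 *           // ... use big[0 .. n-1] ...
 *           free(big);
 *       }
 *   } else if (n >= 0) {
 *       // ... use small[0 .. n-1] ...
 *   }
 */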
3278
3279/* Prevent spurious data race warning - we don't care
3280 * about races for this flag */
3281__attribute__((no_sanitize_thread))
3282void fuse_session_exit(struct fuse_session *se)
3283{
3284 se->exited = 1;
3285}
3286
3287__attribute__((no_sanitize_thread))
3288void fuse_session_reset(struct fuse_session *se)
3289{
3290 se->exited = 0;
3291 se->error = 0;
3292}
3293
3294__attribute__((no_sanitize_thread))
3295int fuse_session_exited(struct fuse_session *se)
3296{
3297 return se->exited;
3298}