libfuse
fuse_lowlevel.c
1 /*
2  FUSE: Filesystem in Userspace
3  Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4 
5  Implementation of (most of) the low-level FUSE API. The session loop
6  functions are implemented in separate files.
7 
8  This program can be distributed under the terms of the GNU LGPLv2.
9  See the file COPYING.LIB
10 */
11 
12 #define _GNU_SOURCE
13 
14 #include "fuse_config.h"
15 #include "fuse_i.h"
16 #include "fuse_kernel.h"
17 #include "fuse_opt.h"
18 #include "fuse_misc.h"
19 #include "mount_util.h"
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <stddef.h>
24 #include <string.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <errno.h>
28 #include <assert.h>
29 #include <sys/file.h>
30 
31 #ifndef F_LINUX_SPECIFIC_BASE
32 #define F_LINUX_SPECIFIC_BASE 1024
33 #endif
34 #ifndef F_SETPIPE_SZ
35 #define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
36 #endif
37 
38 
39 #define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
40 #define OFFSET_MAX 0x7fffffffffffffffLL
41 
42 #define container_of(ptr, type, member) ({ \
43  const typeof( ((type *)0)->member ) *__mptr = (ptr); \
44  (type *)( (char *)__mptr - offsetof(type,member) );})
45 
46 struct fuse_pollhandle {
47  uint64_t kh;
48  struct fuse_session *se;
49 };
50 
51 static size_t pagesize;
52 
53 static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
54 {
55  pagesize = getpagesize();
56 }
57 
58 static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
59 {
60  attr->ino = stbuf->st_ino;
61  attr->mode = stbuf->st_mode;
62  attr->nlink = stbuf->st_nlink;
63  attr->uid = stbuf->st_uid;
64  attr->gid = stbuf->st_gid;
65  attr->rdev = stbuf->st_rdev;
66  attr->size = stbuf->st_size;
67  attr->blksize = stbuf->st_blksize;
68  attr->blocks = stbuf->st_blocks;
69  attr->atime = stbuf->st_atime;
70  attr->mtime = stbuf->st_mtime;
71  attr->ctime = stbuf->st_ctime;
72  attr->atimensec = ST_ATIM_NSEC(stbuf);
73  attr->mtimensec = ST_MTIM_NSEC(stbuf);
74  attr->ctimensec = ST_CTIM_NSEC(stbuf);
75 }
76 
77 static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
78 {
79  stbuf->st_mode = attr->mode;
80  stbuf->st_uid = attr->uid;
81  stbuf->st_gid = attr->gid;
82  stbuf->st_size = attr->size;
83  stbuf->st_atime = attr->atime;
84  stbuf->st_mtime = attr->mtime;
85  stbuf->st_ctime = attr->ctime;
86  ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
87  ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
88  ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
89 }
90 
91 static size_t iov_length(const struct iovec *iov, size_t count)
92 {
93  size_t seg;
94  size_t ret = 0;
95 
96  for (seg = 0; seg < count; seg++)
97  ret += iov[seg].iov_len;
98  return ret;
99 }
100 
101 static void list_init_req(struct fuse_req *req)
102 {
103  req->next = req;
104  req->prev = req;
105 }
106 
107 static void list_del_req(struct fuse_req *req)
108 {
109  struct fuse_req *prev = req->prev;
110  struct fuse_req *next = req->next;
111  prev->next = next;
112  next->prev = prev;
113 }
114 
115 static void list_add_req(struct fuse_req *req, struct fuse_req *next)
116 {
117  struct fuse_req *prev = next->prev;
118  req->next = next;
119  req->prev = prev;
120  prev->next = req;
121  next->prev = req;
122 }
123 
124 static void destroy_req(fuse_req_t req)
125 {
126  assert(req->ch == NULL);
127  pthread_mutex_destroy(&req->lock);
128  free(req);
129 }
130 
131 void fuse_free_req(fuse_req_t req)
132 {
133  int ctr;
134  struct fuse_session *se = req->se;
135 
136  pthread_mutex_lock(&se->lock);
137  req->u.ni.func = NULL;
138  req->u.ni.data = NULL;
139  list_del_req(req);
140  ctr = --req->ctr;
141  fuse_chan_put(req->ch);
142  req->ch = NULL;
143  pthread_mutex_unlock(&se->lock);
144  if (!ctr)
145  destroy_req(req);
146 }
147 
148 static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
149 {
150  struct fuse_req *req;
151 
152  req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
153  if (req == NULL) {
154  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
155  } else {
156  req->se = se;
157  req->ctr = 1;
158  list_init_req(req);
159  pthread_mutex_init(&req->lock, NULL);
160  }
161 
162  return req;
163 }
164 
165 /* Send data. If *ch* is NULL, send via session master fd */
166 static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
167  struct iovec *iov, int count)
168 {
169  struct fuse_out_header *out = iov[0].iov_base;
170 
171  assert(se != NULL);
172  out->len = iov_length(iov, count);
173  if (se->debug) {
174  if (out->unique == 0) {
175  fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
176  out->error, out->len);
177  } else if (out->error) {
178  fuse_log(FUSE_LOG_DEBUG,
179  " unique: %llu, error: %i (%s), outsize: %i\n",
180  (unsigned long long) out->unique, out->error,
181  strerror(-out->error), out->len);
182  } else {
183  fuse_log(FUSE_LOG_DEBUG,
184  " unique: %llu, success, outsize: %i\n",
185  (unsigned long long) out->unique, out->len);
186  }
187  }
188 
189  ssize_t res;
190  if (se->io != NULL)
191  /* se->io->writev is never NULL if se->io is not NULL as
192  specified by fuse_session_custom_io() */
193  res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
194  se->userdata);
195  else
196  res = writev(ch ? ch->fd : se->fd, iov, count);
197 
198  int err = errno;
199 
200  if (res == -1) {
201  /* ENOENT means the operation was interrupted */
202  if (!fuse_session_exited(se) && err != ENOENT)
203  perror("fuse: writing device");
204  return -err;
205  }
206 
207  return 0;
208 }
209 
210 
211 int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
212  int count)
213 {
214  struct fuse_out_header out;
215 
216  if (error <= -1000 || error > 0) {
217  fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
218  error = -ERANGE;
219  }
220 
221  out.unique = req->unique;
222  out.error = error;
223 
224  iov[0].iov_base = &out;
225  iov[0].iov_len = sizeof(struct fuse_out_header);
226 
227  return fuse_send_msg(req->se, req->ch, iov, count);
228 }
229 
230 static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
231  int count)
232 {
233  int res;
234 
235  res = fuse_send_reply_iov_nofree(req, error, iov, count);
236  fuse_free_req(req);
237  return res;
238 }
239 
240 static int send_reply(fuse_req_t req, int error, const void *arg,
241  size_t argsize)
242 {
243  struct iovec iov[2];
244  int count = 1;
245  if (argsize) {
246  iov[1].iov_base = (void *) arg;
247  iov[1].iov_len = argsize;
248  count++;
249  }
250  return send_reply_iov(req, error, iov, count);
251 }
252 
253 int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
254 {
255  int res;
256  struct iovec *padded_iov;
257 
258  padded_iov = malloc((count + 1) * sizeof(struct iovec));
259  if (padded_iov == NULL)
260  return fuse_reply_err(req, ENOMEM);
261 
262  memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
263  count++;
264 
265  res = send_reply_iov(req, 0, padded_iov, count);
266  free(padded_iov);
267 
268  return res;
269 }
270 
271 
272 /* `buf` is allowed to be empty so that the proper size may be
273  allocated by the caller */
274 size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
275  const char *name, const struct stat *stbuf, off_t off)
276 {
277  (void)req;
278  size_t namelen;
279  size_t entlen;
280  size_t entlen_padded;
281  struct fuse_dirent *dirent;
282 
283  namelen = strlen(name);
284  entlen = FUSE_NAME_OFFSET + namelen;
285  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
286 
287  if ((buf == NULL) || (entlen_padded > bufsize))
288  return entlen_padded;
289 
290  dirent = (struct fuse_dirent*) buf;
291  dirent->ino = stbuf->st_ino;
292  dirent->off = off;
293  dirent->namelen = namelen;
294  dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
295  memcpy(dirent->name, name, namelen);
296  memset(dirent->name + namelen, 0, entlen_padded - entlen);
297 
298  return entlen_padded;
299 }
300 
301 static void convert_statfs(const struct statvfs *stbuf,
302  struct fuse_kstatfs *kstatfs)
303 {
304  kstatfs->bsize = stbuf->f_bsize;
305  kstatfs->frsize = stbuf->f_frsize;
306  kstatfs->blocks = stbuf->f_blocks;
307  kstatfs->bfree = stbuf->f_bfree;
308  kstatfs->bavail = stbuf->f_bavail;
309  kstatfs->files = stbuf->f_files;
310  kstatfs->ffree = stbuf->f_ffree;
311  kstatfs->namelen = stbuf->f_namemax;
312 }
313 
314 static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
315 {
316  return send_reply(req, 0, arg, argsize);
317 }
318 
319 int fuse_reply_err(fuse_req_t req, int err)
320 {
321  return send_reply(req, -err, NULL, 0);
322 }
323 
324 void fuse_reply_none(fuse_req_t req)
325 {
326  fuse_free_req(req);
327 }
328 
329 static unsigned long calc_timeout_sec(double t)
330 {
331  if (t > (double) ULONG_MAX)
332  return ULONG_MAX;
333  else if (t < 0.0)
334  return 0;
335  else
336  return (unsigned long) t;
337 }
338 
339 static unsigned int calc_timeout_nsec(double t)
340 {
341  double f = t - (double) calc_timeout_sec(t);
342  if (f < 0.0)
343  return 0;
344  else if (f >= 0.999999999)
345  return 999999999;
346  else
347  return (unsigned int) (f * 1.0e9);
348 }
349 
350 static void fill_entry(struct fuse_entry_out *arg,
351  const struct fuse_entry_param *e)
352 {
353  arg->nodeid = e->ino;
354  arg->generation = e->generation;
355  arg->entry_valid = calc_timeout_sec(e->entry_timeout);
356  arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
357  arg->attr_valid = calc_timeout_sec(e->attr_timeout);
358  arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
359  convert_stat(&e->attr, &arg->attr);
360 }
361 
362 /* `buf` is allowed to be empty so that the proper size may be
363  allocated by the caller */
364 size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
365  const char *name,
366  const struct fuse_entry_param *e, off_t off)
367 {
368  (void)req;
369  size_t namelen;
370  size_t entlen;
371  size_t entlen_padded;
372 
373  namelen = strlen(name);
374  entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
375  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
376  if ((buf == NULL) || (entlen_padded > bufsize))
377  return entlen_padded;
378 
379  struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
380  memset(&dp->entry_out, 0, sizeof(dp->entry_out));
381  fill_entry(&dp->entry_out, e);
382 
383  struct fuse_dirent *dirent = &dp->dirent;
384  dirent->ino = e->attr.st_ino;
385  dirent->off = off;
386  dirent->namelen = namelen;
387  dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
388  memcpy(dirent->name, name, namelen);
389  memset(dirent->name + namelen, 0, entlen_padded - entlen);
390 
391  return entlen_padded;
392 }
393 
394 static void fill_open(struct fuse_open_out *arg,
395  const struct fuse_file_info *f)
396 {
397  arg->fh = f->fh;
398  if (f->direct_io)
399  arg->open_flags |= FOPEN_DIRECT_IO;
400  if (f->keep_cache)
401  arg->open_flags |= FOPEN_KEEP_CACHE;
402  if (f->cache_readdir)
403  arg->open_flags |= FOPEN_CACHE_DIR;
404  if (f->nonseekable)
405  arg->open_flags |= FOPEN_NONSEEKABLE;
406  if (f->noflush)
407  arg->open_flags |= FOPEN_NOFLUSH;
408  if (f->parallel_direct_writes)
409  arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
410 }
411 
412 int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
413 {
414  struct fuse_entry_out arg;
415  size_t size = req->se->conn.proto_minor < 9 ?
416  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
417 
418  /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
419  negative entry */
420  if (!e->ino && req->se->conn.proto_minor < 4)
421  return fuse_reply_err(req, ENOENT);
422 
423  memset(&arg, 0, sizeof(arg));
424  fill_entry(&arg, e);
425  return send_reply_ok(req, &arg, size);
426 }
427 
428 int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
429  const struct fuse_file_info *f)
430 {
431  char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
432  size_t entrysize = req->se->conn.proto_minor < 9 ?
433  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
434  struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
435  struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
436 
437  memset(buf, 0, sizeof(buf));
438  fill_entry(earg, e);
439  fill_open(oarg, f);
440  return send_reply_ok(req, buf,
441  entrysize + sizeof(struct fuse_open_out));
442 }
443 
444 int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
445  double attr_timeout)
446 {
447  struct fuse_attr_out arg;
448  size_t size = req->se->conn.proto_minor < 9 ?
449  FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
450 
451  memset(&arg, 0, sizeof(arg));
452  arg.attr_valid = calc_timeout_sec(attr_timeout);
453  arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
454  convert_stat(attr, &arg.attr);
455 
456  return send_reply_ok(req, &arg, size);
457 }
458 
459 int fuse_reply_readlink(fuse_req_t req, const char *linkname)
460 {
461  return send_reply_ok(req, linkname, strlen(linkname));
462 }
463 
464 int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
465 {
466  struct fuse_open_out arg;
467 
468  memset(&arg, 0, sizeof(arg));
469  fill_open(&arg, f);
470  return send_reply_ok(req, &arg, sizeof(arg));
471 }
472 
473 int fuse_reply_write(fuse_req_t req, size_t count)
474 {
475  struct fuse_write_out arg;
476 
477  memset(&arg, 0, sizeof(arg));
478  arg.size = count;
479 
480  return send_reply_ok(req, &arg, sizeof(arg));
481 }
482 
483 int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
484 {
485  return send_reply_ok(req, buf, size);
486 }
487 
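/* Non-splice reply path: a single in-memory buffer is appended to the
   iovec and sent directly; anything else is first copied into one
   page-aligned allocation via fuse_buf_copy() */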
488 static int fuse_send_data_iov_fallback(struct fuse_session *se,
489  struct fuse_chan *ch,
490  struct iovec *iov, int iov_count,
491  struct fuse_bufvec *buf,
492  size_t len)
493 {
494  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
495  void *mbuf;
496  int res;
497 
498  /* Optimize common case */
499  if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
500  !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
501  /* FIXME: also avoid memory copy if there are multiple buffers
502  but none of them contain an fd */
503 
504  iov[iov_count].iov_base = buf->buf[0].mem;
505  iov[iov_count].iov_len = len;
506  iov_count++;
507  return fuse_send_msg(se, ch, iov, iov_count);
508  }
509 
510  res = posix_memalign(&mbuf, pagesize, len);
511  if (res != 0)
512  return res;
513 
514  mem_buf.buf[0].mem = mbuf;
515  res = fuse_buf_copy(&mem_buf, buf, 0);
516  if (res < 0) {
517  free(mbuf);
518  return -res;
519  }
520  len = res;
521 
522  iov[iov_count].iov_base = mbuf;
523  iov[iov_count].iov_len = len;
524  iov_count++;
525  res = fuse_send_msg(se, ch, iov, iov_count);
526  free(mbuf);
527 
528  return res;
529 }
530 
531 struct fuse_ll_pipe {
532  size_t size;
533  int can_grow;
534  int pipe[2];
535 };
536 
537 static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
538 {
539  close(llp->pipe[0]);
540  close(llp->pipe[1]);
541  free(llp);
542 }
543 
544 #ifdef HAVE_SPLICE
545 #if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
546 static int fuse_pipe(int fds[2])
547 {
548  int rv = pipe(fds);
549 
550  if (rv == -1)
551  return rv;
552 
553  if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
554  fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
555  fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
556  fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
557  close(fds[0]);
558  close(fds[1]);
559  rv = -1;
560  }
561  return rv;
562 }
563 #else
564 static int fuse_pipe(int fds[2])
565 {
566  return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
567 }
568 #endif
569 
570 static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
571 {
572  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
573  if (llp == NULL) {
574  int res;
575 
576  llp = malloc(sizeof(struct fuse_ll_pipe));
577  if (llp == NULL)
578  return NULL;
579 
580  res = fuse_pipe(llp->pipe);
581  if (res == -1) {
582  free(llp);
583  return NULL;
584  }
585 
586  /*
587  * the default size is 16 pages on Linux
588  */
589  llp->size = pagesize * 16;
590  llp->can_grow = 1;
591 
592  pthread_setspecific(se->pipe_key, llp);
593  }
594 
595  return llp;
596 }
597 #endif
598 
599 static void fuse_ll_clear_pipe(struct fuse_session *se)
600 {
601  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
602  if (llp) {
603  pthread_setspecific(se->pipe_key, NULL);
604  fuse_ll_pipe_free(llp);
605  }
606 }
607 
608 #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
609 static int read_back(int fd, char *buf, size_t len)
610 {
611  int res;
612 
613  res = read(fd, buf, len);
614  if (res == -1) {
615  fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
616  return -EIO;
617  }
618  if (res != len) {
619  fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
620  return -EIO;
621  }
622  return 0;
623 }
624 
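/* Grow the pipe to the system-wide maximum read from
   /proc/sys/fs/pipe-max-size; returns the new size or -errno */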
625 static int grow_pipe_to_max(int pipefd)
626 {
627  int max;
628  int res;
629  int maxfd;
630  char buf[32];
631 
632  maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
633  if (maxfd < 0)
634  return -errno;
635 
636  res = read(maxfd, buf, sizeof(buf) - 1);
637  if (res < 0) {
638  int saved_errno;
639 
640  saved_errno = errno;
641  close(maxfd);
642  return -saved_errno;
643  }
644  close(maxfd);
645  buf[res] = '\0';
646 
647  max = atoi(buf);
648  res = fcntl(pipefd, F_SETPIPE_SZ, max);
649  if (res < 0)
650  return -errno;
651  return max;
652 }
653 
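/* Zero-copy reply path: the header is vmspliced and the data spliced
   through a per-thread pipe.  If splicing is not possible (reply too
   small, missing kernel support, pipe too small, broken
   SPLICE_F_NONBLOCK) this falls back to fuse_send_data_iov_fallback() */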
654 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
655  struct iovec *iov, int iov_count,
656  struct fuse_bufvec *buf, unsigned int flags)
657 {
658  int res;
659  size_t len = fuse_buf_size(buf);
660  struct fuse_out_header *out = iov[0].iov_base;
661  struct fuse_ll_pipe *llp;
662  int splice_flags;
663  size_t pipesize;
664  size_t total_buf_size;
665  size_t idx;
666  size_t headerlen;
667  struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
668 
669  if (se->broken_splice_nonblock)
670  goto fallback;
671 
672  if (flags & FUSE_BUF_NO_SPLICE)
673  goto fallback;
674 
675  total_buf_size = 0;
676  for (idx = buf->idx; idx < buf->count; idx++) {
677  total_buf_size += buf->buf[idx].size;
678  if (idx == buf->idx)
679  total_buf_size -= buf->off;
680  }
681  if (total_buf_size < 2 * pagesize)
682  goto fallback;
683 
684  if (se->conn.proto_minor < 14 ||
685  !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
686  goto fallback;
687 
688  llp = fuse_ll_get_pipe(se);
689  if (llp == NULL)
690  goto fallback;
691 
692 
693  headerlen = iov_length(iov, iov_count);
694 
695  out->len = headerlen + len;
696 
697  /*
698  * Heuristic for the required pipe size, does not work if the
699  * source contains less than page size fragments
700  */
701  pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
702 
703  if (llp->size < pipesize) {
704  if (llp->can_grow) {
705  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
706  if (res == -1) {
707  res = grow_pipe_to_max(llp->pipe[0]);
708  if (res > 0)
709  llp->size = res;
710  llp->can_grow = 0;
711  goto fallback;
712  }
713  llp->size = res;
714  }
715  if (llp->size < pipesize)
716  goto fallback;
717  }
718 
719 
720  res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
721  if (res == -1)
722  goto fallback;
723 
724  if (res != headerlen) {
725  res = -EIO;
726  fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
727  headerlen);
728  goto clear_pipe;
729  }
730 
731  pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
732  pipe_buf.buf[0].fd = llp->pipe[1];
733 
734  res = fuse_buf_copy(&pipe_buf, buf,
735  FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
736  if (res < 0) {
737  if (res == -EAGAIN || res == -EINVAL) {
738  /*
739  * Should only get EAGAIN on kernels with
740  * broken SPLICE_F_NONBLOCK support (<=
741  * 2.6.35) where this error or a short read is
742  * returned even if the pipe itself is not
743  * full
744  *
745  * EINVAL might mean that splice can't handle
746  * this combination of input and output.
747  */
748  if (res == -EAGAIN)
749  se->broken_splice_nonblock = 1;
750 
751  pthread_setspecific(se->pipe_key, NULL);
752  fuse_ll_pipe_free(llp);
753  goto fallback;
754  }
755  res = -res;
756  goto clear_pipe;
757  }
758 
759  if (res != 0 && res < len) {
760  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
761  void *mbuf;
762  size_t now_len = res;
763  /*
764  * For regular files a short count is either
765  * 1) due to EOF, or
766  * 2) because of broken SPLICE_F_NONBLOCK (see above)
767  *
768  * For other inputs it's possible that we overflowed
769  * the pipe because of small buffer fragments.
770  */
771 
772  res = posix_memalign(&mbuf, pagesize, len);
773  if (res != 0)
774  goto clear_pipe;
775 
776  mem_buf.buf[0].mem = mbuf;
777  mem_buf.off = now_len;
778  res = fuse_buf_copy(&mem_buf, buf, 0);
779  if (res > 0) {
780  char *tmpbuf;
781  size_t extra_len = res;
782  /*
783  * Trickiest case: got more data. Need to get
784  * back the data from the pipe and then fall
785  * back to regular write.
786  */
787  tmpbuf = malloc(headerlen);
788  if (tmpbuf == NULL) {
789  free(mbuf);
790  res = ENOMEM;
791  goto clear_pipe;
792  }
793  res = read_back(llp->pipe[0], tmpbuf, headerlen);
794  free(tmpbuf);
795  if (res != 0) {
796  free(mbuf);
797  goto clear_pipe;
798  }
799  res = read_back(llp->pipe[0], mbuf, now_len);
800  if (res != 0) {
801  free(mbuf);
802  goto clear_pipe;
803  }
804  len = now_len + extra_len;
805  iov[iov_count].iov_base = mbuf;
806  iov[iov_count].iov_len = len;
807  iov_count++;
808  res = fuse_send_msg(se, ch, iov, iov_count);
809  free(mbuf);
810  return res;
811  }
812  free(mbuf);
813  res = now_len;
814  }
815  len = res;
816  out->len = headerlen + len;
817 
818  if (se->debug) {
819  fuse_log(FUSE_LOG_DEBUG,
820  " unique: %llu, success, outsize: %i (splice)\n",
821  (unsigned long long) out->unique, out->len);
822  }
823 
824  splice_flags = 0;
825  if ((flags & FUSE_BUF_SPLICE_MOVE) &&
826  (se->conn.want & FUSE_CAP_SPLICE_MOVE))
827  splice_flags |= SPLICE_F_MOVE;
828 
829  if (se->io != NULL && se->io->splice_send != NULL) {
830  res = se->io->splice_send(llp->pipe[0], NULL,
831  ch ? ch->fd : se->fd, NULL, out->len,
832  splice_flags, se->userdata);
833  } else {
834  res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
835  out->len, splice_flags);
836  }
837  if (res == -1) {
838  res = -errno;
839  perror("fuse: splice from pipe");
840  goto clear_pipe;
841  }
842  if (res != out->len) {
843  res = -EIO;
844  fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
845  res, out->len);
846  goto clear_pipe;
847  }
848  return 0;
849 
850 clear_pipe:
851  fuse_ll_clear_pipe(se);
852  return res;
853 
854 fallback:
855  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
856 }
857 #else
858 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
859  struct iovec *iov, int iov_count,
860  struct fuse_bufvec *buf, unsigned int flags)
861 {
862  size_t len = fuse_buf_size(buf);
863  (void) flags;
864 
865  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
866 }
867 #endif
868 
869 int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
870  enum fuse_buf_copy_flags flags)
871 {
872  struct iovec iov[2];
873  struct fuse_out_header out;
874  int res;
875 
876  iov[0].iov_base = &out;
877  iov[0].iov_len = sizeof(struct fuse_out_header);
878 
879  out.unique = req->unique;
880  out.error = 0;
881 
882  res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
883  if (res <= 0) {
884  fuse_free_req(req);
885  return res;
886  } else {
887  return fuse_reply_err(req, res);
888  }
889 }
890 
891 int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
892 {
893  struct fuse_statfs_out arg;
894  size_t size = req->se->conn.proto_minor < 4 ?
895  FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
896 
897  memset(&arg, 0, sizeof(arg));
898  convert_statfs(stbuf, &arg.st);
899 
900  return send_reply_ok(req, &arg, size);
901 }
902 
903 int fuse_reply_xattr(fuse_req_t req, size_t count)
904 {
905  struct fuse_getxattr_out arg;
906 
907  memset(&arg, 0, sizeof(arg));
908  arg.size = count;
909 
910  return send_reply_ok(req, &arg, sizeof(arg));
911 }
912 
913 int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
914 {
915  struct fuse_lk_out arg;
916 
917  memset(&arg, 0, sizeof(arg));
918  arg.lk.type = lock->l_type;
919  if (lock->l_type != F_UNLCK) {
920  arg.lk.start = lock->l_start;
921  if (lock->l_len == 0)
922  arg.lk.end = OFFSET_MAX;
923  else
924  arg.lk.end = lock->l_start + lock->l_len - 1;
925  }
926  arg.lk.pid = lock->l_pid;
927  return send_reply_ok(req, &arg, sizeof(arg));
928 }
929 
930 int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
931 {
932  struct fuse_bmap_out arg;
933 
934  memset(&arg, 0, sizeof(arg));
935  arg.block = idx;
936 
937  return send_reply_ok(req, &arg, sizeof(arg));
938 }
939 
940 static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
941  size_t count)
942 {
943  struct fuse_ioctl_iovec *fiov;
944  size_t i;
945 
946  fiov = malloc(sizeof(fiov[0]) * count);
947  if (!fiov)
948  return NULL;
949 
950  for (i = 0; i < count; i++) {
951  fiov[i].base = (uintptr_t) iov[i].iov_base;
952  fiov[i].len = iov[i].iov_len;
953  }
954 
955  return fiov;
956 }
957 
958 int fuse_reply_ioctl_retry(fuse_req_t req,
959  const struct iovec *in_iov, size_t in_count,
960  const struct iovec *out_iov, size_t out_count)
961 {
962  struct fuse_ioctl_out arg;
963  struct fuse_ioctl_iovec *in_fiov = NULL;
964  struct fuse_ioctl_iovec *out_fiov = NULL;
965  struct iovec iov[4];
966  size_t count = 1;
967  int res;
968 
969  memset(&arg, 0, sizeof(arg));
970  arg.flags |= FUSE_IOCTL_RETRY;
971  arg.in_iovs = in_count;
972  arg.out_iovs = out_count;
973  iov[count].iov_base = &arg;
974  iov[count].iov_len = sizeof(arg);
975  count++;
976 
977  if (req->se->conn.proto_minor < 16) {
978  if (in_count) {
979  iov[count].iov_base = (void *)in_iov;
980  iov[count].iov_len = sizeof(in_iov[0]) * in_count;
981  count++;
982  }
983 
984  if (out_count) {
985  iov[count].iov_base = (void *)out_iov;
986  iov[count].iov_len = sizeof(out_iov[0]) * out_count;
987  count++;
988  }
989  } else {
990  /* Can't handle non-compat 64bit ioctls on 32bit */
991  if (sizeof(void *) == 4 && req->ioctl_64bit) {
992  res = fuse_reply_err(req, EINVAL);
993  goto out;
994  }
995 
996  if (in_count) {
997  in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
998  if (!in_fiov)
999  goto enomem;
1000 
1001  iov[count].iov_base = (void *)in_fiov;
1002  iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1003  count++;
1004  }
1005  if (out_count) {
1006  out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1007  if (!out_fiov)
1008  goto enomem;
1009 
1010  iov[count].iov_base = (void *)out_fiov;
1011  iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1012  count++;
1013  }
1014  }
1015 
1016  res = send_reply_iov(req, 0, iov, count);
1017 out:
1018  free(in_fiov);
1019  free(out_fiov);
1020 
1021  return res;
1022 
1023 enomem:
1024  res = fuse_reply_err(req, ENOMEM);
1025  goto out;
1026 }
1027 
1028 int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1029 {
1030  struct fuse_ioctl_out arg;
1031  struct iovec iov[3];
1032  size_t count = 1;
1033 
1034  memset(&arg, 0, sizeof(arg));
1035  arg.result = result;
1036  iov[count].iov_base = &arg;
1037  iov[count].iov_len = sizeof(arg);
1038  count++;
1039 
1040  if (size) {
1041  iov[count].iov_base = (char *) buf;
1042  iov[count].iov_len = size;
1043  count++;
1044  }
1045 
1046  return send_reply_iov(req, 0, iov, count);
1047 }
1048 
1049 int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1050  int count)
1051 {
1052  struct iovec *padded_iov;
1053  struct fuse_ioctl_out arg;
1054  int res;
1055 
1056  padded_iov = malloc((count + 2) * sizeof(struct iovec));
1057  if (padded_iov == NULL)
1058  return fuse_reply_err(req, ENOMEM);
1059 
1060  memset(&arg, 0, sizeof(arg));
1061  arg.result = result;
1062  padded_iov[1].iov_base = &arg;
1063  padded_iov[1].iov_len = sizeof(arg);
1064 
1065  memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1066 
1067  res = send_reply_iov(req, 0, padded_iov, count + 2);
1068  free(padded_iov);
1069 
1070  return res;
1071 }
1072 
1073 int fuse_reply_poll(fuse_req_t req, unsigned revents)
1074 {
1075  struct fuse_poll_out arg;
1076 
1077  memset(&arg, 0, sizeof(arg));
1078  arg.revents = revents;
1079 
1080  return send_reply_ok(req, &arg, sizeof(arg));
1081 }
1082 
1083 int fuse_reply_lseek(fuse_req_t req, off_t off)
1084 {
1085  struct fuse_lseek_out arg;
1086 
1087  memset(&arg, 0, sizeof(arg));
1088  arg.offset = off;
1089 
1090  return send_reply_ok(req, &arg, sizeof(arg));
1091 }
1092 
1093 static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1094 {
1095  char *name = (char *) inarg;
1096 
1097  if (req->se->op.lookup)
1098  req->se->op.lookup(req, nodeid, name);
1099  else
1100  fuse_reply_err(req, ENOSYS);
1101 }
1102 
1103 static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1104 {
1105  struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1106 
1107  if (req->se->op.forget)
1108  req->se->op.forget(req, nodeid, arg->nlookup);
1109  else
1110  fuse_reply_none(req);
1111 }
1112 
1113 static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
1114  const void *inarg)
1115 {
1116  struct fuse_batch_forget_in *arg = (void *) inarg;
1117  struct fuse_forget_one *param = (void *) PARAM(arg);
1118  unsigned int i;
1119 
1120  (void) nodeid;
1121 
1122  if (req->se->op.forget_multi) {
1123  req->se->op.forget_multi(req, arg->count,
1124  (struct fuse_forget_data *) param);
1125  } else if (req->se->op.forget) {
1126  for (i = 0; i < arg->count; i++) {
1127  struct fuse_forget_one *forget = &param[i];
1128  struct fuse_req *dummy_req;
1129 
1130  dummy_req = fuse_ll_alloc_req(req->se);
1131  if (dummy_req == NULL)
1132  break;
1133 
1134  dummy_req->unique = req->unique;
1135  dummy_req->ctx = req->ctx;
1136  dummy_req->ch = NULL;
1137 
1138  req->se->op.forget(dummy_req, forget->nodeid,
1139  forget->nlookup);
1140  }
1141  fuse_reply_none(req);
1142  } else {
1143  fuse_reply_none(req);
1144  }
1145 }
1146 
1147 static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1148 {
1149  struct fuse_file_info *fip = NULL;
1150  struct fuse_file_info fi;
1151 
1152  if (req->se->conn.proto_minor >= 9) {
1153  struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1154 
1155  if (arg->getattr_flags & FUSE_GETATTR_FH) {
1156  memset(&fi, 0, sizeof(fi));
1157  fi.fh = arg->fh;
1158  fip = &fi;
1159  }
1160  }
1161 
1162  if (req->se->op.getattr)
1163  req->se->op.getattr(req, nodeid, fip);
1164  else
1165  fuse_reply_err(req, ENOSYS);
1166 }
1167 
1168 static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1169 {
1170  struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1171 
1172  if (req->se->op.setattr) {
1173  struct fuse_file_info *fi = NULL;
1174  struct fuse_file_info fi_store;
1175  struct stat stbuf;
1176  memset(&stbuf, 0, sizeof(stbuf));
1177  convert_attr(arg, &stbuf);
1178  if (arg->valid & FATTR_FH) {
1179  arg->valid &= ~FATTR_FH;
1180  memset(&fi_store, 0, sizeof(fi_store));
1181  fi = &fi_store;
1182  fi->fh = arg->fh;
1183  }
1184  arg->valid &=
1185  FUSE_SET_ATTR_MODE |
1186  FUSE_SET_ATTR_UID |
1187  FUSE_SET_ATTR_GID |
1188  FUSE_SET_ATTR_SIZE |
1189  FUSE_SET_ATTR_ATIME |
1190  FUSE_SET_ATTR_MTIME |
1191  FUSE_SET_ATTR_KILL_SUID |
1192  FUSE_SET_ATTR_KILL_SGID |
1193  FUSE_SET_ATTR_ATIME_NOW |
1194  FUSE_SET_ATTR_MTIME_NOW |
1195  FUSE_SET_ATTR_CTIME;
1196 
1197  req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1198  } else
1199  fuse_reply_err(req, ENOSYS);
1200 }
1201 
1202 static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1203 {
1204  struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1205 
1206  if (req->se->op.access)
1207  req->se->op.access(req, nodeid, arg->mask);
1208  else
1209  fuse_reply_err(req, ENOSYS);
1210 }
1211 
1212 static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1213 {
1214  (void) inarg;
1215 
1216  if (req->se->op.readlink)
1217  req->se->op.readlink(req, nodeid);
1218  else
1219  fuse_reply_err(req, ENOSYS);
1220 }
1221 
1222 static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1223 {
1224  struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1225  char *name = PARAM(arg);
1226 
1227  if (req->se->conn.proto_minor >= 12)
1228  req->ctx.umask = arg->umask;
1229  else
1230  name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1231 
1232  if (req->se->op.mknod)
1233  req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1234  else
1235  fuse_reply_err(req, ENOSYS);
1236 }
1237 
1238 static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1239 {
1240  struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1241 
1242  if (req->se->conn.proto_minor >= 12)
1243  req->ctx.umask = arg->umask;
1244 
1245  if (req->se->op.mkdir)
1246  req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1247  else
1248  fuse_reply_err(req, ENOSYS);
1249 }
1250 
1251 static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1252 {
1253  char *name = (char *) inarg;
1254 
1255  if (req->se->op.unlink)
1256  req->se->op.unlink(req, nodeid, name);
1257  else
1258  fuse_reply_err(req, ENOSYS);
1259 }
1260 
1261 static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1262 {
1263  char *name = (char *) inarg;
1264 
1265  if (req->se->op.rmdir)
1266  req->se->op.rmdir(req, nodeid, name);
1267  else
1268  fuse_reply_err(req, ENOSYS);
1269 }
1270 
1271 static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1272 {
1273  char *name = (char *) inarg;
1274  char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1275 
1276  if (req->se->op.symlink)
1277  req->se->op.symlink(req, linkname, nodeid, name);
1278  else
1279  fuse_reply_err(req, ENOSYS);
1280 }
1281 
1282 static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1283 {
1284  struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1285  char *oldname = PARAM(arg);
1286  char *newname = oldname + strlen(oldname) + 1;
1287 
1288  if (req->se->op.rename)
1289  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1290  0);
1291  else
1292  fuse_reply_err(req, ENOSYS);
1293 }
1294 
1295 static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1296 {
1297  struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1298  char *oldname = PARAM(arg);
1299  char *newname = oldname + strlen(oldname) + 1;
1300 
1301  if (req->se->op.rename)
1302  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1303  arg->flags);
1304  else
1305  fuse_reply_err(req, ENOSYS);
1306 }
1307 
1308 static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1309 {
1310  struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1311 
1312  if (req->se->op.link)
1313  req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1314  else
1315  fuse_reply_err(req, ENOSYS);
1316 }
1317 
1318 static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1319 {
1320  struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1321 
1322  if (req->se->op.create) {
1323  struct fuse_file_info fi;
1324  char *name = PARAM(arg);
1325 
1326  memset(&fi, 0, sizeof(fi));
1327  fi.flags = arg->flags;
1328 
1329  if (req->se->conn.proto_minor >= 12)
1330  req->ctx.umask = arg->umask;
1331  else
1332  name = (char *) inarg + sizeof(struct fuse_open_in);
1333 
1334  req->se->op.create(req, nodeid, name, arg->mode, &fi);
1335  } else
1336  fuse_reply_err(req, ENOSYS);
1337 }
1338 
1339 static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1340 {
1341  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1342  struct fuse_file_info fi;
1343 
1344  memset(&fi, 0, sizeof(fi));
1345  fi.flags = arg->flags;
1346 
1347  if (req->se->op.open)
1348  req->se->op.open(req, nodeid, &fi);
1349  else
1350  fuse_reply_open(req, &fi);
1351 }
1352 
1353 static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1354 {
1355  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1356 
1357  if (req->se->op.read) {
1358  struct fuse_file_info fi;
1359 
1360  memset(&fi, 0, sizeof(fi));
1361  fi.fh = arg->fh;
1362  if (req->se->conn.proto_minor >= 9) {
1363  fi.lock_owner = arg->lock_owner;
1364  fi.flags = arg->flags;
1365  }
1366  req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1367  } else
1368  fuse_reply_err(req, ENOSYS);
1369 }
1370 
1371 static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1372 {
1373  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1374  struct fuse_file_info fi;
1375  char *param;
1376 
1377  memset(&fi, 0, sizeof(fi));
1378  fi.fh = arg->fh;
1379  fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1380 
1381  if (req->se->conn.proto_minor < 9) {
1382  param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1383  } else {
1384  fi.lock_owner = arg->lock_owner;
1385  fi.flags = arg->flags;
1386  param = PARAM(arg);
1387  }
1388 
1389  if (req->se->op.write)
1390  req->se->op.write(req, nodeid, param, arg->size,
1391  arg->offset, &fi);
1392  else
1393  fuse_reply_err(req, ENOSYS);
1394 }
1395 
1396 static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
1397  const struct fuse_buf *ibuf)
1398 {
1399  struct fuse_session *se = req->se;
1400  struct fuse_bufvec bufv = {
1401  .buf[0] = *ibuf,
1402  .count = 1,
1403  };
1404  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1405  struct fuse_file_info fi;
1406 
1407  memset(&fi, 0, sizeof(fi));
1408  fi.fh = arg->fh;
1409  fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1410 
1411  if (se->conn.proto_minor < 9) {
1412  bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1413  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1414  FUSE_COMPAT_WRITE_IN_SIZE;
1415  assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
1416  } else {
1417  fi.lock_owner = arg->lock_owner;
1418  fi.flags = arg->flags;
1419  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
1420  bufv.buf[0].mem = PARAM(arg);
1421 
1422  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1423  sizeof(struct fuse_write_in);
1424  }
1425  if (bufv.buf[0].size < arg->size) {
1426  fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
1427  fuse_reply_err(req, EIO);
1428  goto out;
1429  }
1430  bufv.buf[0].size = arg->size;
1431 
1432  se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);
1433 
1434 out:
1435  /* Need to reset the pipe if ->write_buf() didn't consume all data */
1436  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
1437  fuse_ll_clear_pipe(se);
1438 }
1439 
1440 static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1441 {
1442  struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1443  struct fuse_file_info fi;
1444 
1445  memset(&fi, 0, sizeof(fi));
1446  fi.fh = arg->fh;
1447  fi.flush = 1;
1448  if (req->se->conn.proto_minor >= 7)
1449  fi.lock_owner = arg->lock_owner;
1450 
1451  if (req->se->op.flush)
1452  req->se->op.flush(req, nodeid, &fi);
1453  else
1454  fuse_reply_err(req, ENOSYS);
1455 }
1456 
1457 static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1458 {
1459  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1460  struct fuse_file_info fi;
1461 
1462  memset(&fi, 0, sizeof(fi));
1463  fi.flags = arg->flags;
1464  fi.fh = arg->fh;
1465  if (req->se->conn.proto_minor >= 8) {
1466  fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1467  fi.lock_owner = arg->lock_owner;
1468  }
1469  if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1470  fi.flock_release = 1;
1471  fi.lock_owner = arg->lock_owner;
1472  }
1473 
1474  if (req->se->op.release)
1475  req->se->op.release(req, nodeid, &fi);
1476  else
1477  fuse_reply_err(req, 0);
1478 }
1479 
1480 static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1481 {
1482  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1483  struct fuse_file_info fi;
1484  int datasync = arg->fsync_flags & 1;
1485 
1486  memset(&fi, 0, sizeof(fi));
1487  fi.fh = arg->fh;
1488 
1489  if (req->se->op.fsync)
1490  req->se->op.fsync(req, nodeid, datasync, &fi);
1491  else
1492  fuse_reply_err(req, ENOSYS);
1493 }
1494 
1495 static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1496 {
1497  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1498  struct fuse_file_info fi;
1499 
1500  memset(&fi, 0, sizeof(fi));
1501  fi.flags = arg->flags;
1502 
1503  if (req->se->op.opendir)
1504  req->se->op.opendir(req, nodeid, &fi);
1505  else
1506  fuse_reply_open(req, &fi);
1507 }
1508 
1509 static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1510 {
1511  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1512  struct fuse_file_info fi;
1513 
1514  memset(&fi, 0, sizeof(fi));
1515  fi.fh = arg->fh;
1516 
1517  if (req->se->op.readdir)
1518  req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1519  else
1520  fuse_reply_err(req, ENOSYS);
1521 }
1522 
1523 static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1524 {
1525  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1526  struct fuse_file_info fi;
1527 
1528  memset(&fi, 0, sizeof(fi));
1529  fi.fh = arg->fh;
1530 
1531  if (req->se->op.readdirplus)
1532  req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1533  else
1534  fuse_reply_err(req, ENOSYS);
1535 }
1536 
1537 static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1538 {
1539  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1540  struct fuse_file_info fi;
1541 
1542  memset(&fi, 0, sizeof(fi));
1543  fi.flags = arg->flags;
1544  fi.fh = arg->fh;
1545 
1546  if (req->se->op.releasedir)
1547  req->se->op.releasedir(req, nodeid, &fi);
1548  else
1549  fuse_reply_err(req, 0);
1550 }
1551 
1552 static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1553 {
1554  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1555  struct fuse_file_info fi;
1556  int datasync = arg->fsync_flags & 1;
1557 
1558  memset(&fi, 0, sizeof(fi));
1559  fi.fh = arg->fh;
1560 
1561  if (req->se->op.fsyncdir)
1562  req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1563  else
1564  fuse_reply_err(req, ENOSYS);
1565 }
1566 
1567 static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1568 {
1569  (void) nodeid;
1570  (void) inarg;
1571 
1572  if (req->se->op.statfs)
1573  req->se->op.statfs(req, nodeid);
1574  else {
1575  struct statvfs buf = {
1576  .f_namemax = 255,
1577  .f_bsize = 512,
1578  };
1579  fuse_reply_statfs(req, &buf);
1580  }
1581 }
1582 
1583 static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1584 {
1585  struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1586  char *name = PARAM(arg);
1587  char *value = name + strlen(name) + 1;
1588 
1589  if (req->se->op.setxattr)
1590  req->se->op.setxattr(req, nodeid, name, value, arg->size,
1591  arg->flags);
1592  else
1593  fuse_reply_err(req, ENOSYS);
1594 }
1595 
1596 static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1597 {
1598  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1599 
1600  if (req->se->op.getxattr)
1601  req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1602  else
1603  fuse_reply_err(req, ENOSYS);
1604 }
1605 
1606 static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1607 {
1608  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1609 
1610  if (req->se->op.listxattr)
1611  req->se->op.listxattr(req, nodeid, arg->size);
1612  else
1613  fuse_reply_err(req, ENOSYS);
1614 }
1615 
1616 static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1617 {
1618  char *name = (char *) inarg;
1619 
1620  if (req->se->op.removexattr)
1621  req->se->op.removexattr(req, nodeid, name);
1622  else
1623  fuse_reply_err(req, ENOSYS);
1624 }
1625 
1626 static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1627  struct flock *flock)
1628 {
1629  memset(flock, 0, sizeof(struct flock));
1630  flock->l_type = fl->type;
1631  flock->l_whence = SEEK_SET;
1632  flock->l_start = fl->start;
1633  if (fl->end == OFFSET_MAX)
1634  flock->l_len = 0;
1635  else
1636  flock->l_len = fl->end - fl->start + 1;
1637  flock->l_pid = fl->pid;
1638 }
1639 
1640 static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1641 {
1642  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1643  struct fuse_file_info fi;
1644  struct flock flock;
1645 
1646  memset(&fi, 0, sizeof(fi));
1647  fi.fh = arg->fh;
1648  fi.lock_owner = arg->owner;
1649 
1650  convert_fuse_file_lock(&arg->lk, &flock);
1651  if (req->se->op.getlk)
1652  req->se->op.getlk(req, nodeid, &fi, &flock);
1653  else
1654  fuse_reply_err(req, ENOSYS);
1655 }
1656 
1657 static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1658  const void *inarg, int sleep)
1659 {
1660  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1661  struct fuse_file_info fi;
1662  struct flock flock;
1663 
1664  memset(&fi, 0, sizeof(fi));
1665  fi.fh = arg->fh;
1666  fi.lock_owner = arg->owner;
1667 
1668  if (arg->lk_flags & FUSE_LK_FLOCK) {
1669  int op = 0;
1670 
1671  switch (arg->lk.type) {
1672  case F_RDLCK:
1673  op = LOCK_SH;
1674  break;
1675  case F_WRLCK:
1676  op = LOCK_EX;
1677  break;
1678  case F_UNLCK:
1679  op = LOCK_UN;
1680  break;
1681  }
1682  if (!sleep)
1683  op |= LOCK_NB;
1684 
1685  if (req->se->op.flock)
1686  req->se->op.flock(req, nodeid, &fi, op);
1687  else
1688  fuse_reply_err(req, ENOSYS);
1689  } else {
1690  convert_fuse_file_lock(&arg->lk, &flock);
1691  if (req->se->op.setlk)
1692  req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1693  else
1694  fuse_reply_err(req, ENOSYS);
1695  }
1696 }
1697 
1698 static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1699 {
1700  do_setlk_common(req, nodeid, inarg, 0);
1701 }
1702 
1703 static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1704 {
1705  do_setlk_common(req, nodeid, inarg, 1);
1706 }
1707 
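/* Find the in-flight request targeted by the INTERRUPT request *req*,
   mark it interrupted and run its interrupt callback.  Returns 1 if the
   target (or a matching queued interrupt) was found, 0 otherwise */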
1708 static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1709 {
1710  struct fuse_req *curr;
1711 
1712  for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1713  if (curr->unique == req->u.i.unique) {
1714  fuse_interrupt_func_t func;
1715  void *data;
1716 
1717  curr->ctr++;
1718  pthread_mutex_unlock(&se->lock);
1719 
1720  /* Ugh, ugly locking */
1721  pthread_mutex_lock(&curr->lock);
1722  pthread_mutex_lock(&se->lock);
1723  curr->interrupted = 1;
1724  func = curr->u.ni.func;
1725  data = curr->u.ni.data;
1726  pthread_mutex_unlock(&se->lock);
1727  if (func)
1728  func(curr, data);
1729  pthread_mutex_unlock(&curr->lock);
1730 
1731  pthread_mutex_lock(&se->lock);
1732  curr->ctr--;
1733  if (!curr->ctr) {
1734  fuse_chan_put(req->ch);
1735  req->ch = NULL;
1736  destroy_req(curr);
1737  }
1738 
1739  return 1;
1740  }
1741  }
1742  for (curr = se->interrupts.next; curr != &se->interrupts;
1743  curr = curr->next) {
1744  if (curr->u.i.unique == req->u.i.unique)
1745  return 1;
1746  }
1747  return 0;
1748 }
1749 
1750 static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1751 {
1752  struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1753  struct fuse_session *se = req->se;
1754 
1755  (void) nodeid;
1756  if (se->debug)
1757  fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1758  (unsigned long long) arg->unique);
1759 
1760  req->u.i.unique = arg->unique;
1761 
1762  pthread_mutex_lock(&se->lock);
1763  if (find_interrupted(se, req)) {
1764  fuse_chan_put(req->ch);
1765  req->ch = NULL;
1766  destroy_req(req);
1767  } else
1768  list_add_req(req, &se->interrupts);
1769  pthread_mutex_unlock(&se->lock);
1770 }
1771 
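/* If an INTERRUPT has already arrived for *req*, mark the request as
   interrupted and drop the queued interrupt.  Otherwise dequeue one
   pending interrupt (if any) and return it to the caller */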
1772 static struct fuse_req *check_interrupt(struct fuse_session *se,
1773  struct fuse_req *req)
1774 {
1775  struct fuse_req *curr;
1776 
1777  for (curr = se->interrupts.next; curr != &se->interrupts;
1778  curr = curr->next) {
1779  if (curr->u.i.unique == req->unique) {
1780  req->interrupted = 1;
1781  list_del_req(curr);
1782  fuse_chan_put(curr->ch);
1783  curr->ch = NULL;
1784  destroy_req(curr);
1785  return NULL;
1786  }
1787  }
1788  curr = se->interrupts.next;
1789  if (curr != &se->interrupts) {
1790  list_del_req(curr);
1791  list_init_req(curr);
1792  return curr;
1793  } else
1794  return NULL;
1795 }
1796 
1797 static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1798 {
1799  struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1800 
1801  if (req->se->op.bmap)
1802  req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1803  else
1804  fuse_reply_err(req, ENOSYS);
1805 }
1806 
1807 static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1808 {
1809  struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1810  unsigned int flags = arg->flags;
1811  void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1812  struct fuse_file_info fi;
1813 
1814  if (flags & FUSE_IOCTL_DIR &&
1815  !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
1816  fuse_reply_err(req, ENOTTY);
1817  return;
1818  }
1819 
1820  memset(&fi, 0, sizeof(fi));
1821  fi.fh = arg->fh;
1822 
1823  if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1824  !(flags & FUSE_IOCTL_32BIT)) {
1825  req->ioctl_64bit = 1;
1826  }
1827 
1828  if (req->se->op.ioctl)
1829  req->se->op.ioctl(req, nodeid, arg->cmd,
1830  (void *)(uintptr_t)arg->arg, &fi, flags,
1831  in_buf, arg->in_size, arg->out_size);
1832  else
1833  fuse_reply_err(req, ENOSYS);
1834 }
1835 
1836 void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1837 {
1838  free(ph);
1839 }
1840 
1841 static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1842 {
1843  struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1844  struct fuse_file_info fi;
1845 
1846  memset(&fi, 0, sizeof(fi));
1847  fi.fh = arg->fh;
1848  fi.poll_events = arg->events;
1849 
1850  if (req->se->op.poll) {
1851  struct fuse_pollhandle *ph = NULL;
1852 
1853  if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1854  ph = malloc(sizeof(struct fuse_pollhandle));
1855  if (ph == NULL) {
1856  fuse_reply_err(req, ENOMEM);
1857  return;
1858  }
1859  ph->kh = arg->kh;
1860  ph->se = req->se;
1861  }
1862 
1863  req->se->op.poll(req, nodeid, &fi, ph);
1864  } else {
1865  fuse_reply_err(req, ENOSYS);
1866  }
1867 }
1868 
1869 static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1870 {
1871  struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1872  struct fuse_file_info fi;
1873 
1874  memset(&fi, 0, sizeof(fi));
1875  fi.fh = arg->fh;
1876 
1877  if (req->se->op.fallocate)
1878  req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1879  else
1880  fuse_reply_err(req, ENOSYS);
1881 }
1882 
1883 static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1884 {
1885  struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1886  struct fuse_file_info fi_in, fi_out;
1887 
1888  memset(&fi_in, 0, sizeof(fi_in));
1889  fi_in.fh = arg->fh_in;
1890 
1891  memset(&fi_out, 0, sizeof(fi_out));
1892  fi_out.fh = arg->fh_out;
1893 
1894 
1895  if (req->se->op.copy_file_range)
1896  req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1897  &fi_in, arg->nodeid_out,
1898  arg->off_out, &fi_out, arg->len,
1899  arg->flags);
1900  else
1901  fuse_reply_err(req, ENOSYS);
1902 }
1903 
1904 static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1905 {
1906  struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1907  struct fuse_file_info fi;
1908 
1909  memset(&fi, 0, sizeof(fi));
1910  fi.fh = arg->fh;
1911 
1912  if (req->se->op.lseek)
1913  req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1914  else
1915  fuse_reply_err(req, ENOSYS);
1916 }
1917 
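/* Handle the kernel's INIT request: negotiate the protocol version and
   connection capabilities, size the read buffer and call the
   filesystem's init() handler before replying with fuse_init_out */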
1918 /* Prevent bogus data races (bogus since "init" is called before
1919  * multi-threading becomes relevant) */
1920 static __attribute__((no_sanitize("thread")))
1921 void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1922 {
1923  struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
1924  struct fuse_init_out outarg;
1925  struct fuse_session *se = req->se;
1926  size_t bufsize = se->bufsize;
1927  size_t outargsize = sizeof(outarg);
1928  uint64_t inargflags = 0;
1929  uint64_t outargflags = 0;
1930  (void) nodeid;
1931  if (se->debug) {
1932  fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
1933  if (arg->major == 7 && arg->minor >= 6) {
1934  fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
1935  fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
1936  arg->max_readahead);
1937  }
1938  }
1939  se->conn.proto_major = arg->major;
1940  se->conn.proto_minor = arg->minor;
1941  se->conn.capable = 0;
1942  se->conn.want = 0;
1943 
1944  memset(&outarg, 0, sizeof(outarg));
1945  outarg.major = FUSE_KERNEL_VERSION;
1946  outarg.minor = FUSE_KERNEL_MINOR_VERSION;
1947 
1948  if (arg->major < 7) {
1949  fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
1950  arg->major, arg->minor);
1951  fuse_reply_err(req, EPROTO);
1952  return;
1953  }
1954 
1955  if (arg->major > 7) {
1956  /* Wait for a second INIT request with a 7.X version */
1957  send_reply_ok(req, &outarg, sizeof(outarg));
1958  return;
1959  }
1960 
1961  if (arg->minor >= 6) {
1962  if (arg->max_readahead < se->conn.max_readahead)
1963  se->conn.max_readahead = arg->max_readahead;
1964  inargflags = arg->flags;
1965  if (inargflags & FUSE_INIT_EXT)
1966  inargflags = inargflags | (uint64_t) arg->flags2 << 32;
1967  if (inargflags & FUSE_ASYNC_READ)
1968  se->conn.capable |= FUSE_CAP_ASYNC_READ;
1969  if (inargflags & FUSE_POSIX_LOCKS)
1970  se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
1971  if (inargflags & FUSE_ATOMIC_O_TRUNC)
1972  se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
1973  if (inargflags & FUSE_EXPORT_SUPPORT)
1974  se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
1975  if (inargflags & FUSE_DONT_MASK)
1976  se->conn.capable |= FUSE_CAP_DONT_MASK;
1977  if (inargflags & FUSE_FLOCK_LOCKS)
1978  se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
1979  if (inargflags & FUSE_AUTO_INVAL_DATA)
1980  se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
1981  if (inargflags & FUSE_DO_READDIRPLUS)
1982  se->conn.capable |= FUSE_CAP_READDIRPLUS;
1983  if (inargflags & FUSE_READDIRPLUS_AUTO)
1984  se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
1985  if (inargflags & FUSE_ASYNC_DIO)
1986  se->conn.capable |= FUSE_CAP_ASYNC_DIO;
1987  if (inargflags & FUSE_WRITEBACK_CACHE)
1988  se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
1989  if (inargflags & FUSE_NO_OPEN_SUPPORT)
1990  se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
1991  if (inargflags & FUSE_PARALLEL_DIROPS)
1992  se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
1993  if (inargflags & FUSE_POSIX_ACL)
1994  se->conn.capable |= FUSE_CAP_POSIX_ACL;
1995  if (inargflags & FUSE_HANDLE_KILLPRIV)
1996  se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
1997  if (inargflags & FUSE_CACHE_SYMLINKS)
1998  se->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
1999  if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
2000  se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2001  if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2002  se->conn.capable |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2003  if (!(inargflags & FUSE_MAX_PAGES)) {
2004  size_t max_bufsize =
2005  FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2006  + FUSE_BUFFER_HEADER_SIZE;
2007  if (bufsize > max_bufsize) {
2008  bufsize = max_bufsize;
2009  }
2010  }
2011  if (arg->minor >= 38)
2012  se->conn.capable |= FUSE_CAP_EXPIRE_ONLY;
2013  } else {
2014  se->conn.max_readahead = 0;
2015  }
2016 
2017  if (se->conn.proto_minor >= 14) {
2018 #ifdef HAVE_SPLICE
2019 #ifdef HAVE_VMSPLICE
2020  if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2021  se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
2022  }
2023 #endif
2024  if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2025  se->conn.capable |= FUSE_CAP_SPLICE_READ;
2026  }
2027 #endif
2028  }
2029  if (se->conn.proto_minor >= 18)
2030  se->conn.capable |= FUSE_CAP_IOCTL_DIR;
2031 
2032  /* Default settings for modern filesystems.
2033  *
2034  * Most of these capabilities were disabled by default in
2035  * libfuse2 for backwards compatibility reasons. In libfuse3,
2036  * we can finally enable them by default (as long as they're
2037  * supported by the kernel).
2038  */
2039 #define LL_SET_DEFAULT(cond, cap) \
2040  if ((cond) && (se->conn.capable & (cap))) \
2041  se->conn.want |= (cap)
2042  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2043  LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
2044  LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2045  LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
2046  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2047  LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2048  LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2049  LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2050  LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2051  FUSE_CAP_POSIX_LOCKS);
2052  LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2053  LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2054  LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2055  FUSE_CAP_READDIRPLUS_AUTO);
2056  se->conn.time_gran = 1;
2057 
2058  if (bufsize < FUSE_MIN_READ_BUFFER) {
2059  fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
2060  bufsize);
2061  bufsize = FUSE_MIN_READ_BUFFER;
2062  }
2063  se->bufsize = bufsize;
2064 
2065  if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
2066  se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2067 
2068  se->got_init = 1;
2069  if (se->op.init)
2070  se->op.init(se->userdata, &se->conn);
2071 
2072  if (se->conn.want & (~se->conn.capable)) {
2073  fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
2074  "0x%x that are not supported by kernel, aborting.\n",
2075  se->conn.want & (~se->conn.capable));
2076  fuse_reply_err(req, EPROTO);
2077  se->error = -EPROTO;
2078  fuse_session_exit(se);
2079  return;
2080  }
2081 
2082  unsigned max_read_mo = get_max_read(se->mo);
2083  if (se->conn.max_read != max_read_mo) {
2084  fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2085  "requested different maximum read size (%u vs %u)\n",
2086  se->conn.max_read, max_read_mo);
2087  fuse_reply_err(req, EPROTO);
2088  se->error = -EPROTO;
2089  fuse_session_exit(se);
2090  return;
2091  }
2092 
2093  if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2094  se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2095  }
2096  if (arg->flags & FUSE_MAX_PAGES) {
2097  outarg.flags |= FUSE_MAX_PAGES;
2098  outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2099  }
2100  outargflags = outarg.flags;
2101  /* Always enable big writes; this is superseded
2102  by the max_write option */
2103  outargflags |= FUSE_BIG_WRITES;
2104 
2105  if (se->conn.want & FUSE_CAP_ASYNC_READ)
2106  outargflags |= FUSE_ASYNC_READ;
2107  if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
2108  outargflags |= FUSE_POSIX_LOCKS;
2109  if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
2110  outargflags |= FUSE_ATOMIC_O_TRUNC;
2111  if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
2112  outargflags |= FUSE_EXPORT_SUPPORT;
2113  if (se->conn.want & FUSE_CAP_DONT_MASK)
2114  outargflags |= FUSE_DONT_MASK;
2115  if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
2116  outargflags |= FUSE_FLOCK_LOCKS;
2117  if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
2118  outargflags |= FUSE_AUTO_INVAL_DATA;
2119  if (se->conn.want & FUSE_CAP_READDIRPLUS)
2120  outargflags |= FUSE_DO_READDIRPLUS;
2121  if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
2122  outargflags |= FUSE_READDIRPLUS_AUTO;
2123  if (se->conn.want & FUSE_CAP_ASYNC_DIO)
2124  outargflags |= FUSE_ASYNC_DIO;
2125  if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
2126  outargflags |= FUSE_WRITEBACK_CACHE;
2127  if (se->conn.want & FUSE_CAP_POSIX_ACL)
2128  outargflags |= FUSE_POSIX_ACL;
2129  if (se->conn.want & FUSE_CAP_CACHE_SYMLINKS)
2130  outargflags |= FUSE_CACHE_SYMLINKS;
2131  if (se->conn.want & FUSE_CAP_EXPLICIT_INVAL_DATA)
2132  outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2133 
2134  if (inargflags & FUSE_INIT_EXT) {
2135  outargflags |= FUSE_INIT_EXT;
2136  outarg.flags2 = outargflags >> 32;
2137  }
2138 
2139  outarg.flags = outargflags;
2140 
2141  outarg.max_readahead = se->conn.max_readahead;
2142  outarg.max_write = se->conn.max_write;
2143  if (se->conn.proto_minor >= 13) {
2144  if (se->conn.max_background >= (1 << 16))
2145  se->conn.max_background = (1 << 16) - 1;
2146  if (se->conn.congestion_threshold > se->conn.max_background)
2147  se->conn.congestion_threshold = se->conn.max_background;
2148  if (!se->conn.congestion_threshold) {
2149  se->conn.congestion_threshold =
2150  se->conn.max_background * 3 / 4;
2151  }
2152 
2153  outarg.max_background = se->conn.max_background;
2154  outarg.congestion_threshold = se->conn.congestion_threshold;
2155  }
2156  if (se->conn.proto_minor >= 23)
2157  outarg.time_gran = se->conn.time_gran;
2158 
2159  if (se->debug) {
2160  fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2161  fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2162  fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2163  outarg.max_readahead);
2164  fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2165  fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2166  outarg.max_background);
2167  fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2168  outarg.congestion_threshold);
2169  fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2170  outarg.time_gran);
2171  }
2172  if (arg->minor < 5)
2173  outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2174  else if (arg->minor < 23)
2175  outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2176 
2177  send_reply_ok(req, &outarg, outargsize);
2178 }
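
/*
 * Illustrative sketch (hypothetical, with example_* names): how a
 * filesystem's init() callback cooperates with the negotiation done in
 * do_init() above. Kernel-supported features arrive in conn->capable;
 * anything added to conn->want that is missing from conn->capable makes
 * do_init() abort the session with EPROTO, so requests should be
 * guarded by conn->capable.
 */
static void example_init(void *userdata, struct fuse_conn_info *conn)
{
	(void) userdata;

	/* Request writeback caching only if the kernel offers it */
	if (conn->capable & FUSE_CAP_WRITEBACK_CACHE)
		conn->want |= FUSE_CAP_WRITEBACK_CACHE;

	/* Drop a default that do_init() switched on automatically */
	conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA;
}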
2179 
2180 static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2181 {
2182  struct fuse_session *se = req->se;
2183 
2184  (void) nodeid;
2185  (void) inarg;
2186 
2187  se->got_destroy = 1;
2188  if (se->op.destroy)
2189  se->op.destroy(se->userdata);
2190 
2191  send_reply_ok(req, NULL, 0);
2192 }
2193 
2194 static void list_del_nreq(struct fuse_notify_req *nreq)
2195 {
2196  struct fuse_notify_req *prev = nreq->prev;
2197  struct fuse_notify_req *next = nreq->next;
2198  prev->next = next;
2199  next->prev = prev;
2200 }
2201 
2202 static void list_add_nreq(struct fuse_notify_req *nreq,
2203  struct fuse_notify_req *next)
2204 {
2205  struct fuse_notify_req *prev = next->prev;
2206  nreq->next = next;
2207  nreq->prev = prev;
2208  prev->next = nreq;
2209  next->prev = nreq;
2210 }
2211 
2212 static void list_init_nreq(struct fuse_notify_req *nreq)
2213 {
2214  nreq->next = nreq;
2215  nreq->prev = nreq;
2216 }
2217 
2218 static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2219  const void *inarg, const struct fuse_buf *buf)
2220 {
2221  struct fuse_session *se = req->se;
2222  struct fuse_notify_req *nreq;
2223  struct fuse_notify_req *head;
2224 
2225  pthread_mutex_lock(&se->lock);
2226  head = &se->notify_list;
2227  for (nreq = head->next; nreq != head; nreq = nreq->next) {
2228  if (nreq->unique == req->unique) {
2229  list_del_nreq(nreq);
2230  break;
2231  }
2232  }
2233  pthread_mutex_unlock(&se->lock);
2234 
2235  if (nreq != head)
2236  nreq->reply(nreq, req, nodeid, inarg, buf);
2237 }
2238 
2239 static int send_notify_iov(struct fuse_session *se, int notify_code,
2240  struct iovec *iov, int count)
2241 {
2242  struct fuse_out_header out;
2243 
2244  if (!se->got_init)
2245  return -ENOTCONN;
2246 
2247  out.unique = 0;
2248  out.error = notify_code;
2249  iov[0].iov_base = &out;
2250  iov[0].iov_len = sizeof(struct fuse_out_header);
2251 
2252  return fuse_send_msg(se, NULL, iov, count);
2253 }
2254 
2255 int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2256 {
2257  if (ph != NULL) {
2258  struct fuse_notify_poll_wakeup_out outarg;
2259  struct iovec iov[2];
2260 
2261  outarg.kh = ph->kh;
2262 
2263  iov[1].iov_base = &outarg;
2264  iov[1].iov_len = sizeof(outarg);
2265 
2266  return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2267  } else {
2268  return 0;
2269  }
2270 }
2271 
2272 int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2273  off_t off, off_t len)
2274 {
2275  struct fuse_notify_inval_inode_out outarg;
2276  struct iovec iov[2];
2277 
2278  if (!se)
2279  return -EINVAL;
2280 
2281  if (se->conn.proto_minor < 12)
2282  return -ENOSYS;
2283 
2284  outarg.ino = ino;
2285  outarg.off = off;
2286  outarg.len = len;
2287 
2288  iov[1].iov_base = &outarg;
2289  iov[1].iov_len = sizeof(outarg);
2290 
2291  return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2292 }
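
/*
 * Usage sketch (hypothetical caller): after file contents change behind
 * the kernel's back, drop the kernel's cached pages for the inode.
 * Passing off=0 and len=0 is the conventional "invalidate everything"
 * form; a negative return value is one of the -errno codes produced by
 * the checks above or by fuse_send_msg().
 */
static void example_drop_data_cache(struct fuse_session *se, fuse_ino_t ino)
{
	int err = fuse_lowlevel_notify_inval_inode(se, ino, 0, 0);

	if (err)
		fuse_log(FUSE_LOG_ERR, "notify_inval_inode: %s\n",
			 strerror(-err));
}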
2293 
2294 int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2295  const char *name, size_t namelen,
2296  enum fuse_expire_flags flags)
2297 {
2298  struct fuse_notify_inval_entry_out outarg;
2299  struct iovec iov[3];
2300 
2301  if (!se)
2302  return -EINVAL;
2303 
2304  if (se->conn.proto_minor < 12)
2305  return -ENOSYS;
2306 
2307  outarg.parent = parent;
2308  outarg.namelen = namelen;
2309  outarg.flags = 0;
2310  if (flags & FUSE_LL_EXPIRE_ONLY)
2311  outarg.flags |= FUSE_EXPIRE_ONLY;
2312 
2313  iov[1].iov_base = &outarg;
2314  iov[1].iov_len = sizeof(outarg);
2315  iov[2].iov_base = (void *)name;
2316  iov[2].iov_len = namelen + 1;
2317 
2318  return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2319 }
2320 
2321 int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2322  const char *name, size_t namelen)
2323 {
2324  return fuse_lowlevel_notify_expire_entry(se, parent, name, namelen, 0);
2325 }
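
/*
 * Usage sketch (hypothetical callers, return values ignored for
 * brevity): the difference between the two entry notifications above.
 * fuse_lowlevel_notify_inval_entry() makes the kernel drop the dentry
 * outright, while fuse_lowlevel_notify_expire_entry() with
 * FUSE_LL_EXPIRE_ONLY only marks it stale so the next access triggers a
 * fresh LOOKUP (the kernel advertises support via FUSE_CAP_EXPIRE_ONLY,
 * see do_init() above).
 */
static void example_drop_entry(struct fuse_session *se, fuse_ino_t parent,
			       const char *name)
{
	fuse_lowlevel_notify_inval_entry(se, parent, name, strlen(name));
}

static void example_expire_entry(struct fuse_session *se, fuse_ino_t parent,
				 const char *name)
{
	fuse_lowlevel_notify_expire_entry(se, parent, name, strlen(name),
					  FUSE_LL_EXPIRE_ONLY);
}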
2326 
2327 
2328 int fuse_lowlevel_notify_delete(struct fuse_session *se,
2329  fuse_ino_t parent, fuse_ino_t child,
2330  const char *name, size_t namelen)
2331 {
2332  struct fuse_notify_delete_out outarg;
2333  struct iovec iov[3];
2334 
2335  if (!se)
2336  return -EINVAL;
2337 
2338  if (se->conn.proto_minor < 18)
2339  return -ENOSYS;
2340 
2341  outarg.parent = parent;
2342  outarg.child = child;
2343  outarg.namelen = namelen;
2344  outarg.padding = 0;
2345 
2346  iov[1].iov_base = &outarg;
2347  iov[1].iov_len = sizeof(outarg);
2348  iov[2].iov_base = (void *)name;
2349  iov[2].iov_len = namelen + 1;
2350 
2351  return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2352 }
2353 
2354 int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2355  off_t offset, struct fuse_bufvec *bufv,
2356  enum fuse_buf_copy_flags flags)
2357 {
2358  struct fuse_out_header out;
2359  struct fuse_notify_store_out outarg;
2360  struct iovec iov[3];
2361  size_t size = fuse_buf_size(bufv);
2362  int res;
2363 
2364  if (!se)
2365  return -EINVAL;
2366 
2367  if (se->conn.proto_minor < 15)
2368  return -ENOSYS;
2369 
2370  out.unique = 0;
2371  out.error = FUSE_NOTIFY_STORE;
2372 
2373  outarg.nodeid = ino;
2374  outarg.offset = offset;
2375  outarg.size = size;
2376  outarg.padding = 0;
2377 
2378  iov[0].iov_base = &out;
2379  iov[0].iov_len = sizeof(out);
2380  iov[1].iov_base = &outarg;
2381  iov[1].iov_len = sizeof(outarg);
2382 
2383  res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2384  if (res > 0)
2385  res = -res;
2386 
2387  return res;
2388 }
2389 
2390 struct fuse_retrieve_req {
2391  struct fuse_notify_req nreq;
2392  void *cookie;
2393 };
2394 
2395 static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2396  fuse_req_t req, fuse_ino_t ino,
2397  const void *inarg,
2398  const struct fuse_buf *ibuf)
2399 {
2400  struct fuse_session *se = req->se;
2401  struct fuse_retrieve_req *rreq =
2402  container_of(nreq, struct fuse_retrieve_req, nreq);
2403  const struct fuse_notify_retrieve_in *arg = inarg;
2404  struct fuse_bufvec bufv = {
2405  .buf[0] = *ibuf,
2406  .count = 1,
2407  };
2408 
2409  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2410  bufv.buf[0].mem = PARAM(arg);
2411 
2412  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2413  sizeof(struct fuse_notify_retrieve_in);
2414 
2415  if (bufv.buf[0].size < arg->size) {
2416  fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2417  fuse_reply_none(req);
2418  goto out;
2419  }
2420  bufv.buf[0].size = arg->size;
2421 
2422  if (se->op.retrieve_reply) {
2423  se->op.retrieve_reply(req, rreq->cookie, ino,
2424  arg->offset, &bufv);
2425  } else {
2426  fuse_reply_none(req);
2427  }
2428 out:
2429  free(rreq);
2430  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2431  fuse_ll_clear_pipe(se);
2432 }
2433 
2434 int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2435  size_t size, off_t offset, void *cookie)
2436 {
2437  struct fuse_notify_retrieve_out outarg;
2438  struct iovec iov[2];
2439  struct fuse_retrieve_req *rreq;
2440  int err;
2441 
2442  if (!se)
2443  return -EINVAL;
2444 
2445  if (se->conn.proto_minor < 15)
2446  return -ENOSYS;
2447 
2448  rreq = malloc(sizeof(*rreq));
2449  if (rreq == NULL)
2450  return -ENOMEM;
2451 
2452  pthread_mutex_lock(&se->lock);
2453  rreq->cookie = cookie;
2454  rreq->nreq.unique = se->notify_ctr++;
2455  rreq->nreq.reply = fuse_ll_retrieve_reply;
2456  list_add_nreq(&rreq->nreq, &se->notify_list);
2457  pthread_mutex_unlock(&se->lock);
2458 
2459  outarg.notify_unique = rreq->nreq.unique;
2460  outarg.nodeid = ino;
2461  outarg.offset = offset;
2462  outarg.size = size;
2463  outarg.padding = 0;
2464 
2465  iov[1].iov_base = &outarg;
2466  iov[1].iov_len = sizeof(outarg);
2467 
2468  err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2469  if (err) {
2470  pthread_mutex_lock(&se->lock);
2471  list_del_nreq(&rreq->nreq);
2472  pthread_mutex_unlock(&se->lock);
2473  free(rreq);
2474  }
2475 
2476  return err;
2477 }
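
/*
 * Usage sketch (hypothetical callback): the retrieve round trip. A call
 * to fuse_lowlevel_notify_retrieve() asks the kernel to send back up to
 * `size` bytes of its page cache for `ino`; the answer arrives as a
 * FUSE_NOTIFY_REPLY request, is matched against se->notify_list in
 * do_notify_reply() above, and lands in the filesystem's
 * retrieve_reply() callback together with the cookie. The callback is
 * expected to finish the request with fuse_reply_none(), mirroring the
 * fallback in fuse_ll_retrieve_reply().
 */
static void example_retrieve_reply(fuse_req_t req, void *cookie,
				   fuse_ino_t ino, off_t offset,
				   struct fuse_bufvec *bufv)
{
	(void) cookie; (void) ino; (void) offset;

	fuse_log(FUSE_LOG_DEBUG, "retrieved %zu bytes\n",
		 fuse_buf_size(bufv));
	fuse_reply_none(req);
}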
2478 
2479 void *fuse_req_userdata(fuse_req_t req)
2480 {
2481  return req->se->userdata;
2482 }
2483 
2484 const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2485 {
2486  return &req->ctx;
2487 }
2488 
2489 void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2490  void *data)
2491 {
2492  pthread_mutex_lock(&req->lock);
2493  pthread_mutex_lock(&req->se->lock);
2494  req->u.ni.func = func;
2495  req->u.ni.data = data;
2496  pthread_mutex_unlock(&req->se->lock);
2497  if (req->interrupted && func)
2498  func(req, data);
2499  pthread_mutex_unlock(&req->lock);
2500 }
2501 
2502 int fuse_req_interrupted(fuse_req_t req)
2503 {
2504  int interrupted;
2505 
2506  pthread_mutex_lock(&req->se->lock);
2507  interrupted = req->interrupted;
2508  pthread_mutex_unlock(&req->se->lock);
2509 
2510  return interrupted;
2511 }
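
/*
 * Usage sketch (hypothetical handler): a long-running request handler
 * can poll fuse_req_interrupted() between chunks of work and bail out
 * with EINTR once the kernel has sent a FUSE_INTERRUPT for the request.
 * Registering a callback via fuse_req_interrupt_func() is the
 * asynchronous alternative; note above that the callback fires
 * immediately if the request was already interrupted.
 */
static void example_slow_op(fuse_req_t req, int nchunks)
{
	for (int i = 0; i < nchunks; i++) {
		if (fuse_req_interrupted(req)) {
			fuse_reply_err(req, EINTR);
			return;
		}
		/* ... process chunk i ... */
	}
	fuse_reply_err(req, 0);	/* success reply with no payload */
}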
2512 
2513 static struct {
2514  void (*func)(fuse_req_t, fuse_ino_t, const void *);
2515  const char *name;
2516 } fuse_ll_ops[] = {
2517  [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2518  [FUSE_FORGET] = { do_forget, "FORGET" },
2519  [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2520  [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2521  [FUSE_READLINK] = { do_readlink, "READLINK" },
2522  [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2523  [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2524  [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2525  [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2526  [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2527  [FUSE_RENAME] = { do_rename, "RENAME" },
2528  [FUSE_LINK] = { do_link, "LINK" },
2529  [FUSE_OPEN] = { do_open, "OPEN" },
2530  [FUSE_READ] = { do_read, "READ" },
2531  [FUSE_WRITE] = { do_write, "WRITE" },
2532  [FUSE_STATFS] = { do_statfs, "STATFS" },
2533  [FUSE_RELEASE] = { do_release, "RELEASE" },
2534  [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2535  [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2536  [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2537  [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2538  [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2539  [FUSE_FLUSH] = { do_flush, "FLUSH" },
2540  [FUSE_INIT] = { do_init, "INIT" },
2541  [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2542  [FUSE_READDIR] = { do_readdir, "READDIR" },
2543  [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2544  [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2545  [FUSE_GETLK] = { do_getlk, "GETLK" },
2546  [FUSE_SETLK] = { do_setlk, "SETLK" },
2547  [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2548  [FUSE_ACCESS] = { do_access, "ACCESS" },
2549  [FUSE_CREATE] = { do_create, "CREATE" },
2550  [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2551  [FUSE_BMAP] = { do_bmap, "BMAP" },
2552  [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2553  [FUSE_POLL] = { do_poll, "POLL" },
2554  [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2555  [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2556  [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" },
2557  [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2558  [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2559  [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2560  [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2561  [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2562  [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2563 };
2564 
2565 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2566 
2567 static const char *opname(enum fuse_opcode opcode)
2568 {
2569  if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2570  return "???";
2571  else
2572  return fuse_ll_ops[opcode].name;
2573 }
2574 
2575 static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2576  struct fuse_bufvec *src)
2577 {
2578  ssize_t res = fuse_buf_copy(dst, src, 0);
2579  if (res < 0) {
2580  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2581  return res;
2582  }
2583  if ((size_t)res < fuse_buf_size(dst)) {
2584  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2585  return -1;
2586  }
2587  return 0;
2588 }
2589 
2590 void fuse_session_process_buf(struct fuse_session *se,
2591  const struct fuse_buf *buf)
2592 {
2593  fuse_session_process_buf_int(se, buf, NULL);
2594 }
2595 
2596 void fuse_session_process_buf_int(struct fuse_session *se,
2597  const struct fuse_buf *buf, struct fuse_chan *ch)
2598 {
2599  const size_t write_header_size = sizeof(struct fuse_in_header) +
2600  sizeof(struct fuse_write_in);
2601  struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2602  struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2603  struct fuse_in_header *in;
2604  const void *inarg;
2605  struct fuse_req *req;
2606  void *mbuf = NULL;
2607  int err;
2608  int res;
2609 
2610  if (buf->flags & FUSE_BUF_IS_FD) {
2611  if (buf->size < tmpbuf.buf[0].size)
2612  tmpbuf.buf[0].size = buf->size;
2613 
2614  mbuf = malloc(tmpbuf.buf[0].size);
2615  if (mbuf == NULL) {
2616  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2617  goto clear_pipe;
2618  }
2619  tmpbuf.buf[0].mem = mbuf;
2620 
2621  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2622  if (res < 0)
2623  goto clear_pipe;
2624 
2625  in = mbuf;
2626  } else {
2627  in = buf->mem;
2628  }
2629 
2630  if (se->debug) {
2631  fuse_log(FUSE_LOG_DEBUG,
2632  "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2633  (unsigned long long) in->unique,
2634  opname((enum fuse_opcode) in->opcode), in->opcode,
2635  (unsigned long long) in->nodeid, buf->size, in->pid);
2636  }
2637 
2638  req = fuse_ll_alloc_req(se);
2639  if (req == NULL) {
2640  struct fuse_out_header out = {
2641  .unique = in->unique,
2642  .error = -ENOMEM,
2643  };
2644  struct iovec iov = {
2645  .iov_base = &out,
2646  .iov_len = sizeof(struct fuse_out_header),
2647  };
2648 
2649  fuse_send_msg(se, ch, &iov, 1);
2650  goto clear_pipe;
2651  }
2652 
2653  req->unique = in->unique;
2654  req->ctx.uid = in->uid;
2655  req->ctx.gid = in->gid;
2656  req->ctx.pid = in->pid;
2657  req->ch = ch ? fuse_chan_get(ch) : NULL;
2658 
2659  err = EIO;
2660  if (!se->got_init) {
2661  enum fuse_opcode expected;
2662 
2663  expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2664  if (in->opcode != expected)
2665  goto reply_err;
2666  } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2667  goto reply_err;
2668 
2669  err = EACCES;
2670  /* Implement -o allow_root */
2671  if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2672  in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2673  in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2674  in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2675  in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2676  in->opcode != FUSE_NOTIFY_REPLY &&
2677  in->opcode != FUSE_READDIRPLUS)
2678  goto reply_err;
2679 
2680  err = ENOSYS;
2681  if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
2682  goto reply_err;
2683  if (in->opcode != FUSE_INTERRUPT) {
2684  struct fuse_req *intr;
2685  pthread_mutex_lock(&se->lock);
2686  intr = check_interrupt(se, req);
2687  list_add_req(req, &se->list);
2688  pthread_mutex_unlock(&se->lock);
2689  if (intr)
2690  fuse_reply_err(intr, EAGAIN);
2691  }
2692 
2693  if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
2694  (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
2695  in->opcode != FUSE_NOTIFY_REPLY) {
2696  void *newmbuf;
2697 
2698  err = ENOMEM;
2699  newmbuf = realloc(mbuf, buf->size);
2700  if (newmbuf == NULL)
2701  goto reply_err;
2702  mbuf = newmbuf;
2703 
2704  tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
2705  tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
2706 
2707  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2708  err = -res;
2709  if (res < 0)
2710  goto reply_err;
2711 
2712  in = mbuf;
2713  }
2714 
2715  inarg = (void *) &in[1];
2716  if (in->opcode == FUSE_WRITE && se->op.write_buf)
2717  do_write_buf(req, in->nodeid, inarg, buf);
2718  else if (in->opcode == FUSE_NOTIFY_REPLY)
2719  do_notify_reply(req, in->nodeid, inarg, buf);
2720  else
2721  fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
2722 
2723 out_free:
2724  free(mbuf);
2725  return;
2726 
2727 reply_err:
2728  fuse_reply_err(req, err);
2729 clear_pipe:
2730  if (buf->flags & FUSE_BUF_IS_FD)
2731  fuse_ll_clear_pipe(se);
2732  goto out_free;
2733 }
2734 
2735 #define LL_OPTION(n,o,v) \
2736  { n, offsetof(struct fuse_session, o), v }
2737 
2738 static const struct fuse_opt fuse_ll_opts[] = {
2739  LL_OPTION("debug", debug, 1),
2740  LL_OPTION("-d", debug, 1),
2741  LL_OPTION("--debug", debug, 1),
2742  LL_OPTION("allow_root", deny_others, 1),
2743  FUSE_OPT_END
2744 };
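
/*
 * Sketch (hypothetical example_* names): the same fuse_opt machinery
 * used for fuse_ll_opts above also handles filesystem-specific options.
 * A "%s" template stores a newly allocated copy of the argument at the
 * given struct offset; a plain template stores the constant value.
 */
struct example_opts {
	char *source;
	int verbose;
};

static const struct fuse_opt example_opt_table[] = {
	{ "source=%s", offsetof(struct example_opts, source), 0 },
	{ "verbose",   offsetof(struct example_opts, verbose), 1 },
	FUSE_OPT_END
};

/*
 * A caller would run:
 *	struct example_opts opts = { 0 };
 *	fuse_opt_parse(args, &opts, example_opt_table, NULL);
 * which fills `opts` and removes the matched options from `args`.
 */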
2745 
2746 void fuse_lowlevel_version(void)
2747 {
2748  printf("using FUSE kernel interface version %i.%i\n",
2749  FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2750  fuse_mount_version();
2751 }
2752 
2753 void fuse_lowlevel_help(void)
2754 {
2755  /* These are not all options, but the ones that are
2756  potentially of interest to an end-user */
2757  printf(
2758 " -o allow_other allow access by all users\n"
2759 " -o allow_root allow access by root\n"
2760 " -o auto_unmount auto unmount on process termination\n");
2761 }
2762 
2763 void fuse_session_destroy(struct fuse_session *se)
2764 {
2765  struct fuse_ll_pipe *llp;
2766 
2767  if (se->got_init && !se->got_destroy) {
2768  if (se->op.destroy)
2769  se->op.destroy(se->userdata);
2770  }
2771  llp = pthread_getspecific(se->pipe_key);
2772  if (llp != NULL)
2773  fuse_ll_pipe_free(llp);
2774  pthread_key_delete(se->pipe_key);
2775  pthread_mutex_destroy(&se->lock);
2776  free(se->cuse_data);
2777  if (se->fd != -1)
2778  close(se->fd);
2779  if (se->io != NULL)
2780  free(se->io);
2781  destroy_mount_opts(se->mo);
2782  free(se);
2783 }
2784 
2785 
2786 static void fuse_ll_pipe_destructor(void *data)
2787 {
2788  struct fuse_ll_pipe *llp = data;
2789  fuse_ll_pipe_free(llp);
2790 }
2791 
2792 int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
2793 {
2794  return fuse_session_receive_buf_int(se, buf, NULL);
2795 }
2796 
2797 int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
2798  struct fuse_chan *ch)
2799 {
2800  int err;
2801  ssize_t res;
2802 #ifdef HAVE_SPLICE
2803  size_t bufsize = se->bufsize;
2804  struct fuse_ll_pipe *llp;
2805  struct fuse_buf tmpbuf;
2806 
2807  if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
2808  goto fallback;
2809 
2810  llp = fuse_ll_get_pipe(se);
2811  if (llp == NULL)
2812  goto fallback;
2813 
2814  if (llp->size < bufsize) {
2815  if (llp->can_grow) {
2816  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
2817  if (res == -1) {
2818  llp->can_grow = 0;
2819  res = grow_pipe_to_max(llp->pipe[0]);
2820  if (res > 0)
2821  llp->size = res;
2822  goto fallback;
2823  }
2824  llp->size = res;
2825  }
2826  if (llp->size < bufsize)
2827  goto fallback;
2828  }
2829 
2830  if (se->io != NULL && se->io->splice_receive != NULL) {
2831  res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
2832  llp->pipe[1], NULL, bufsize, 0,
2833  se->userdata);
2834  } else {
2835  res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
2836  bufsize, 0);
2837  }
2838  err = errno;
2839 
2840  if (fuse_session_exited(se))
2841  return 0;
2842 
2843  if (res == -1) {
2844  if (err == ENODEV) {
2845  /* Filesystem was unmounted, or connection was aborted
2846  via /sys/fs/fuse/connections */
2847  fuse_session_exit(se);
2848  return 0;
2849  }
2850  if (err != EINTR && err != EAGAIN)
2851  perror("fuse: splice from device");
2852  return -err;
2853  }
2854 
2855  if (res < sizeof(struct fuse_in_header)) {
2856  fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
2857  return -EIO;
2858  }
2859 
2860  tmpbuf = (struct fuse_buf) {
2861  .size = res,
2862  .flags = FUSE_BUF_IS_FD,
2863  .fd = llp->pipe[0],
2864  };
2865 
2866  /*
2867  * Don't bother with zero copy for small requests.
2868  * fuse_loop_mt() needs to check for FORGET so this is more than
2869  * just an optimization.
2870  */
2871  if (res < sizeof(struct fuse_in_header) +
2872  sizeof(struct fuse_write_in) + pagesize) {
2873  struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
2874  struct fuse_bufvec dst = { .count = 1 };
2875 
2876  if (!buf->mem) {
2877  buf->mem = malloc(se->bufsize);
2878  if (!buf->mem) {
2879  fuse_log(FUSE_LOG_ERR,
2880  "fuse: failed to allocate read buffer\n");
2881  return -ENOMEM;
2882  }
2883  }
2884  buf->size = se->bufsize;
2885  buf->flags = 0;
2886  dst.buf[0] = *buf;
2887 
2888  res = fuse_buf_copy(&dst, &src, 0);
2889  if (res < 0) {
2890  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
2891  strerror(-res));
2892  fuse_ll_clear_pipe(se);
2893  return res;
2894  }
2895  if (res < tmpbuf.size) {
2896  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2897  fuse_ll_clear_pipe(se);
2898  return -EIO;
2899  }
2900  assert(res == tmpbuf.size);
2901 
2902  } else {
2903  /* Don't overwrite buf->mem, as that would cause a leak */
2904  buf->fd = tmpbuf.fd;
2905  buf->flags = tmpbuf.flags;
2906  }
2907  buf->size = tmpbuf.size;
2908 
2909  return res;
2910 
2911 fallback:
2912 #endif
2913  if (!buf->mem) {
2914  buf->mem = malloc(se->bufsize);
2915  if (!buf->mem) {
2916  fuse_log(FUSE_LOG_ERR,
2917  "fuse: failed to allocate read buffer\n");
2918  return -ENOMEM;
2919  }
2920  }
2921 
2922 restart:
2923  if (se->io != NULL) {
2924  /* se->io->read is never NULL if se->io is not NULL as
2925  specified by fuse_session_custom_io() */
2926  res = se->io->read(ch ? ch->fd : se->fd, buf->mem, se->bufsize,
2927  se->userdata);
2928  } else {
2929  res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
2930  }
2931  err = errno;
2932 
2933  if (fuse_session_exited(se))
2934  return 0;
2935  if (res == -1) {
2936  /* ENOENT means the operation was interrupted; it's safe
2937  to restart */
2938  if (err == ENOENT)
2939  goto restart;
2940 
2941  if (err == ENODEV) {
2942  /* Filesystem was unmounted, or connection was aborted
2943  via /sys/fs/fuse/connections */
2944  fuse_session_exit(se);
2945  return 0;
2946  }
2947  /* Errors occurring during normal operation: EINTR (read
2948  interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
2949  umounted) */
2950  if (err != EINTR && err != EAGAIN)
2951  perror("fuse: reading device");
2952  return -err;
2953  }
2954  if ((size_t) res < sizeof(struct fuse_in_header)) {
2955  fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
2956  return -EIO;
2957  }
2958 
2959  buf->size = res;
2960 
2961  return res;
2962 }
2963 
2964 struct fuse_session *fuse_session_new(struct fuse_args *args,
2965  const struct fuse_lowlevel_ops *op,
2966  size_t op_size, void *userdata)
2967 {
2968  int err;
2969  struct fuse_session *se;
2970  struct mount_opts *mo;
2971 
2972  if (sizeof(struct fuse_lowlevel_ops) < op_size) {
2973  fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
2974  op_size = sizeof(struct fuse_lowlevel_ops);
2975  }
2976 
2977  if (args->argc == 0) {
2978  fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
2979  return NULL;
2980  }
2981 
2982  se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
2983  if (se == NULL) {
2984  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
2985  goto out1;
2986  }
2987  se->fd = -1;
2988  se->conn.max_write = UINT_MAX;
2989  se->conn.max_readahead = UINT_MAX;
2990 
2991  /* Parse options */
2992  if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
2993  goto out2;
2994  if(se->deny_others) {
2995  /* Allowing access only by root is done by instructing
2996  * the kernel to allow access by everyone, and then restricting
2997  * access to root and mountpoint owner in libfuse.
2998  */
2999  // We may be adding the option a second time, but
3000  // that doesn't hurt.
3001  if(fuse_opt_add_arg(args, "-oallow_other") == -1)
3002  goto out2;
3003  }
3004  mo = parse_mount_opts(args);
3005  if (mo == NULL)
3006  goto out3;
3007 
3008  if(args->argc == 1 &&
3009  args->argv[0][0] == '-') {
3010  fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
3011  "will be ignored\n");
3012  } else if (args->argc != 1) {
3013  int i;
3014  fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
3015  for(i = 1; i < args->argc-1; i++)
3016  fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
3017  fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
3018  goto out4;
3019  }
3020 
3021  if (se->debug)
3022  fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
3023 
3024  se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
3025  FUSE_BUFFER_HEADER_SIZE;
3026 
3027  list_init_req(&se->list);
3028  list_init_req(&se->interrupts);
3029  list_init_nreq(&se->notify_list);
3030  se->notify_ctr = 1;
3031  pthread_mutex_init(&se->lock, NULL);
3032 
3033  err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
3034  if (err) {
3035  fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
3036  strerror(err));
3037  goto out5;
3038  }
3039 
3040  memcpy(&se->op, op, op_size);
3041  se->owner = getuid();
3042  se->userdata = userdata;
3043 
3044  se->mo = mo;
3045  return se;
3046 
3047 out5:
3048  pthread_mutex_destroy(&se->lock);
3049 out4:
3050  fuse_opt_free_args(args);
3051 out3:
3052  if (mo != NULL)
3053  destroy_mount_opts(mo);
3054 out2:
3055  free(se);
3056 out1:
3057  return NULL;
3058 }
3059 
3060 int fuse_session_custom_io(struct fuse_session *se, const struct fuse_custom_io *io,
3061  int fd)
3062 {
3063  if (fd < 0) {
3064  fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3065  "fuse_session_custom_io()\n", fd);
3066  return -EBADF;
3067  }
3068  if (io == NULL) {
3069  fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3070  "fuse_session_custom_io()\n");
3071  return -EINVAL;
3072  } else if (io->read == NULL || io->writev == NULL) {
3073  /* If the user provides their own file descriptor, we can't
3074  guarantee that the default behavior of the io operations made
3075  in libfuse will function properly. Therefore, we require the
3076  user to implement these io operations when using custom io. */
3077  fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3078  "implement both io->read() and io->writev()\n");
3079  return -EINVAL;
3080  }
3081 
3082  se->io = malloc(sizeof(struct fuse_custom_io));
3083  if (se->io == NULL) {
3084  fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3085  "Error: %s\n", strerror(errno));
3086  return -errno;
3087  }
3088 
3089  se->fd = fd;
3090  *se->io = *io;
3091  return 0;
3092 }
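
/*
 * Sketch (hypothetical wrappers; the writev() member is assumed to take
 * (fd, iov, count, userdata), mirroring the read() wrapper used in
 * fuse_session_receive_buf_int() below): a minimal custom io that
 * forwards to the plain read()/writev() system calls, e.g. when the fd
 * handed to fuse_session_custom_io() is a socket rather than /dev/fuse.
 * Only read and writev are mandatory (enforced above); leaving
 * splice_receive/splice_send NULL simply disables the splice paths.
 * Requires <sys/uio.h> for writev().
 */
static ssize_t example_io_read(int fd, void *buf, size_t buf_len,
			       void *userdata)
{
	(void) userdata;
	return read(fd, buf, buf_len);
}

static ssize_t example_io_writev(int fd, struct iovec *iov, int count,
				 void *userdata)
{
	(void) userdata;
	return writev(fd, iov, count);
}

static const struct fuse_custom_io example_io = {
	.read = example_io_read,
	.writev = example_io_writev,
};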
3093 
3094 int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
3095 {
3096  int fd;
3097 
3098  /*
3099  * Make sure file descriptors 0, 1 and 2 are open; otherwise chaos
3100  * would ensue.
3101  */
3102  do {
3103  fd = open("/dev/null", O_RDWR);
3104  if (fd > 2)
3105  close(fd);
3106  } while (fd >= 0 && fd <= 2);
3107 
3108  /*
3109  * To allow FUSE daemons to run without privileges, the caller may open
3110  * /dev/fuse before launching the file system and pass on the file
3111  * descriptor by specifying /dev/fd/N as the mount point. Note that the
3112  * parent process takes care of performing the mount in this case.
3113  */
3114  fd = fuse_mnt_parse_fuse_fd(mountpoint);
3115  if (fd != -1) {
3116  if (fcntl(fd, F_GETFD) == -1) {
3117  fuse_log(FUSE_LOG_ERR,
3118  "fuse: Invalid file descriptor /dev/fd/%u\n",
3119  fd);
3120  return -1;
3121  }
3122  se->fd = fd;
3123  return 0;
3124  }
3125 
3126  /* Open channel */
3127  fd = fuse_kern_mount(mountpoint, se->mo);
3128  if (fd == -1)
3129  return -1;
3130  se->fd = fd;
3131 
3132  /* Save mountpoint */
3133  se->mountpoint = strdup(mountpoint);
3134  if (se->mountpoint == NULL)
3135  goto error_out;
3136 
3137  return 0;
3138 
3139 error_out:
3140  fuse_kern_unmount(mountpoint, fd);
3141  return -1;
3142 }
3143 
3144 int fuse_session_fd(struct fuse_session *se)
3145 {
3146  return se->fd;
3147 }
3148 
3149 void fuse_session_unmount(struct fuse_session *se)
3150 {
3151  if (se->mountpoint != NULL) {
3152  fuse_kern_unmount(se->mountpoint, se->fd);
3153  se->fd = -1;
3154  free(se->mountpoint);
3155  se->mountpoint = NULL;
3156  }
3157 }
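
/*
 * Sketch (hypothetical driver with an intentionally empty example_ops;
 * a real filesystem fills in lookup, getattr, read, ...): the usual
 * lifecycle around the session calls defined above. fuse_session_loop()
 * is declared in fuse_lowlevel.h and implemented in fuse_loop.c.
 */
static const struct fuse_lowlevel_ops example_ops = { 0 };

static int example_serve(struct fuse_args *args, const char *mountpoint)
{
	struct fuse_session *se;
	int ret = 1;

	se = fuse_session_new(args, &example_ops, sizeof(example_ops), NULL);
	if (se == NULL)
		return 1;

	if (fuse_session_mount(se, mountpoint) == 0) {
		ret = fuse_session_loop(se);	/* single-threaded loop */
		fuse_session_unmount(se);
	}

	fuse_session_destroy(se);
	return ret;
}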
3158 
3159 #ifdef linux
3160 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3161 {
3162  char *buf;
3163  size_t bufsize = 1024;
3164  char path[128];
3165  int ret;
3166  int fd;
3167  unsigned long pid = req->ctx.pid;
3168  char *s;
3169 
3170  sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3171 
3172 retry:
3173  buf = malloc(bufsize);
3174  if (buf == NULL)
3175  return -ENOMEM;
3176 
3177  ret = -EIO;
3178  fd = open(path, O_RDONLY);
3179  if (fd == -1)
3180  goto out_free;
3181 
3182  ret = read(fd, buf, bufsize);
3183  close(fd);
3184  if (ret < 0) {
3185  ret = -EIO;
3186  goto out_free;
3187  }
3188 
3189  if ((size_t)ret == bufsize) {
3190  free(buf);
3191  bufsize *= 4;
3192  goto retry;
3193  }
3194 
3195  ret = -EIO;
3196  s = strstr(buf, "\nGroups:");
3197  if (s == NULL)
3198  goto out_free;
3199 
3200  s += 8;
3201  ret = 0;
3202  while (1) {
3203  char *end;
3204  unsigned long val = strtoul(s, &end, 0);
3205  if (end == s)
3206  break;
3207 
3208  s = end;
3209  if (ret < size)
3210  list[ret] = val;
3211  ret++;
3212  }
3213 
3214 out_free:
3215  free(buf);
3216  return ret;
3217 }
3218 #else /* linux */
3219 /*
3220  * This is currently not implemented on platforms other than Linux...
3221  */
3222 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3223 {
3224  (void) req; (void) size; (void) list;
3225  return -ENOSYS;
3226 }
3227 #endif
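
/*
 * Usage sketch (hypothetical helper): fuse_req_getgroups() returns the
 * total number of supplementary groups of the calling process, which
 * may be larger than `size`; only the first `size` entries are stored,
 * as the parsing loop above shows, so callers must clamp before
 * scanning (or retry with a bigger array).
 */
static int example_in_group(fuse_req_t req, gid_t gid)
{
	gid_t groups[64];
	int n = fuse_req_getgroups(req, 64, groups);

	if (n < 0)
		return 0;	/* -ENOSYS or -EIO: treat as "not a member" */
	if (n > 64)
		n = 64;		/* only the first 64 entries were filled in */

	for (int i = 0; i < n; i++)
		if (groups[i] == gid)
			return 1;
	return 0;
}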
3228 
3229 /* Prevent spurious data race warning - we don't care
3230  * about races for this flag */
3231 __attribute__((no_sanitize_thread))
3232 void fuse_session_exit(struct fuse_session *se)
3233 {
3234  se->exited = 1;
3235 }
3236 
3237 __attribute__((no_sanitize_thread))
3238 void fuse_session_reset(struct fuse_session *se)
3239 {
3240  se->exited = 0;
3241  se->error = 0;
3242 }
3243 
3244 __attribute__((no_sanitize_thread))
3245 int fuse_session_exited(struct fuse_session *se)
3246 {
3247  return se->exited;
3248 }