libfuse: fuse_lowlevel.c
1 /*
2  FUSE: Filesystem in Userspace
3  Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4 
5  Implementation of (most of) the low-level FUSE API. The session loop
6  functions are implemented in separate files.
7 
8  This program can be distributed under the terms of the GNU LGPLv2.
9  See the file COPYING.LIB
10 */
11 
12 #define _GNU_SOURCE
13 
14 #include "fuse_config.h"
15 #include "fuse_i.h"
16 #include "fuse_kernel.h"
17 #include "fuse_opt.h"
18 #include "fuse_misc.h"
19 #include "mount_util.h"
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <stddef.h>
24 #include <string.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <errno.h>
28 #include <assert.h>
29 #include <sys/file.h>
30 
31 #ifndef F_LINUX_SPECIFIC_BASE
32 #define F_LINUX_SPECIFIC_BASE 1024
33 #endif
34 #ifndef F_SETPIPE_SZ
35 #define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
36 #endif
37 
38 
39 #define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
40 #define OFFSET_MAX 0x7fffffffffffffffLL
41 
42 #define container_of(ptr, type, member) ({ \
43  const typeof( ((type *)0)->member ) *__mptr = (ptr); \
44  (type *)( (char *)__mptr - offsetof(type,member) );})
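/*
 * Illustrative sketch (the wrapper struct is a made-up example, not from
 * this file): container_of() maps a pointer to an embedded member back to
 * the structure that contains it by subtracting the member's offset.
 *
 *   struct wrapper {
 *           int refcount;
 *           struct fuse_pollhandle ph;
 *   };
 *
 *   struct fuse_pollhandle *p = ...;
 *   struct wrapper *w = container_of(p, struct wrapper, ph);
 */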
45 
46 struct fuse_pollhandle {
47  uint64_t kh;
48  struct fuse_session *se;
49 };
50 
51 static size_t pagesize;
52 
53 static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
54 {
55  pagesize = getpagesize();
56 }
57 
58 static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
59 {
60  attr->ino = stbuf->st_ino;
61  attr->mode = stbuf->st_mode;
62  attr->nlink = stbuf->st_nlink;
63  attr->uid = stbuf->st_uid;
64  attr->gid = stbuf->st_gid;
65  attr->rdev = stbuf->st_rdev;
66  attr->size = stbuf->st_size;
67  attr->blksize = stbuf->st_blksize;
68  attr->blocks = stbuf->st_blocks;
69  attr->atime = stbuf->st_atime;
70  attr->mtime = stbuf->st_mtime;
71  attr->ctime = stbuf->st_ctime;
72  attr->atimensec = ST_ATIM_NSEC(stbuf);
73  attr->mtimensec = ST_MTIM_NSEC(stbuf);
74  attr->ctimensec = ST_CTIM_NSEC(stbuf);
75 }
76 
77 static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
78 {
79  stbuf->st_mode = attr->mode;
80  stbuf->st_uid = attr->uid;
81  stbuf->st_gid = attr->gid;
82  stbuf->st_size = attr->size;
83  stbuf->st_atime = attr->atime;
84  stbuf->st_mtime = attr->mtime;
85  stbuf->st_ctime = attr->ctime;
86  ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
87  ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
88  ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
89 }
90 
91 static size_t iov_length(const struct iovec *iov, size_t count)
92 {
93  size_t seg;
94  size_t ret = 0;
95 
96  for (seg = 0; seg < count; seg++)
97  ret += iov[seg].iov_len;
98  return ret;
99 }
100 
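/*
 * The three helpers below maintain the intrusive, circular doubly-linked
 * lists used for the per-session request lists (se->list, se->interrupts):
 * an empty list is a node whose next and prev pointers refer to itself,
 * and list_add_req() inserts a request directly before *next*.
 */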
101 static void list_init_req(struct fuse_req *req)
102 {
103  req->next = req;
104  req->prev = req;
105 }
106 
107 static void list_del_req(struct fuse_req *req)
108 {
109  struct fuse_req *prev = req->prev;
110  struct fuse_req *next = req->next;
111  prev->next = next;
112  next->prev = prev;
113 }
114 
115 static void list_add_req(struct fuse_req *req, struct fuse_req *next)
116 {
117  struct fuse_req *prev = next->prev;
118  req->next = next;
119  req->prev = prev;
120  prev->next = req;
121  next->prev = req;
122 }
123 
124 static void destroy_req(fuse_req_t req)
125 {
126  assert(req->ch == NULL);
127  pthread_mutex_destroy(&req->lock);
128  free(req);
129 }
130 
131 void fuse_free_req(fuse_req_t req)
132 {
133  int ctr;
134  struct fuse_session *se = req->se;
135 
136  pthread_mutex_lock(&se->lock);
137  req->u.ni.func = NULL;
138  req->u.ni.data = NULL;
139  list_del_req(req);
140  ctr = --req->ctr;
141  fuse_chan_put(req->ch);
142  req->ch = NULL;
143  pthread_mutex_unlock(&se->lock);
144  if (!ctr)
145  destroy_req(req);
146 }
147 
148 static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
149 {
150  struct fuse_req *req;
151 
152  req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
153  if (req == NULL) {
154  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
155  } else {
156  req->se = se;
157  req->ctr = 1;
158  list_init_req(req);
159  pthread_mutex_init(&req->lock, NULL);
160  }
161 
162  return req;
163 }
164 
165 /* Send data. If *ch* is NULL, send via session master fd */
166 static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
167  struct iovec *iov, int count)
168 {
169  struct fuse_out_header *out = iov[0].iov_base;
170 
171  assert(se != NULL);
172  out->len = iov_length(iov, count);
173  if (se->debug) {
174  if (out->unique == 0) {
175  fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
176  out->error, out->len);
177  } else if (out->error) {
178  fuse_log(FUSE_LOG_DEBUG,
179  " unique: %llu, error: %i (%s), outsize: %i\n",
180  (unsigned long long) out->unique, out->error,
181  strerror(-out->error), out->len);
182  } else {
183  fuse_log(FUSE_LOG_DEBUG,
184  " unique: %llu, success, outsize: %i\n",
185  (unsigned long long) out->unique, out->len);
186  }
187  }
188 
189  ssize_t res;
190  if (se->io != NULL)
191  /* se->io->writev is never NULL if se->io is not NULL as
192  specified by fuse_session_custom_io() */
193  res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
194  se->userdata);
195  else
196  res = writev(ch ? ch->fd : se->fd, iov, count);
197 
198  int err = errno;
199 
200  if (res == -1) {
201  /* ENOENT means the operation was interrupted */
202  if (!fuse_session_exited(se) && err != ENOENT)
203  perror("fuse: writing device");
204  return -err;
205  }
206 
207  return 0;
208 }
209 
210 
211 int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
212  int count)
213 {
214  struct fuse_out_header out;
215 
216  if (error <= -1000 || error > 0) {
217  fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
218  error = -ERANGE;
219  }
220 
221  out.unique = req->unique;
222  out.error = error;
223 
224  iov[0].iov_base = &out;
225  iov[0].iov_len = sizeof(struct fuse_out_header);
226 
227  return fuse_send_msg(req->se, req->ch, iov, count);
228 }
229 
230 static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
231  int count)
232 {
233  int res;
234 
235  res = fuse_send_reply_iov_nofree(req, error, iov, count);
236  fuse_free_req(req);
237  return res;
238 }
239 
240 static int send_reply(fuse_req_t req, int error, const void *arg,
241  size_t argsize)
242 {
243  struct iovec iov[2];
244  int count = 1;
245  if (argsize) {
246  iov[1].iov_base = (void *) arg;
247  iov[1].iov_len = argsize;
248  count++;
249  }
250  return send_reply_iov(req, error, iov, count);
251 }
252 
253 int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
254 {
255  int res;
256  struct iovec *padded_iov;
257 
258  padded_iov = malloc((count + 1) * sizeof(struct iovec));
259  if (padded_iov == NULL)
260  return fuse_reply_err(req, ENOMEM);
261 
262  memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
263  count++;
264 
265  res = send_reply_iov(req, 0, padded_iov, count);
266  free(padded_iov);
267 
268  return res;
269 }
270 
271 
272 /* `buf` is allowed to be empty so that the proper size may be
273  allocated by the caller */
274 size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
275  const char *name, const struct stat *stbuf, off_t off)
276 {
277  (void)req;
278  size_t namelen;
279  size_t entlen;
280  size_t entlen_padded;
281  struct fuse_dirent *dirent;
282 
283  namelen = strlen(name);
284  entlen = FUSE_NAME_OFFSET + namelen;
285  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
286 
287  if ((buf == NULL) || (entlen_padded > bufsize))
288  return entlen_padded;
289 
290  dirent = (struct fuse_dirent*) buf;
291  dirent->ino = stbuf->st_ino;
292  dirent->off = off;
293  dirent->namelen = namelen;
294  dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
295  memcpy(dirent->name, name, namelen);
296  memset(dirent->name + namelen, 0, entlen_padded - entlen);
297 
298  return entlen_padded;
299 }
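/*
 * Usage sketch (illustrative only; buf, used, bufsize, name, stbuf and
 * next_off are caller-side variables, not defined here): a readdir handler
 * typically calls this twice, first to learn the padded entry size and
 * then, if the entry still fits, to emit it:
 *
 *   size_t need = fuse_add_direntry(req, NULL, 0, name, &stbuf, next_off);
 *   if (used + need > bufsize)
 *           break;          // reply with the entries added so far
 *   fuse_add_direntry(req, buf + used, bufsize - used,
 *                     name, &stbuf, next_off);
 *   used += need;
 */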
300 
301 static void convert_statfs(const struct statvfs *stbuf,
302  struct fuse_kstatfs *kstatfs)
303 {
304  kstatfs->bsize = stbuf->f_bsize;
305  kstatfs->frsize = stbuf->f_frsize;
306  kstatfs->blocks = stbuf->f_blocks;
307  kstatfs->bfree = stbuf->f_bfree;
308  kstatfs->bavail = stbuf->f_bavail;
309  kstatfs->files = stbuf->f_files;
310  kstatfs->ffree = stbuf->f_ffree;
311  kstatfs->namelen = stbuf->f_namemax;
312 }
313 
314 static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
315 {
316  return send_reply(req, 0, arg, argsize);
317 }
318 
319 int fuse_reply_err(fuse_req_t req, int err)
320 {
321  return send_reply(req, -err, NULL, 0);
322 }
323 
324 void fuse_reply_none(fuse_req_t req)
325 {
326  fuse_free_req(req);
327 }
328 
329 static unsigned long calc_timeout_sec(double t)
330 {
331  if (t > (double) ULONG_MAX)
332  return ULONG_MAX;
333  else if (t < 0.0)
334  return 0;
335  else
336  return (unsigned long) t;
337 }
338 
339 static unsigned int calc_timeout_nsec(double t)
340 {
341  double f = t - (double) calc_timeout_sec(t);
342  if (f < 0.0)
343  return 0;
344  else if (f >= 0.999999999)
345  return 999999999;
346  else
347  return (unsigned int) (f * 1.0e9);
348 }
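/*
 * Worked example (illustrative): a timeout of 2.5 seconds is split into
 * calc_timeout_sec(2.5) == 2 and calc_timeout_nsec(2.5) == 500000000,
 * the (seconds, nanoseconds) pair stored in the entry/attr validity
 * fields by fill_entry() and fuse_reply_attr() below.
 */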
349 
350 static void fill_entry(struct fuse_entry_out *arg,
351  const struct fuse_entry_param *e)
352 {
353  arg->nodeid = e->ino;
354  arg->generation = e->generation;
355  arg->entry_valid = calc_timeout_sec(e->entry_timeout);
356  arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
357  arg->attr_valid = calc_timeout_sec(e->attr_timeout);
358  arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
359  convert_stat(&e->attr, &arg->attr);
360 }
361 
362 /* `buf` is allowed to be empty so that the proper size may be
363  allocated by the caller */
364 size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
365  const char *name,
366  const struct fuse_entry_param *e, off_t off)
367 {
368  (void)req;
369  size_t namelen;
370  size_t entlen;
371  size_t entlen_padded;
372 
373  namelen = strlen(name);
374  entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
375  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
376  if ((buf == NULL) || (entlen_padded > bufsize))
377  return entlen_padded;
378 
379  struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
380  memset(&dp->entry_out, 0, sizeof(dp->entry_out));
381  fill_entry(&dp->entry_out, e);
382 
383  struct fuse_dirent *dirent = &dp->dirent;
384  dirent->ino = e->attr.st_ino;
385  dirent->off = off;
386  dirent->namelen = namelen;
387  dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
388  memcpy(dirent->name, name, namelen);
389  memset(dirent->name + namelen, 0, entlen_padded - entlen);
390 
391  return entlen_padded;
392 }
393 
394 static void fill_open(struct fuse_open_out *arg,
395  const struct fuse_file_info *f)
396 {
397  arg->fh = f->fh;
398  if (f->direct_io)
399  arg->open_flags |= FOPEN_DIRECT_IO;
400  if (f->keep_cache)
401  arg->open_flags |= FOPEN_KEEP_CACHE;
402  if (f->cache_readdir)
403  arg->open_flags |= FOPEN_CACHE_DIR;
404  if (f->nonseekable)
405  arg->open_flags |= FOPEN_NONSEEKABLE;
406  if (f->noflush)
407  arg->open_flags |= FOPEN_NOFLUSH;
408  if (f->parallel_direct_writes)
409  arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
410 }
411 
412 int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
413 {
414  struct fuse_entry_out arg;
415  size_t size = req->se->conn.proto_minor < 9 ?
416  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
417 
418  /* before ABI 7.4 e->ino == 0 was invalid; only ENOENT meant a
419  negative entry */
420  if (!e->ino && req->se->conn.proto_minor < 4)
421  return fuse_reply_err(req, ENOENT);
422 
423  memset(&arg, 0, sizeof(arg));
424  fill_entry(&arg, e);
425  return send_reply_ok(req, &arg, size);
426 }
427 
428 int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
429  const struct fuse_file_info *f)
430 {
431  char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
432  size_t entrysize = req->se->conn.proto_minor < 9 ?
433  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
434  struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
435  struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
436 
437  memset(buf, 0, sizeof(buf));
438  fill_entry(earg, e);
439  fill_open(oarg, f);
440  return send_reply_ok(req, buf,
441  entrysize + sizeof(struct fuse_open_out));
442 }
443 
444 int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
445  double attr_timeout)
446 {
447  struct fuse_attr_out arg;
448  size_t size = req->se->conn.proto_minor < 9 ?
449  FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
450 
451  memset(&arg, 0, sizeof(arg));
452  arg.attr_valid = calc_timeout_sec(attr_timeout);
453  arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
454  convert_stat(attr, &arg.attr);
455 
456  return send_reply_ok(req, &arg, size);
457 }
458 
459 int fuse_reply_readlink(fuse_req_t req, const char *linkname)
460 {
461  return send_reply_ok(req, linkname, strlen(linkname));
462 }
463 
464 int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
465 {
466  struct fuse_open_out arg;
467 
468  memset(&arg, 0, sizeof(arg));
469  fill_open(&arg, f);
470  return send_reply_ok(req, &arg, sizeof(arg));
471 }
472 
473 int fuse_reply_write(fuse_req_t req, size_t count)
474 {
475  struct fuse_write_out arg;
476 
477  memset(&arg, 0, sizeof(arg));
478  arg.size = count;
479 
480  return send_reply_ok(req, &arg, sizeof(arg));
481 }
482 
483 int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
484 {
485  return send_reply_ok(req, buf, size);
486 }
487 
488 static int fuse_send_data_iov_fallback(struct fuse_session *se,
489  struct fuse_chan *ch,
490  struct iovec *iov, int iov_count,
491  struct fuse_bufvec *buf,
492  size_t len)
493 {
494  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
495  void *mbuf;
496  int res;
497 
498  /* Optimize common case */
499  if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
500  !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
501  /* FIXME: also avoid memory copy if there are multiple buffers
502  but none of them contain an fd */
503 
504  iov[iov_count].iov_base = buf->buf[0].mem;
505  iov[iov_count].iov_len = len;
506  iov_count++;
507  return fuse_send_msg(se, ch, iov, iov_count);
508  }
509 
510  res = posix_memalign(&mbuf, pagesize, len);
511  if (res != 0)
512  return res;
513 
514  mem_buf.buf[0].mem = mbuf;
515  res = fuse_buf_copy(&mem_buf, buf, 0);
516  if (res < 0) {
517  free(mbuf);
518  return -res;
519  }
520  len = res;
521 
522  iov[iov_count].iov_base = mbuf;
523  iov[iov_count].iov_len = len;
524  iov_count++;
525  res = fuse_send_msg(se, ch, iov, iov_count);
526  free(mbuf);
527 
528  return res;
529 }
530 
531 struct fuse_ll_pipe {
532  size_t size;
533  int can_grow;
534  int pipe[2];
535 };
536 
537 static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
538 {
539  close(llp->pipe[0]);
540  close(llp->pipe[1]);
541  free(llp);
542 }
543 
544 #ifdef HAVE_SPLICE
545 #if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
546 static int fuse_pipe(int fds[2])
547 {
548  int rv = pipe(fds);
549 
550  if (rv == -1)
551  return rv;
552 
553  if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
554  fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
555  fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
556  fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
557  close(fds[0]);
558  close(fds[1]);
559  rv = -1;
560  }
561  return rv;
562 }
563 #else
564 static int fuse_pipe(int fds[2])
565 {
566  return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
567 }
568 #endif
569 
570 static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
571 {
572  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
573  if (llp == NULL) {
574  int res;
575 
576  llp = malloc(sizeof(struct fuse_ll_pipe));
577  if (llp == NULL)
578  return NULL;
579 
580  res = fuse_pipe(llp->pipe);
581  if (res == -1) {
582  free(llp);
583  return NULL;
584  }
585 
586  /*
587  * The default size is 16 pages on Linux
588  */
589  llp->size = pagesize * 16;
590  llp->can_grow = 1;
591 
592  pthread_setspecific(se->pipe_key, llp);
593  }
594 
595  return llp;
596 }
597 #endif
598 
599 static void fuse_ll_clear_pipe(struct fuse_session *se)
600 {
601  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
602  if (llp) {
603  pthread_setspecific(se->pipe_key, NULL);
604  fuse_ll_pipe_free(llp);
605  }
606 }
607 
608 #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
609 static int read_back(int fd, char *buf, size_t len)
610 {
611  int res;
612 
613  res = read(fd, buf, len);
614  if (res == -1) {
615  fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
616  return -EIO;
617  }
618  if (res != len) {
619  fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
620  return -EIO;
621  }
622  return 0;
623 }
624 
625 static int grow_pipe_to_max(int pipefd)
626 {
627  int max;
628  int res;
629  int maxfd;
630  char buf[32];
631 
632  maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
633  if (maxfd < 0)
634  return -errno;
635 
636  res = read(maxfd, buf, sizeof(buf) - 1);
637  if (res < 0) {
638  int saved_errno;
639 
640  saved_errno = errno;
641  close(maxfd);
642  return -saved_errno;
643  }
644  close(maxfd);
645  buf[res] = '\0';
646 
647  max = atoi(buf);
648  res = fcntl(pipefd, F_SETPIPE_SZ, max);
649  if (res < 0)
650  return -errno;
651  return max;
652 }
653 
654 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
655  struct iovec *iov, int iov_count,
656  struct fuse_bufvec *buf, unsigned int flags)
657 {
658  int res;
659  size_t len = fuse_buf_size(buf);
660  struct fuse_out_header *out = iov[0].iov_base;
661  struct fuse_ll_pipe *llp;
662  int splice_flags;
663  size_t pipesize;
664  size_t total_buf_size;
665  size_t idx;
666  size_t headerlen;
667  struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
668 
669  if (se->broken_splice_nonblock)
670  goto fallback;
671 
672  if (flags & FUSE_BUF_NO_SPLICE)
673  goto fallback;
674 
675  total_buf_size = 0;
676  for (idx = buf->idx; idx < buf->count; idx++) {
677  total_buf_size += buf->buf[idx].size;
678  if (idx == buf->idx)
679  total_buf_size -= buf->off;
680  }
681  if (total_buf_size < 2 * pagesize)
682  goto fallback;
683 
684  if (se->conn.proto_minor < 14 ||
685  !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
686  goto fallback;
687 
688  llp = fuse_ll_get_pipe(se);
689  if (llp == NULL)
690  goto fallback;
691 
692 
693  headerlen = iov_length(iov, iov_count);
694 
695  out->len = headerlen + len;
696 
697  /*
698  * Heuristic for the required pipe size; it does not work if the
699  * source contains fragments smaller than the page size
700  */
701  pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
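 /* Illustrative numbers (not from the original comment): with 4 KiB pages
  * and a single memory source buffer (iov_count == 1, buf->count == 1)
  * this requests pagesize * 3 + out->len bytes of pipe capacity. */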
702 
703  if (llp->size < pipesize) {
704  if (llp->can_grow) {
705  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
706  if (res == -1) {
707  res = grow_pipe_to_max(llp->pipe[0]);
708  if (res > 0)
709  llp->size = res;
710  llp->can_grow = 0;
711  goto fallback;
712  }
713  llp->size = res;
714  }
715  if (llp->size < pipesize)
716  goto fallback;
717  }
718 
719 
720  res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
721  if (res == -1)
722  goto fallback;
723 
724  if (res != headerlen) {
725  res = -EIO;
726  fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
727  headerlen);
728  goto clear_pipe;
729  }
730 
731  pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
732  pipe_buf.buf[0].fd = llp->pipe[1];
733 
734  res = fuse_buf_copy(&pipe_buf, buf,
735  FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
736  if (res < 0) {
737  if (res == -EAGAIN || res == -EINVAL) {
738  /*
739  * Should only get EAGAIN on kernels with
740  * broken SPLICE_F_NONBLOCK support (<=
741  * 2.6.35) where this error or a short read is
742  * returned even if the pipe itself is not
743  * full
744  *
745  * EINVAL might mean that splice can't handle
746  * this combination of input and output.
747  */
748  if (res == -EAGAIN)
749  se->broken_splice_nonblock = 1;
750 
751  pthread_setspecific(se->pipe_key, NULL);
752  fuse_ll_pipe_free(llp);
753  goto fallback;
754  }
755  res = -res;
756  goto clear_pipe;
757  }
758 
759  if (res != 0 && res < len) {
760  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
761  void *mbuf;
762  size_t now_len = res;
763  /*
764  * For regular files a short count is either
765  * 1) due to EOF, or
766  * 2) because of broken SPLICE_F_NONBLOCK (see above)
767  *
768  * For other inputs it's possible that we overflowed
769  * the pipe because of small buffer fragments.
770  */
771 
772  res = posix_memalign(&mbuf, pagesize, len);
773  if (res != 0)
774  goto clear_pipe;
775 
776  mem_buf.buf[0].mem = mbuf;
777  mem_buf.off = now_len;
778  res = fuse_buf_copy(&mem_buf, buf, 0);
779  if (res > 0) {
780  char *tmpbuf;
781  size_t extra_len = res;
782  /*
783  * Trickiest case: got more data. Need to get
784  * back the data from the pipe and then fall
785  * back to regular write.
786  */
787  tmpbuf = malloc(headerlen);
788  if (tmpbuf == NULL) {
789  free(mbuf);
790  res = ENOMEM;
791  goto clear_pipe;
792  }
793  res = read_back(llp->pipe[0], tmpbuf, headerlen);
794  free(tmpbuf);
795  if (res != 0) {
796  free(mbuf);
797  goto clear_pipe;
798  }
799  res = read_back(llp->pipe[0], mbuf, now_len);
800  if (res != 0) {
801  free(mbuf);
802  goto clear_pipe;
803  }
804  len = now_len + extra_len;
805  iov[iov_count].iov_base = mbuf;
806  iov[iov_count].iov_len = len;
807  iov_count++;
808  res = fuse_send_msg(se, ch, iov, iov_count);
809  free(mbuf);
810  return res;
811  }
812  free(mbuf);
813  res = now_len;
814  }
815  len = res;
816  out->len = headerlen + len;
817 
818  if (se->debug) {
819  fuse_log(FUSE_LOG_DEBUG,
820  " unique: %llu, success, outsize: %i (splice)\n",
821  (unsigned long long) out->unique, out->len);
822  }
823 
824  splice_flags = 0;
825  if ((flags & FUSE_BUF_SPLICE_MOVE) &&
826  (se->conn.want & FUSE_CAP_SPLICE_MOVE))
827  splice_flags |= SPLICE_F_MOVE;
828 
829  if (se->io != NULL && se->io->splice_send != NULL) {
830  res = se->io->splice_send(llp->pipe[0], NULL,
831  ch ? ch->fd : se->fd, NULL, out->len,
832  splice_flags, se->userdata);
833  } else {
834  res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
835  out->len, splice_flags);
836  }
837  if (res == -1) {
838  res = -errno;
839  perror("fuse: splice from pipe");
840  goto clear_pipe;
841  }
842  if (res != out->len) {
843  res = -EIO;
844  fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
845  res, out->len);
846  goto clear_pipe;
847  }
848  return 0;
849 
850 clear_pipe:
851  fuse_ll_clear_pipe(se);
852  return res;
853 
854 fallback:
855  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
856 }
857 #else
858 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
859  struct iovec *iov, int iov_count,
860  struct fuse_bufvec *buf, unsigned int flags)
861 {
862  size_t len = fuse_buf_size(buf);
863  (void) flags;
864 
865  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
866 }
867 #endif
868 
869 int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
870  enum fuse_buf_copy_flags flags)
871 {
872  struct iovec iov[2];
873  struct fuse_out_header out;
874  int res;
875 
876  iov[0].iov_base = &out;
877  iov[0].iov_len = sizeof(struct fuse_out_header);
878 
879  out.unique = req->unique;
880  out.error = 0;
881 
882  res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
883  if (res <= 0) {
884  fuse_free_req(req);
885  return res;
886  } else {
887  return fuse_reply_err(req, res);
888  }
889 }
890 
891 int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
892 {
893  struct fuse_statfs_out arg;
894  size_t size = req->se->conn.proto_minor < 4 ?
895  FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
896 
897  memset(&arg, 0, sizeof(arg));
898  convert_statfs(stbuf, &arg.st);
899 
900  return send_reply_ok(req, &arg, size);
901 }
902 
903 int fuse_reply_xattr(fuse_req_t req, size_t count)
904 {
905  struct fuse_getxattr_out arg;
906 
907  memset(&arg, 0, sizeof(arg));
908  arg.size = count;
909 
910  return send_reply_ok(req, &arg, sizeof(arg));
911 }
912 
913 int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
914 {
915  struct fuse_lk_out arg;
916 
917  memset(&arg, 0, sizeof(arg));
918  arg.lk.type = lock->l_type;
919  if (lock->l_type != F_UNLCK) {
920  arg.lk.start = lock->l_start;
921  if (lock->l_len == 0)
922  arg.lk.end = OFFSET_MAX;
923  else
924  arg.lk.end = lock->l_start + lock->l_len - 1;
925  }
926  arg.lk.pid = lock->l_pid;
927  return send_reply_ok(req, &arg, sizeof(arg));
928 }
929 
930 int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
931 {
932  struct fuse_bmap_out arg;
933 
934  memset(&arg, 0, sizeof(arg));
935  arg.block = idx;
936 
937  return send_reply_ok(req, &arg, sizeof(arg));
938 }
939 
940 static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
941  size_t count)
942 {
943  struct fuse_ioctl_iovec *fiov;
944  size_t i;
945 
946  fiov = malloc(sizeof(fiov[0]) * count);
947  if (!fiov)
948  return NULL;
949 
950  for (i = 0; i < count; i++) {
951  fiov[i].base = (uintptr_t) iov[i].iov_base;
952  fiov[i].len = iov[i].iov_len;
953  }
954 
955  return fiov;
956 }
957 
958 int fuse_reply_ioctl_retry(fuse_req_t req,
959  const struct iovec *in_iov, size_t in_count,
960  const struct iovec *out_iov, size_t out_count)
961 {
962  struct fuse_ioctl_out arg;
963  struct fuse_ioctl_iovec *in_fiov = NULL;
964  struct fuse_ioctl_iovec *out_fiov = NULL;
965  struct iovec iov[4];
966  size_t count = 1;
967  int res;
968 
969  memset(&arg, 0, sizeof(arg));
970  arg.flags |= FUSE_IOCTL_RETRY;
971  arg.in_iovs = in_count;
972  arg.out_iovs = out_count;
973  iov[count].iov_base = &arg;
974  iov[count].iov_len = sizeof(arg);
975  count++;
976 
977  if (req->se->conn.proto_minor < 16) {
978  if (in_count) {
979  iov[count].iov_base = (void *)in_iov;
980  iov[count].iov_len = sizeof(in_iov[0]) * in_count;
981  count++;
982  }
983 
984  if (out_count) {
985  iov[count].iov_base = (void *)out_iov;
986  iov[count].iov_len = sizeof(out_iov[0]) * out_count;
987  count++;
988  }
989  } else {
990  /* Can't handle non-compat 64bit ioctls on 32bit */
991  if (sizeof(void *) == 4 && req->ioctl_64bit) {
992  res = fuse_reply_err(req, EINVAL);
993  goto out;
994  }
995 
996  if (in_count) {
997  in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
998  if (!in_fiov)
999  goto enomem;
1000 
1001  iov[count].iov_base = (void *)in_fiov;
1002  iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1003  count++;
1004  }
1005  if (out_count) {
1006  out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1007  if (!out_fiov)
1008  goto enomem;
1009 
1010  iov[count].iov_base = (void *)out_fiov;
1011  iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1012  count++;
1013  }
1014  }
1015 
1016  res = send_reply_iov(req, 0, iov, count);
1017 out:
1018  free(in_fiov);
1019  free(out_fiov);
1020 
1021  return res;
1022 
1023 enomem:
1024  res = fuse_reply_err(req, ENOMEM);
1025  goto out;
1026 }
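/*
 * Usage sketch (illustrative only; out_bufsz, arg and the size variable
 * come from a filesystem's ioctl handler and are not defined in this
 * file): with unrestricted ioctls the kernel does not know the data layout
 * in advance, so the handler first describes the caller's buffer and is
 * then re-invoked with the data present:
 *
 *   if (!out_bufsz) {
 *           struct iovec iov = { .iov_base = arg, .iov_len = sizeof(size) };
 *           fuse_reply_ioctl_retry(req, NULL, 0, &iov, 1);
 *   } else {
 *           fuse_reply_ioctl(req, 0, &size, sizeof(size));
 *   }
 */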
1027 
1028 int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1029 {
1030  struct fuse_ioctl_out arg;
1031  struct iovec iov[3];
1032  size_t count = 1;
1033 
1034  memset(&arg, 0, sizeof(arg));
1035  arg.result = result;
1036  iov[count].iov_base = &arg;
1037  iov[count].iov_len = sizeof(arg);
1038  count++;
1039 
1040  if (size) {
1041  iov[count].iov_base = (char *) buf;
1042  iov[count].iov_len = size;
1043  count++;
1044  }
1045 
1046  return send_reply_iov(req, 0, iov, count);
1047 }
1048 
1049 int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1050  int count)
1051 {
1052  struct iovec *padded_iov;
1053  struct fuse_ioctl_out arg;
1054  int res;
1055 
1056  padded_iov = malloc((count + 2) * sizeof(struct iovec));
1057  if (padded_iov == NULL)
1058  return fuse_reply_err(req, ENOMEM);
1059 
1060  memset(&arg, 0, sizeof(arg));
1061  arg.result = result;
1062  padded_iov[1].iov_base = &arg;
1063  padded_iov[1].iov_len = sizeof(arg);
1064 
1065  memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1066 
1067  res = send_reply_iov(req, 0, padded_iov, count + 2);
1068  free(padded_iov);
1069 
1070  return res;
1071 }
1072 
1073 int fuse_reply_poll(fuse_req_t req, unsigned revents)
1074 {
1075  struct fuse_poll_out arg;
1076 
1077  memset(&arg, 0, sizeof(arg));
1078  arg.revents = revents;
1079 
1080  return send_reply_ok(req, &arg, sizeof(arg));
1081 }
1082 
1083 int fuse_reply_lseek(fuse_req_t req, off_t off)
1084 {
1085  struct fuse_lseek_out arg;
1086 
1087  memset(&arg, 0, sizeof(arg));
1088  arg.offset = off;
1089 
1090  return send_reply_ok(req, &arg, sizeof(arg));
1091 }
1092 
1093 static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1094 {
1095  char *name = (char *) inarg;
1096 
1097  if (req->se->op.lookup)
1098  req->se->op.lookup(req, nodeid, name);
1099  else
1100  fuse_reply_err(req, ENOSYS);
1101 }
1102 
1103 static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1104 {
1105  struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1106 
1107  if (req->se->op.forget)
1108  req->se->op.forget(req, nodeid, arg->nlookup);
1109  else
1110  fuse_reply_none(req);
1111 }
1112 
1113 static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
1114  const void *inarg)
1115 {
1116  struct fuse_batch_forget_in *arg = (void *) inarg;
1117  struct fuse_forget_one *param = (void *) PARAM(arg);
1118  unsigned int i;
1119 
1120  (void) nodeid;
1121 
1122  if (req->se->op.forget_multi) {
1123  req->se->op.forget_multi(req, arg->count,
1124  (struct fuse_forget_data *) param);
1125  } else if (req->se->op.forget) {
1126  for (i = 0; i < arg->count; i++) {
1127  struct fuse_forget_one *forget = &param[i];
1128  struct fuse_req *dummy_req;
1129 
1130  dummy_req = fuse_ll_alloc_req(req->se);
1131  if (dummy_req == NULL)
1132  break;
1133 
1134  dummy_req->unique = req->unique;
1135  dummy_req->ctx = req->ctx;
1136  dummy_req->ch = NULL;
1137 
1138  req->se->op.forget(dummy_req, forget->nodeid,
1139  forget->nlookup);
1140  }
1141  fuse_reply_none(req);
1142  } else {
1143  fuse_reply_none(req);
1144  }
1145 }
1146 
1147 static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1148 {
1149  struct fuse_file_info *fip = NULL;
1150  struct fuse_file_info fi;
1151 
1152  if (req->se->conn.proto_minor >= 9) {
1153  struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1154 
1155  if (arg->getattr_flags & FUSE_GETATTR_FH) {
1156  memset(&fi, 0, sizeof(fi));
1157  fi.fh = arg->fh;
1158  fip = &fi;
1159  }
1160  }
1161 
1162  if (req->se->op.getattr)
1163  req->se->op.getattr(req, nodeid, fip);
1164  else
1165  fuse_reply_err(req, ENOSYS);
1166 }
1167 
1168 static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1169 {
1170  struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1171 
1172  if (req->se->op.setattr) {
1173  struct fuse_file_info *fi = NULL;
1174  struct fuse_file_info fi_store;
1175  struct stat stbuf;
1176  memset(&stbuf, 0, sizeof(stbuf));
1177  convert_attr(arg, &stbuf);
1178  if (arg->valid & FATTR_FH) {
1179  arg->valid &= ~FATTR_FH;
1180  memset(&fi_store, 0, sizeof(fi_store));
1181  fi = &fi_store;
1182  fi->fh = arg->fh;
1183  }
1184  arg->valid &=
1185  FUSE_SET_ATTR_MODE |
1186  FUSE_SET_ATTR_UID |
1187  FUSE_SET_ATTR_GID |
1188  FUSE_SET_ATTR_SIZE |
1189  FUSE_SET_ATTR_ATIME |
1190  FUSE_SET_ATTR_MTIME |
1191  FUSE_SET_ATTR_KILL_SUID |
1192  FUSE_SET_ATTR_KILL_SGID |
1193  FUSE_SET_ATTR_ATIME_NOW |
1194  FUSE_SET_ATTR_MTIME_NOW |
1195  FUSE_SET_ATTR_CTIME;
1196 
1197  req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1198  } else
1199  fuse_reply_err(req, ENOSYS);
1200 }
1201 
1202 static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1203 {
1204  struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1205 
1206  if (req->se->op.access)
1207  req->se->op.access(req, nodeid, arg->mask);
1208  else
1209  fuse_reply_err(req, ENOSYS);
1210 }
1211 
1212 static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1213 {
1214  (void) inarg;
1215 
1216  if (req->se->op.readlink)
1217  req->se->op.readlink(req, nodeid);
1218  else
1219  fuse_reply_err(req, ENOSYS);
1220 }
1221 
1222 static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1223 {
1224  struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1225  char *name = PARAM(arg);
1226 
1227  if (req->se->conn.proto_minor >= 12)
1228  req->ctx.umask = arg->umask;
1229  else
1230  name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1231 
1232  if (req->se->op.mknod)
1233  req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1234  else
1235  fuse_reply_err(req, ENOSYS);
1236 }
1237 
1238 static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1239 {
1240  struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1241 
1242  if (req->se->conn.proto_minor >= 12)
1243  req->ctx.umask = arg->umask;
1244 
1245  if (req->se->op.mkdir)
1246  req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1247  else
1248  fuse_reply_err(req, ENOSYS);
1249 }
1250 
1251 static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1252 {
1253  char *name = (char *) inarg;
1254 
1255  if (req->se->op.unlink)
1256  req->se->op.unlink(req, nodeid, name);
1257  else
1258  fuse_reply_err(req, ENOSYS);
1259 }
1260 
1261 static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1262 {
1263  char *name = (char *) inarg;
1264 
1265  if (req->se->op.rmdir)
1266  req->se->op.rmdir(req, nodeid, name);
1267  else
1268  fuse_reply_err(req, ENOSYS);
1269 }
1270 
1271 static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1272 {
1273  char *name = (char *) inarg;
1274  char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1275 
1276  if (req->se->op.symlink)
1277  req->se->op.symlink(req, linkname, nodeid, name);
1278  else
1279  fuse_reply_err(req, ENOSYS);
1280 }
1281 
1282 static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1283 {
1284  struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1285  char *oldname = PARAM(arg);
1286  char *newname = oldname + strlen(oldname) + 1;
1287 
1288  if (req->se->op.rename)
1289  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1290  0);
1291  else
1292  fuse_reply_err(req, ENOSYS);
1293 }
1294 
1295 static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1296 {
1297  struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1298  char *oldname = PARAM(arg);
1299  char *newname = oldname + strlen(oldname) + 1;
1300 
1301  if (req->se->op.rename)
1302  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1303  arg->flags);
1304  else
1305  fuse_reply_err(req, ENOSYS);
1306 }
1307 
1308 static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1309 {
1310  struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1311 
1312  if (req->se->op.link)
1313  req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1314  else
1315  fuse_reply_err(req, ENOSYS);
1316 }
1317 
1318 static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1319 {
1320  struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1321 
1322  if (req->se->op.create) {
1323  struct fuse_file_info fi;
1324  char *name = PARAM(arg);
1325 
1326  memset(&fi, 0, sizeof(fi));
1327  fi.flags = arg->flags;
1328 
1329  if (req->se->conn.proto_minor >= 12)
1330  req->ctx.umask = arg->umask;
1331  else
1332  name = (char *) inarg + sizeof(struct fuse_open_in);
1333 
1334  req->se->op.create(req, nodeid, name, arg->mode, &fi);
1335  } else
1336  fuse_reply_err(req, ENOSYS);
1337 }
1338 
1339 static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1340 {
1341  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1342  struct fuse_file_info fi;
1343 
1344  memset(&fi, 0, sizeof(fi));
1345  fi.flags = arg->flags;
1346 
1347  if (req->se->op.open)
1348  req->se->op.open(req, nodeid, &fi);
1349  else
1350  fuse_reply_open(req, &fi);
1351 }
1352 
1353 static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1354 {
1355  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1356 
1357  if (req->se->op.read) {
1358  struct fuse_file_info fi;
1359 
1360  memset(&fi, 0, sizeof(fi));
1361  fi.fh = arg->fh;
1362  if (req->se->conn.proto_minor >= 9) {
1363  fi.lock_owner = arg->lock_owner;
1364  fi.flags = arg->flags;
1365  }
1366  req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1367  } else
1368  fuse_reply_err(req, ENOSYS);
1369 }
1370 
1371 static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1372 {
1373  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1374  struct fuse_file_info fi;
1375  char *param;
1376 
1377  memset(&fi, 0, sizeof(fi));
1378  fi.fh = arg->fh;
1379  fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1380 
1381  if (req->se->conn.proto_minor < 9) {
1382  param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1383  } else {
1384  fi.lock_owner = arg->lock_owner;
1385  fi.flags = arg->flags;
1386  param = PARAM(arg);
1387  }
1388 
1389  if (req->se->op.write)
1390  req->se->op.write(req, nodeid, param, arg->size,
1391  arg->offset, &fi);
1392  else
1393  fuse_reply_err(req, ENOSYS);
1394 }
1395 
1396 static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
1397  const struct fuse_buf *ibuf)
1398 {
1399  struct fuse_session *se = req->se;
1400  struct fuse_bufvec bufv = {
1401  .buf[0] = *ibuf,
1402  .count = 1,
1403  };
1404  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1405  struct fuse_file_info fi;
1406 
1407  memset(&fi, 0, sizeof(fi));
1408  fi.fh = arg->fh;
1409  fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1410 
1411  if (se->conn.proto_minor < 9) {
1412  bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1413  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1414  FUSE_COMPAT_WRITE_IN_SIZE;
1415  assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
1416  } else {
1417  fi.lock_owner = arg->lock_owner;
1418  fi.flags = arg->flags;
1419  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
1420  bufv.buf[0].mem = PARAM(arg);
1421 
1422  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1423  sizeof(struct fuse_write_in);
1424  }
1425  if (bufv.buf[0].size < arg->size) {
1426  fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
1427  fuse_reply_err(req, EIO);
1428  goto out;
1429  }
1430  bufv.buf[0].size = arg->size;
1431 
1432  se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);
1433 
1434 out:
1435  /* Need to reset the pipe if ->write_buf() didn't consume all data */
1436  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
1437  fuse_ll_clear_pipe(se);
1438 }
1439 
1440 static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1441 {
1442  struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1443  struct fuse_file_info fi;
1444 
1445  memset(&fi, 0, sizeof(fi));
1446  fi.fh = arg->fh;
1447  fi.flush = 1;
1448  if (req->se->conn.proto_minor >= 7)
1449  fi.lock_owner = arg->lock_owner;
1450 
1451  if (req->se->op.flush)
1452  req->se->op.flush(req, nodeid, &fi);
1453  else
1454  fuse_reply_err(req, ENOSYS);
1455 }
1456 
1457 static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1458 {
1459  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1460  struct fuse_file_info fi;
1461 
1462  memset(&fi, 0, sizeof(fi));
1463  fi.flags = arg->flags;
1464  fi.fh = arg->fh;
1465  if (req->se->conn.proto_minor >= 8) {
1466  fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1467  fi.lock_owner = arg->lock_owner;
1468  }
1469  if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1470  fi.flock_release = 1;
1471  fi.lock_owner = arg->lock_owner;
1472  }
1473 
1474  if (req->se->op.release)
1475  req->se->op.release(req, nodeid, &fi);
1476  else
1477  fuse_reply_err(req, 0);
1478 }
1479 
1480 static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1481 {
1482  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1483  struct fuse_file_info fi;
1484  int datasync = arg->fsync_flags & 1;
1485 
1486  memset(&fi, 0, sizeof(fi));
1487  fi.fh = arg->fh;
1488 
1489  if (req->se->op.fsync)
1490  req->se->op.fsync(req, nodeid, datasync, &fi);
1491  else
1492  fuse_reply_err(req, ENOSYS);
1493 }
1494 
1495 static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1496 {
1497  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1498  struct fuse_file_info fi;
1499 
1500  memset(&fi, 0, sizeof(fi));
1501  fi.flags = arg->flags;
1502 
1503  if (req->se->op.opendir)
1504  req->se->op.opendir(req, nodeid, &fi);
1505  else
1506  fuse_reply_open(req, &fi);
1507 }
1508 
1509 static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1510 {
1511  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1512  struct fuse_file_info fi;
1513 
1514  memset(&fi, 0, sizeof(fi));
1515  fi.fh = arg->fh;
1516 
1517  if (req->se->op.readdir)
1518  req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1519  else
1520  fuse_reply_err(req, ENOSYS);
1521 }
1522 
1523 static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1524 {
1525  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1526  struct fuse_file_info fi;
1527 
1528  memset(&fi, 0, sizeof(fi));
1529  fi.fh = arg->fh;
1530 
1531  if (req->se->op.readdirplus)
1532  req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1533  else
1534  fuse_reply_err(req, ENOSYS);
1535 }
1536 
1537 static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1538 {
1539  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1540  struct fuse_file_info fi;
1541 
1542  memset(&fi, 0, sizeof(fi));
1543  fi.flags = arg->flags;
1544  fi.fh = arg->fh;
1545 
1546  if (req->se->op.releasedir)
1547  req->se->op.releasedir(req, nodeid, &fi);
1548  else
1549  fuse_reply_err(req, 0);
1550 }
1551 
1552 static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1553 {
1554  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1555  struct fuse_file_info fi;
1556  int datasync = arg->fsync_flags & 1;
1557 
1558  memset(&fi, 0, sizeof(fi));
1559  fi.fh = arg->fh;
1560 
1561  if (req->se->op.fsyncdir)
1562  req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1563  else
1564  fuse_reply_err(req, ENOSYS);
1565 }
1566 
1567 static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1568 {
1569  (void) nodeid;
1570  (void) inarg;
1571 
1572  if (req->se->op.statfs)
1573  req->se->op.statfs(req, nodeid);
1574  else {
1575  struct statvfs buf = {
1576  .f_namemax = 255,
1577  .f_bsize = 512,
1578  };
1579  fuse_reply_statfs(req, &buf);
1580  }
1581 }
1582 
1583 static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1584 {
1585  struct fuse_session *se = req->se;
1586  unsigned int xattr_ext = !!(se->conn.want & FUSE_CAP_SETXATTR_EXT);
1587  struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1588  char *name = xattr_ext ? PARAM(arg) :
1589  (char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
1590  char *value = name + strlen(name) + 1;
1591 
1592  /* XXX: The API should be extended to support extra_flags/setxattr_flags */
1593  if (req->se->op.setxattr)
1594  req->se->op.setxattr(req, nodeid, name, value, arg->size,
1595  arg->flags);
1596  else
1597  fuse_reply_err(req, ENOSYS);
1598 }
1599 
1600 static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1601 {
1602  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1603 
1604  if (req->se->op.getxattr)
1605  req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1606  else
1607  fuse_reply_err(req, ENOSYS);
1608 }
1609 
1610 static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1611 {
1612  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1613 
1614  if (req->se->op.listxattr)
1615  req->se->op.listxattr(req, nodeid, arg->size);
1616  else
1617  fuse_reply_err(req, ENOSYS);
1618 }
1619 
1620 static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1621 {
1622  char *name = (char *) inarg;
1623 
1624  if (req->se->op.removexattr)
1625  req->se->op.removexattr(req, nodeid, name);
1626  else
1627  fuse_reply_err(req, ENOSYS);
1628 }
1629 
1630 static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1631  struct flock *flock)
1632 {
1633  memset(flock, 0, sizeof(struct flock));
1634  flock->l_type = fl->type;
1635  flock->l_whence = SEEK_SET;
1636  flock->l_start = fl->start;
1637  if (fl->end == OFFSET_MAX)
1638  flock->l_len = 0;
1639  else
1640  flock->l_len = fl->end - fl->start + 1;
1641  flock->l_pid = fl->pid;
1642 }
1643 
1644 static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1645 {
1646  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1647  struct fuse_file_info fi;
1648  struct flock flock;
1649 
1650  memset(&fi, 0, sizeof(fi));
1651  fi.fh = arg->fh;
1652  fi.lock_owner = arg->owner;
1653 
1654  convert_fuse_file_lock(&arg->lk, &flock);
1655  if (req->se->op.getlk)
1656  req->se->op.getlk(req, nodeid, &fi, &flock);
1657  else
1658  fuse_reply_err(req, ENOSYS);
1659 }
1660 
1661 static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1662  const void *inarg, int sleep)
1663 {
1664  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1665  struct fuse_file_info fi;
1666  struct flock flock;
1667 
1668  memset(&fi, 0, sizeof(fi));
1669  fi.fh = arg->fh;
1670  fi.lock_owner = arg->owner;
1671 
1672  if (arg->lk_flags & FUSE_LK_FLOCK) {
1673  int op = 0;
1674 
1675  switch (arg->lk.type) {
1676  case F_RDLCK:
1677  op = LOCK_SH;
1678  break;
1679  case F_WRLCK:
1680  op = LOCK_EX;
1681  break;
1682  case F_UNLCK:
1683  op = LOCK_UN;
1684  break;
1685  }
1686  if (!sleep)
1687  op |= LOCK_NB;
1688 
1689  if (req->se->op.flock)
1690  req->se->op.flock(req, nodeid, &fi, op);
1691  else
1692  fuse_reply_err(req, ENOSYS);
1693  } else {
1694  convert_fuse_file_lock(&arg->lk, &flock);
1695  if (req->se->op.setlk)
1696  req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1697  else
1698  fuse_reply_err(req, ENOSYS);
1699  }
1700 }
1701 
1702 static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1703 {
1704  do_setlk_common(req, nodeid, inarg, 0);
1705 }
1706 
1707 static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1708 {
1709  do_setlk_common(req, nodeid, inarg, 1);
1710 }
1711 
1712 static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1713 {
1714  struct fuse_req *curr;
1715 
1716  for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1717  if (curr->unique == req->u.i.unique) {
1718  fuse_interrupt_func_t func;
1719  void *data;
1720 
1721  curr->ctr++;
1722  pthread_mutex_unlock(&se->lock);
1723 
1724  /* Ugh, ugly locking */
1725  pthread_mutex_lock(&curr->lock);
1726  pthread_mutex_lock(&se->lock);
1727  curr->interrupted = 1;
1728  func = curr->u.ni.func;
1729  data = curr->u.ni.data;
1730  pthread_mutex_unlock(&se->lock);
1731  if (func)
1732  func(curr, data);
1733  pthread_mutex_unlock(&curr->lock);
1734 
1735  pthread_mutex_lock(&se->lock);
1736  curr->ctr--;
1737  if (!curr->ctr) {
1738  fuse_chan_put(req->ch);
1739  req->ch = NULL;
1740  destroy_req(curr);
1741  }
1742 
1743  return 1;
1744  }
1745  }
1746  for (curr = se->interrupts.next; curr != &se->interrupts;
1747  curr = curr->next) {
1748  if (curr->u.i.unique == req->u.i.unique)
1749  return 1;
1750  }
1751  return 0;
1752 }
1753 
1754 static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1755 {
1756  struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1757  struct fuse_session *se = req->se;
1758 
1759  (void) nodeid;
1760  if (se->debug)
1761  fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1762  (unsigned long long) arg->unique);
1763 
1764  req->u.i.unique = arg->unique;
1765 
1766  pthread_mutex_lock(&se->lock);
1767  if (find_interrupted(se, req)) {
1768  fuse_chan_put(req->ch);
1769  req->ch = NULL;
1770  destroy_req(req);
1771  } else
1772  list_add_req(req, &se->interrupts);
1773  pthread_mutex_unlock(&se->lock);
1774 }
1775 
1776 static struct fuse_req *check_interrupt(struct fuse_session *se,
1777  struct fuse_req *req)
1778 {
1779  struct fuse_req *curr;
1780 
1781  for (curr = se->interrupts.next; curr != &se->interrupts;
1782  curr = curr->next) {
1783  if (curr->u.i.unique == req->unique) {
1784  req->interrupted = 1;
1785  list_del_req(curr);
1786  fuse_chan_put(curr->ch);
1787  curr->ch = NULL;
1788  destroy_req(curr);
1789  return NULL;
1790  }
1791  }
1792  curr = se->interrupts.next;
1793  if (curr != &se->interrupts) {
1794  list_del_req(curr);
1795  list_init_req(curr);
1796  return curr;
1797  } else
1798  return NULL;
1799 }
1800 
1801 static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1802 {
1803  struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1804 
1805  if (req->se->op.bmap)
1806  req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1807  else
1808  fuse_reply_err(req, ENOSYS);
1809 }
1810 
1811 static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1812 {
1813  struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1814  unsigned int flags = arg->flags;
1815  void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1816  struct fuse_file_info fi;
1817 
1818  if (flags & FUSE_IOCTL_DIR &&
1819  !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
1820  fuse_reply_err(req, ENOTTY);
1821  return;
1822  }
1823 
1824  memset(&fi, 0, sizeof(fi));
1825  fi.fh = arg->fh;
1826 
1827  if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1828  !(flags & FUSE_IOCTL_32BIT)) {
1829  req->ioctl_64bit = 1;
1830  }
1831 
1832  if (req->se->op.ioctl)
1833  req->se->op.ioctl(req, nodeid, arg->cmd,
1834  (void *)(uintptr_t)arg->arg, &fi, flags,
1835  in_buf, arg->in_size, arg->out_size);
1836  else
1837  fuse_reply_err(req, ENOSYS);
1838 }
1839 
1840 void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1841 {
1842  free(ph);
1843 }
1844 
1845 static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1846 {
1847  struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1848  struct fuse_file_info fi;
1849 
1850  memset(&fi, 0, sizeof(fi));
1851  fi.fh = arg->fh;
1852  fi.poll_events = arg->events;
1853 
1854  if (req->se->op.poll) {
1855  struct fuse_pollhandle *ph = NULL;
1856 
1857  if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1858  ph = malloc(sizeof(struct fuse_pollhandle));
1859  if (ph == NULL) {
1860  fuse_reply_err(req, ENOMEM);
1861  return;
1862  }
1863  ph->kh = arg->kh;
1864  ph->se = req->se;
1865  }
1866 
1867  req->se->op.poll(req, nodeid, &fi, ph);
1868  } else {
1869  fuse_reply_err(req, ENOSYS);
1870  }
1871 }
1872 
1873 static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1874 {
1875  struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1876  struct fuse_file_info fi;
1877 
1878  memset(&fi, 0, sizeof(fi));
1879  fi.fh = arg->fh;
1880 
1881  if (req->se->op.fallocate)
1882  req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1883  else
1884  fuse_reply_err(req, ENOSYS);
1885 }
1886 
1887 static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1888 {
1889  struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1890  struct fuse_file_info fi_in, fi_out;
1891 
1892  memset(&fi_in, 0, sizeof(fi_in));
1893  fi_in.fh = arg->fh_in;
1894 
1895  memset(&fi_out, 0, sizeof(fi_out));
1896  fi_out.fh = arg->fh_out;
1897 
1898 
1899  if (req->se->op.copy_file_range)
1900  req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1901  &fi_in, arg->nodeid_out,
1902  arg->off_out, &fi_out, arg->len,
1903  arg->flags);
1904  else
1905  fuse_reply_err(req, ENOSYS);
1906 }
1907 
1908 static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1909 {
1910  struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1911  struct fuse_file_info fi;
1912 
1913  memset(&fi, 0, sizeof(fi));
1914  fi.fh = arg->fh;
1915 
1916  if (req->se->op.lseek)
1917  req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1918  else
1919  fuse_reply_err(req, ENOSYS);
1920 }
1921 
1922 /* Prevent bogus data races (bogus since "init" is called before
1923  * multi-threading becomes relevant) */
1924 static __attribute__((no_sanitize("thread")))
1925 void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1926 {
1927  struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
1928  struct fuse_init_out outarg;
1929  struct fuse_session *se = req->se;
1930  size_t bufsize = se->bufsize;
1931  size_t outargsize = sizeof(outarg);
1932  uint64_t inargflags = 0;
1933  uint64_t outargflags = 0;
1934  (void) nodeid;
1935  if (se->debug) {
1936  fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
1937  if (arg->major == 7 && arg->minor >= 6) {
1938  fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
1939  fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
1940  arg->max_readahead);
1941  }
1942  }
1943  se->conn.proto_major = arg->major;
1944  se->conn.proto_minor = arg->minor;
1945  se->conn.capable = 0;
1946  se->conn.want = 0;
1947 
1948  memset(&outarg, 0, sizeof(outarg));
1949  outarg.major = FUSE_KERNEL_VERSION;
1950  outarg.minor = FUSE_KERNEL_MINOR_VERSION;
1951 
1952  if (arg->major < 7) {
1953  fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
1954  arg->major, arg->minor);
1955  fuse_reply_err(req, EPROTO);
1956  return;
1957  }
1958 
1959  if (arg->major > 7) {
1960  /* Wait for a second INIT request with a 7.X version */
1961  send_reply_ok(req, &outarg, sizeof(outarg));
1962  return;
1963  }
1964 
1965  if (arg->minor >= 6) {
1966  if (arg->max_readahead < se->conn.max_readahead)
1967  se->conn.max_readahead = arg->max_readahead;
1968  inargflags = arg->flags;
1969  if (inargflags & FUSE_INIT_EXT)
1970  inargflags = inargflags | (uint64_t) arg->flags2 << 32;
1971  if (inargflags & FUSE_ASYNC_READ)
1972  se->conn.capable |= FUSE_CAP_ASYNC_READ;
1973  if (inargflags & FUSE_POSIX_LOCKS)
1974  se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
1975  if (inargflags & FUSE_ATOMIC_O_TRUNC)
1976  se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
1977  if (inargflags & FUSE_EXPORT_SUPPORT)
1978  se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
1979  if (inargflags & FUSE_DONT_MASK)
1980  se->conn.capable |= FUSE_CAP_DONT_MASK;
1981  if (inargflags & FUSE_FLOCK_LOCKS)
1982  se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
1983  if (inargflags & FUSE_AUTO_INVAL_DATA)
1984  se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
1985  if (inargflags & FUSE_DO_READDIRPLUS)
1986  se->conn.capable |= FUSE_CAP_READDIRPLUS;
1987  if (inargflags & FUSE_READDIRPLUS_AUTO)
1988  se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
1989  if (inargflags & FUSE_ASYNC_DIO)
1990  se->conn.capable |= FUSE_CAP_ASYNC_DIO;
1991  if (inargflags & FUSE_WRITEBACK_CACHE)
1992  se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
1993  if (inargflags & FUSE_NO_OPEN_SUPPORT)
1994  se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
1995  if (inargflags & FUSE_PARALLEL_DIROPS)
1996  se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
1997  if (inargflags & FUSE_POSIX_ACL)
1998  se->conn.capable |= FUSE_CAP_POSIX_ACL;
1999  if (inargflags & FUSE_HANDLE_KILLPRIV)
2000  se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
2001  if (inargflags & FUSE_CACHE_SYMLINKS)
2002  se->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
2003  if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
2004  se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2005  if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2006  se->conn.capable |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2007  if (inargflags & FUSE_SETXATTR_EXT)
2008  se->conn.capable |= FUSE_CAP_SETXATTR_EXT;
2009  if (!(inargflags & FUSE_MAX_PAGES)) {
2010  size_t max_bufsize =
2011  FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2012  + FUSE_BUFFER_HEADER_SIZE;
2013  if (bufsize > max_bufsize) {
2014  bufsize = max_bufsize;
2015  }
2016  }
2017  if (arg->minor >= 38)
2018  se->conn.capable |= FUSE_CAP_EXPIRE_ONLY;
2019  } else {
2020  se->conn.max_readahead = 0;
2021  }
2022 
2023  if (se->conn.proto_minor >= 14) {
2024 #ifdef HAVE_SPLICE
2025 #ifdef HAVE_VMSPLICE
2026  if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2027  se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
2028  }
2029 #endif
2030  if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2031  se->conn.capable |= FUSE_CAP_SPLICE_READ;
2032  }
2033 #endif
2034  }
2035  if (se->conn.proto_minor >= 18)
2036  se->conn.capable |= FUSE_CAP_IOCTL_DIR;
2037 
2038  /* Default settings for modern filesystems.
2039  *
2040  * Most of these capabilities were disabled by default in
2041  * libfuse2 for backwards compatibility reasons. In libfuse3,
2042  * we can finally enable them by default (as long as they're
2043  * supported by the kernel).
2044  */
2045 #define LL_SET_DEFAULT(cond, cap) \
2046  if ((cond) && (se->conn.capable & (cap))) \
2047  se->conn.want |= (cap)
2048  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2049  LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
2050  LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2051  LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
2052  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2053  LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2054  LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2055  LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2056  LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2057  FUSE_CAP_POSIX_LOCKS);
2058  LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2059  LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2060  LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2061  FUSE_CAP_READDIRPLUS_AUTO);
2062 
2063  /* This could safely become default, but libfuse needs an API extension
2064  * to support it
2065  * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
2066  */
2067 
2068  se->conn.time_gran = 1;
2069 
2070  if (bufsize < FUSE_MIN_READ_BUFFER) {
2071  fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
2072  bufsize);
2073  bufsize = FUSE_MIN_READ_BUFFER;
2074  }
2075  se->bufsize = bufsize;
2076 
2077  if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
2078  se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2079 
2080  se->got_init = 1;
2081  if (se->op.init)
2082  se->op.init(se->userdata, &se->conn);
2083 
2084  if (se->conn.want & (~se->conn.capable)) {
2085  fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
2086  "0x%x that are not supported by kernel, aborting.\n",
2087  se->conn.want & (~se->conn.capable));
2088  fuse_reply_err(req, EPROTO);
2089  se->error = -EPROTO;
2090  fuse_session_exit(se);
2091  return;
2092  }
2093 
2094  unsigned max_read_mo = get_max_read(se->mo);
2095  if (se->conn.max_read != max_read_mo) {
2096  fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2097  "requested different maximum read size (%u vs %u)\n",
2098  se->conn.max_read, max_read_mo);
2099  fuse_reply_err(req, EPROTO);
2100  se->error = -EPROTO;
2101  fuse_session_exit(se);
2102  return;
2103  }
2104 
2105  if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2106  se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2107  }
2108  if (arg->flags & FUSE_MAX_PAGES) {
2109  outarg.flags |= FUSE_MAX_PAGES;
2110  outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2111  }
2112  outargflags = outarg.flags;
2113  /* Always enable big writes, this is superseded
2114  by the max_write option */
2115  outargflags |= FUSE_BIG_WRITES;
2116 
2117  if (se->conn.want & FUSE_CAP_ASYNC_READ)
2118  outargflags |= FUSE_ASYNC_READ;
2119  if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
2120  outargflags |= FUSE_POSIX_LOCKS;
2121  if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
2122  outargflags |= FUSE_ATOMIC_O_TRUNC;
2123  if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
2124  outargflags |= FUSE_EXPORT_SUPPORT;
2125  if (se->conn.want & FUSE_CAP_DONT_MASK)
2126  outargflags |= FUSE_DONT_MASK;
2127  if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
2128  outargflags |= FUSE_FLOCK_LOCKS;
2129  if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
2130  outargflags |= FUSE_AUTO_INVAL_DATA;
2131  if (se->conn.want & FUSE_CAP_READDIRPLUS)
2132  outargflags |= FUSE_DO_READDIRPLUS;
2133  if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
2134  outargflags |= FUSE_READDIRPLUS_AUTO;
2135  if (se->conn.want & FUSE_CAP_ASYNC_DIO)
2136  outargflags |= FUSE_ASYNC_DIO;
2137  if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
2138  outargflags |= FUSE_WRITEBACK_CACHE;
2139  if (se->conn.want & FUSE_CAP_POSIX_ACL)
2140  outargflags |= FUSE_POSIX_ACL;
2141  if (se->conn.want & FUSE_CAP_CACHE_SYMLINKS)
2142  outargflags |= FUSE_CACHE_SYMLINKS;
2143  if (se->conn.want & FUSE_CAP_EXPLICIT_INVAL_DATA)
2144  outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2145  if (se->conn.want & FUSE_CAP_SETXATTR_EXT)
2146  outargflags |= FUSE_SETXATTR_EXT;
2147 
2148  if (inargflags & FUSE_INIT_EXT) {
2149  outargflags |= FUSE_INIT_EXT;
2150  outarg.flags2 = outargflags >> 32;
2151  }
2152 
2153  outarg.flags = outargflags;
2154 
2155  outarg.max_readahead = se->conn.max_readahead;
2156  outarg.max_write = se->conn.max_write;
2157  if (se->conn.proto_minor >= 13) {
2158  if (se->conn.max_background >= (1 << 16))
2159  se->conn.max_background = (1 << 16) - 1;
2160  if (se->conn.congestion_threshold > se->conn.max_background)
2161  se->conn.congestion_threshold = se->conn.max_background;
2162  if (!se->conn.congestion_threshold) {
2163  se->conn.congestion_threshold =
2164  se->conn.max_background * 3 / 4;
2165  }
2166 
2167  outarg.max_background = se->conn.max_background;
2168  outarg.congestion_threshold = se->conn.congestion_threshold;
2169  }
2170  if (se->conn.proto_minor >= 23)
2171  outarg.time_gran = se->conn.time_gran;
2172 
2173  if (se->debug) {
2174  fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2175  fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2176  fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2177  outarg.max_readahead);
2178  fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2179  fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2180  outarg.max_background);
2181  fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2182  outarg.congestion_threshold);
2183  fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2184  outarg.time_gran);
2185  }
2186  if (arg->minor < 5)
2187  outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2188  else if (arg->minor < 23)
2189  outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2190 
2191  send_reply_ok(req, &outarg, outargsize);
2192 }
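
/*
 * Illustrative sketch (example_init is a hypothetical filesystem callback,
 * not part of this file): the init() handler invoked above may turn on
 * extra features by setting want-bits, but only bits that the kernel
 * advertised in conn->capable; otherwise do_init() replies with EPROTO and
 * exits the session, as seen in the check following the callback.
 * conn->time_gran is in nanoseconds, so 1000000000 advertises one-second
 * timestamp granularity.
 *
 *     static void example_init(void *userdata, struct fuse_conn_info *conn)
 *     {
 *             (void) userdata;
 *             if (conn->capable & FUSE_CAP_WRITEBACK_CACHE)
 *                     conn->want |= FUSE_CAP_WRITEBACK_CACHE;
 *             conn->time_gran = 1000000000;
 *     }
 */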
2193 
2194 static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2195 {
2196  struct fuse_session *se = req->se;
2197 
2198  (void) nodeid;
2199  (void) inarg;
2200 
2201  se->got_destroy = 1;
2202  if (se->op.destroy)
2203  se->op.destroy(se->userdata);
2204 
2205  send_reply_ok(req, NULL, 0);
2206 }
2207 
2208 static void list_del_nreq(struct fuse_notify_req *nreq)
2209 {
2210  struct fuse_notify_req *prev = nreq->prev;
2211  struct fuse_notify_req *next = nreq->next;
2212  prev->next = next;
2213  next->prev = prev;
2214 }
2215 
2216 static void list_add_nreq(struct fuse_notify_req *nreq,
2217  struct fuse_notify_req *next)
2218 {
2219  struct fuse_notify_req *prev = next->prev;
2220  nreq->next = next;
2221  nreq->prev = prev;
2222  prev->next = nreq;
2223  next->prev = nreq;
2224 }
2225 
2226 static void list_init_nreq(struct fuse_notify_req *nreq)
2227 {
2228  nreq->next = nreq;
2229  nreq->prev = nreq;
2230 }
2231 
2232 static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2233  const void *inarg, const struct fuse_buf *buf)
2234 {
2235  struct fuse_session *se = req->se;
2236  struct fuse_notify_req *nreq;
2237  struct fuse_notify_req *head;
2238 
2239  pthread_mutex_lock(&se->lock);
2240  head = &se->notify_list;
2241  for (nreq = head->next; nreq != head; nreq = nreq->next) {
2242  if (nreq->unique == req->unique) {
2243  list_del_nreq(nreq);
2244  break;
2245  }
2246  }
2247  pthread_mutex_unlock(&se->lock);
2248 
2249  if (nreq != head)
2250  nreq->reply(nreq, req, nodeid, inarg, buf);
2251 }
2252 
2253 static int send_notify_iov(struct fuse_session *se, int notify_code,
2254  struct iovec *iov, int count)
2255 {
2256  struct fuse_out_header out;
2257 
2258  if (!se->got_init)
2259  return -ENOTCONN;
2260 
2261  out.unique = 0;
2262  out.error = notify_code;
2263  iov[0].iov_base = &out;
2264  iov[0].iov_len = sizeof(struct fuse_out_header);
2265 
2266  return fuse_send_msg(se, NULL, iov, count);
2267 }
2268 
2269 int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2270 {
2271  if (ph != NULL) {
2272  struct fuse_notify_poll_wakeup_out outarg;
2273  struct iovec iov[2];
2274 
2275  outarg.kh = ph->kh;
2276 
2277  iov[1].iov_base = &outarg;
2278  iov[1].iov_len = sizeof(outarg);
2279 
2280  return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2281  } else {
2282  return 0;
2283  }
2284 }
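
/*
 * Illustrative usage sketch (example_poll, example_ph and
 * example_data_ready are hypothetical): a filesystem's poll() handler
 * typically stores the most recent pollhandle and wakes the kernel up
 * later with fuse_lowlevel_notify_poll() once I/O becomes possible,
 * destroying any previously stored handle first.
 *
 *     static struct fuse_pollhandle *example_ph;
 *
 *     static void example_poll(fuse_req_t req, fuse_ino_t ino,
 *                              struct fuse_file_info *fi,
 *                              struct fuse_pollhandle *ph)
 *     {
 *             (void) ino; (void) fi;
 *             if (ph != NULL) {
 *                     if (example_ph != NULL)
 *                             fuse_pollhandle_destroy(example_ph);
 *                     example_ph = ph;
 *             }
 *             fuse_reply_poll(req, example_data_ready() ? POLLIN : 0);
 *     }
 *
 * When data later becomes available, calling
 * fuse_lowlevel_notify_poll(example_ph) wakes up the waiting poller.
 */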
2285 
2286 int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2287  off_t off, off_t len)
2288 {
2289  struct fuse_notify_inval_inode_out outarg;
2290  struct iovec iov[2];
2291 
2292  if (!se)
2293  return -EINVAL;
2294 
2295  if (se->conn.proto_minor < 12)
2296  return -ENOSYS;
2297 
2298  outarg.ino = ino;
2299  outarg.off = off;
2300  outarg.len = len;
2301 
2302  iov[1].iov_base = &outarg;
2303  iov[1].iov_len = sizeof(outarg);
2304 
2305  return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2306 }
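
/*
 * Illustrative usage sketch: if file contents change behind the kernel's
 * back (e.g. on a network filesystem), the cached data for that inode can
 * be dropped explicitly. The off/len values 0, 0 are shown for
 * illustration only; see the description of this function in
 * fuse_lowlevel.h for their exact semantics.
 *
 *     int err = fuse_lowlevel_notify_inval_inode(se, ino, 0, 0);
 *     if (err != 0)
 *             fuse_log(FUSE_LOG_ERR, "inval_inode: %s\n", strerror(-err));
 */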
2307 
2308 int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2309  const char *name, size_t namelen,
2310  enum fuse_expire_flags flags)
2311 {
2312  struct fuse_notify_inval_entry_out outarg;
2313  struct iovec iov[3];
2314 
2315  if (!se)
2316  return -EINVAL;
2317 
2318  if (se->conn.proto_minor < 12)
2319  return -ENOSYS;
2320 
2321  outarg.parent = parent;
2322  outarg.namelen = namelen;
2323  outarg.flags = 0;
2324  if (flags & FUSE_LL_EXPIRE_ONLY)
2325  outarg.flags |= FUSE_EXPIRE_ONLY;
2326 
2327  iov[1].iov_base = &outarg;
2328  iov[1].iov_len = sizeof(outarg);
2329  iov[2].iov_base = (void *)name;
2330  iov[2].iov_len = namelen + 1;
2331 
2332  return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2333 }
2334 
2335 int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2336  const char *name, size_t namelen)
2337 {
2338  return fuse_lowlevel_notify_expire_entry(se, parent, name, namelen, 0);
2339 }
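
/*
 * Illustrative usage sketch (parent_ino is a placeholder): when an entry
 * named "somefile" disappears from a directory outside the kernel's view,
 * it can be invalidated outright, or, with FUSE_LL_EXPIRE_ONLY, merely
 * marked for revalidation on the next lookup (only useful when the kernel
 * offers FUSE_CAP_EXPIRE_ONLY, see do_init() above).
 *
 *     fuse_lowlevel_notify_inval_entry(se, parent_ino, "somefile",
 *                                      strlen("somefile"));
 *
 *     fuse_lowlevel_notify_expire_entry(se, parent_ino, "somefile",
 *                                       strlen("somefile"),
 *                                       FUSE_LL_EXPIRE_ONLY);
 */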
2340 
2341 
2342 int fuse_lowlevel_notify_delete(struct fuse_session *se,
2343  fuse_ino_t parent, fuse_ino_t child,
2344  const char *name, size_t namelen)
2345 {
2346  struct fuse_notify_delete_out outarg;
2347  struct iovec iov[3];
2348 
2349  if (!se)
2350  return -EINVAL;
2351 
2352  if (se->conn.proto_minor < 18)
2353  return -ENOSYS;
2354 
2355  outarg.parent = parent;
2356  outarg.child = child;
2357  outarg.namelen = namelen;
2358  outarg.padding = 0;
2359 
2360  iov[1].iov_base = &outarg;
2361  iov[1].iov_len = sizeof(outarg);
2362  iov[2].iov_base = (void *)name;
2363  iov[2].iov_len = namelen + 1;
2364 
2365  return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2366 }
2367 
2368 int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2369  off_t offset, struct fuse_bufvec *bufv,
2370  enum fuse_buf_copy_flags flags)
2371 {
2372  struct fuse_out_header out;
2373  struct fuse_notify_store_out outarg;
2374  struct iovec iov[3];
2375  size_t size = fuse_buf_size(bufv);
2376  int res;
2377 
2378  if (!se)
2379  return -EINVAL;
2380 
2381  if (se->conn.proto_minor < 15)
2382  return -ENOSYS;
2383 
2384  out.unique = 0;
2385  out.error = FUSE_NOTIFY_STORE;
2386 
2387  outarg.nodeid = ino;
2388  outarg.offset = offset;
2389  outarg.size = size;
2390  outarg.padding = 0;
2391 
2392  iov[0].iov_base = &out;
2393  iov[0].iov_len = sizeof(out);
2394  iov[1].iov_base = &outarg;
2395  iov[1].iov_len = sizeof(outarg);
2396 
2397  res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2398  if (res > 0)
2399  res = -res;
2400 
2401  return res;
2402 }
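
/*
 * Illustrative usage sketch (ino and offset are placeholders): data is
 * pushed into the kernel's cache for an inode by describing it with a
 * fuse_bufvec, just as in data replies.
 *
 *     char data[] = "hello";
 *     struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(sizeof(data));
 *
 *     bufv.buf[0].mem = data;
 *     fuse_lowlevel_notify_store(se, ino, offset, &bufv, 0);
 */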
2403 
2404 struct fuse_retrieve_req {
2405  struct fuse_notify_req nreq;
2406  void *cookie;
2407 };
2408 
2409 static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2410  fuse_req_t req, fuse_ino_t ino,
2411  const void *inarg,
2412  const struct fuse_buf *ibuf)
2413 {
2414  struct fuse_session *se = req->se;
2415  struct fuse_retrieve_req *rreq =
2416  container_of(nreq, struct fuse_retrieve_req, nreq);
2417  const struct fuse_notify_retrieve_in *arg = inarg;
2418  struct fuse_bufvec bufv = {
2419  .buf[0] = *ibuf,
2420  .count = 1,
2421  };
2422 
2423  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2424  bufv.buf[0].mem = PARAM(arg);
2425 
2426  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2427  sizeof(struct fuse_notify_retrieve_in);
2428 
2429  if (bufv.buf[0].size < arg->size) {
2430  fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2431  fuse_reply_none(req);
2432  goto out;
2433  }
2434  bufv.buf[0].size = arg->size;
2435 
2436  if (se->op.retrieve_reply) {
2437  se->op.retrieve_reply(req, rreq->cookie, ino,
2438  arg->offset, &bufv);
2439  } else {
2440  fuse_reply_none(req);
2441  }
2442 out:
2443  free(rreq);
2444  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2445  fuse_ll_clear_pipe(se);
2446 }
2447 
2448 int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2449  size_t size, off_t offset, void *cookie)
2450 {
2451  struct fuse_notify_retrieve_out outarg;
2452  struct iovec iov[2];
2453  struct fuse_retrieve_req *rreq;
2454  int err;
2455 
2456  if (!se)
2457  return -EINVAL;
2458 
2459  if (se->conn.proto_minor < 15)
2460  return -ENOSYS;
2461 
2462  rreq = malloc(sizeof(*rreq));
2463  if (rreq == NULL)
2464  return -ENOMEM;
2465 
2466  pthread_mutex_lock(&se->lock);
2467  rreq->cookie = cookie;
2468  rreq->nreq.unique = se->notify_ctr++;
2469  rreq->nreq.reply = fuse_ll_retrieve_reply;
2470  list_add_nreq(&rreq->nreq, &se->notify_list);
2471  pthread_mutex_unlock(&se->lock);
2472 
2473  outarg.notify_unique = rreq->nreq.unique;
2474  outarg.nodeid = ino;
2475  outarg.offset = offset;
2476  outarg.size = size;
2477  outarg.padding = 0;
2478 
2479  iov[1].iov_base = &outarg;
2480  iov[1].iov_len = sizeof(outarg);
2481 
2482  err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2483  if (err) {
2484  pthread_mutex_lock(&se->lock);
2485  list_del_nreq(&rreq->nreq);
2486  pthread_mutex_unlock(&se->lock);
2487  free(rreq);
2488  }
2489 
2490  return err;
2491 }
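
/*
 * Illustrative usage sketch: fuse_lowlevel_notify_retrieve() only queues
 * the request; the retrieved data is delivered later through the
 * filesystem's retrieve_reply() operation, matched up via the cookie
 * passed here (my_cookie, consume_data and example_retrieve_reply are
 * hypothetical).
 *
 *     fuse_lowlevel_notify_retrieve(se, ino, 4096, 0, my_cookie);
 *
 *     static void example_retrieve_reply(fuse_req_t req, void *cookie,
 *                                        fuse_ino_t ino, off_t offset,
 *                                        struct fuse_bufvec *bufv)
 *     {
 *             (void) cookie; (void) ino; (void) offset;
 *             consume_data(bufv);
 *             fuse_reply_none(req);
 *     }
 *
 * consume_data() stands in for whatever the filesystem does with the
 * returned pages; the handler finishes with fuse_reply_none().
 */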
2492 
2493 void *fuse_req_userdata(fuse_req_t req)
2494 {
2495  return req->se->userdata;
2496 }
2497 
2498 const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2499 {
2500  return &req->ctx;
2501 }
2502 
2503 void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2504  void *data)
2505 {
2506  pthread_mutex_lock(&req->lock);
2507  pthread_mutex_lock(&req->se->lock);
2508  req->u.ni.func = func;
2509  req->u.ni.data = data;
2510  pthread_mutex_unlock(&req->se->lock);
2511  if (req->interrupted && func)
2512  func(req, data);
2513  pthread_mutex_unlock(&req->lock);
2514 }
2515 
2516 int fuse_req_interrupted(fuse_req_t req)
2517 {
2518  int interrupted;
2519 
2520  pthread_mutex_lock(&req->se->lock);
2521  interrupted = req->interrupted;
2522  pthread_mutex_unlock(&req->se->lock);
2523 
2524  return interrupted;
2525 }
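
/*
 * Illustrative usage sketch (example_interrupt and struct example_op are
 * hypothetical): a long-running request handler can either register an
 * interrupt callback, which fires immediately from inside
 * fuse_req_interrupt_func() if an INTERRUPT already arrived, or poll
 * fuse_req_interrupted() from time to time.
 *
 *     static void example_interrupt(fuse_req_t req, void *data)
 *     {
 *             struct example_op *op = data;
 *
 *             (void) req;
 *             op->cancelled = 1;
 *     }
 *
 *     fuse_req_interrupt_func(req, example_interrupt, op);
 *
 * Once cancelled, the handler would typically stop its work and reply
 * with EINTR.
 */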
2526 
2527 static struct {
2528  void (*func)(fuse_req_t, fuse_ino_t, const void *);
2529  const char *name;
2530 } fuse_ll_ops[] = {
2531  [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2532  [FUSE_FORGET] = { do_forget, "FORGET" },
2533  [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2534  [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2535  [FUSE_READLINK] = { do_readlink, "READLINK" },
2536  [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2537  [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2538  [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2539  [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2540  [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2541  [FUSE_RENAME] = { do_rename, "RENAME" },
2542  [FUSE_LINK] = { do_link, "LINK" },
2543  [FUSE_OPEN] = { do_open, "OPEN" },
2544  [FUSE_READ] = { do_read, "READ" },
2545  [FUSE_WRITE] = { do_write, "WRITE" },
2546  [FUSE_STATFS] = { do_statfs, "STATFS" },
2547  [FUSE_RELEASE] = { do_release, "RELEASE" },
2548  [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2549  [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2550  [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2551  [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2552  [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2553  [FUSE_FLUSH] = { do_flush, "FLUSH" },
2554  [FUSE_INIT] = { do_init, "INIT" },
2555  [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2556  [FUSE_READDIR] = { do_readdir, "READDIR" },
2557  [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2558  [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2559  [FUSE_GETLK] = { do_getlk, "GETLK" },
2560  [FUSE_SETLK] = { do_setlk, "SETLK" },
2561  [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2562  [FUSE_ACCESS] = { do_access, "ACCESS" },
2563  [FUSE_CREATE] = { do_create, "CREATE" },
2564  [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2565  [FUSE_BMAP] = { do_bmap, "BMAP" },
2566  [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2567  [FUSE_POLL] = { do_poll, "POLL" },
2568  [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2569  [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2570  [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" },
2571  [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2572  [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2573  [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2574  [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2575  [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2576  [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2577 };
2578 
2579 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2580 
2581 static const char *opname(enum fuse_opcode opcode)
2582 {
2583  if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2584  return "???";
2585  else
2586  return fuse_ll_ops[opcode].name;
2587 }
2588 
2589 static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2590  struct fuse_bufvec *src)
2591 {
2592  ssize_t res = fuse_buf_copy(dst, src, 0);
2593  if (res < 0) {
2594  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2595  return res;
2596  }
2597  if ((size_t)res < fuse_buf_size(dst)) {
2598  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2599  return -1;
2600  }
2601  return 0;
2602 }
2603 
2604 void fuse_session_process_buf(struct fuse_session *se,
2605  const struct fuse_buf *buf)
2606 {
2607  fuse_session_process_buf_int(se, buf, NULL);
2608 }
2609 
2610 void fuse_session_process_buf_int(struct fuse_session *se,
2611  const struct fuse_buf *buf, struct fuse_chan *ch)
2612 {
2613  const size_t write_header_size = sizeof(struct fuse_in_header) +
2614  sizeof(struct fuse_write_in);
2615  struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2616  struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2617  struct fuse_in_header *in;
2618  const void *inarg;
2619  struct fuse_req *req;
2620  void *mbuf = NULL;
2621  int err;
2622  int res;
2623 
2624  if (buf->flags & FUSE_BUF_IS_FD) {
2625  if (buf->size < tmpbuf.buf[0].size)
2626  tmpbuf.buf[0].size = buf->size;
2627 
2628  mbuf = malloc(tmpbuf.buf[0].size);
2629  if (mbuf == NULL) {
2630  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2631  goto clear_pipe;
2632  }
2633  tmpbuf.buf[0].mem = mbuf;
2634 
2635  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2636  if (res < 0)
2637  goto clear_pipe;
2638 
2639  in = mbuf;
2640  } else {
2641  in = buf->mem;
2642  }
2643 
2644  if (se->debug) {
2645  fuse_log(FUSE_LOG_DEBUG,
2646  "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2647  (unsigned long long) in->unique,
2648  opname((enum fuse_opcode) in->opcode), in->opcode,
2649  (unsigned long long) in->nodeid, buf->size, in->pid);
2650  }
2651 
2652  req = fuse_ll_alloc_req(se);
2653  if (req == NULL) {
2654  struct fuse_out_header out = {
2655  .unique = in->unique,
2656  .error = -ENOMEM,
2657  };
2658  struct iovec iov = {
2659  .iov_base = &out,
2660  .iov_len = sizeof(struct fuse_out_header),
2661  };
2662 
2663  fuse_send_msg(se, ch, &iov, 1);
2664  goto clear_pipe;
2665  }
2666 
2667  req->unique = in->unique;
2668  req->ctx.uid = in->uid;
2669  req->ctx.gid = in->gid;
2670  req->ctx.pid = in->pid;
2671  req->ch = ch ? fuse_chan_get(ch) : NULL;
2672 
2673  err = EIO;
2674  if (!se->got_init) {
2675  enum fuse_opcode expected;
2676 
2677  expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2678  if (in->opcode != expected)
2679  goto reply_err;
2680  } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2681  goto reply_err;
2682 
2683  err = EACCES;
2684  /* Implement -o allow_root */
2685  if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2686  in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2687  in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2688  in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2689  in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2690  in->opcode != FUSE_NOTIFY_REPLY &&
2691  in->opcode != FUSE_READDIRPLUS)
2692  goto reply_err;
2693 
2694  err = ENOSYS;
2695  if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
2696  goto reply_err;
2697  if (in->opcode != FUSE_INTERRUPT) {
2698  struct fuse_req *intr;
2699  pthread_mutex_lock(&se->lock);
2700  intr = check_interrupt(se, req);
2701  list_add_req(req, &se->list);
2702  pthread_mutex_unlock(&se->lock);
2703  if (intr)
2704  fuse_reply_err(intr, EAGAIN);
2705  }
2706 
2707  if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
2708  (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
2709  in->opcode != FUSE_NOTIFY_REPLY) {
2710  void *newmbuf;
2711 
2712  err = ENOMEM;
2713  newmbuf = realloc(mbuf, buf->size);
2714  if (newmbuf == NULL)
2715  goto reply_err;
2716  mbuf = newmbuf;
2717 
2718  tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
2719  tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
2720 
2721  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2722  err = -res;
2723  if (res < 0)
2724  goto reply_err;
2725 
2726  in = mbuf;
2727  }
2728 
2729  inarg = (void *) &in[1];
2730  if (in->opcode == FUSE_WRITE && se->op.write_buf)
2731  do_write_buf(req, in->nodeid, inarg, buf);
2732  else if (in->opcode == FUSE_NOTIFY_REPLY)
2733  do_notify_reply(req, in->nodeid, inarg, buf);
2734  else
2735  fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
2736 
2737 out_free:
2738  free(mbuf);
2739  return;
2740 
2741 reply_err:
2742  fuse_reply_err(req, err);
2743 clear_pipe:
2744  if (buf->flags & FUSE_BUF_IS_FD)
2745  fuse_ll_clear_pipe(se);
2746  goto out_free;
2747 }
2748 
2749 #define LL_OPTION(n,o,v) \
2750  { n, offsetof(struct fuse_session, o), v }
2751 
2752 static const struct fuse_opt fuse_ll_opts[] = {
2753  LL_OPTION("debug", debug, 1),
2754  LL_OPTION("-d", debug, 1),
2755  LL_OPTION("--debug", debug, 1),
2756  LL_OPTION("allow_root", deny_others, 1),
2757  FUSE_OPT_END
2758 };
2759 
2760 void fuse_lowlevel_version(void)
2761 {
2762  printf("using FUSE kernel interface version %i.%i\n",
2763  FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2764  fuse_mount_version();
2765 }
2766 
2767 void fuse_lowlevel_help(void)
2768 {
2769  /* These are not all options, but the ones that are
2770  potentially of interest to an end-user */
2771  printf(
2772 " -o allow_other allow access by all users\n"
2773 " -o allow_root allow access by root\n"
2774 " -o auto_unmount auto unmount on process termination\n");
2775 }
2776 
2777 void fuse_session_destroy(struct fuse_session *se)
2778 {
2779  struct fuse_ll_pipe *llp;
2780 
2781  if (se->got_init && !se->got_destroy) {
2782  if (se->op.destroy)
2783  se->op.destroy(se->userdata);
2784  }
2785  llp = pthread_getspecific(se->pipe_key);
2786  if (llp != NULL)
2787  fuse_ll_pipe_free(llp);
2788  pthread_key_delete(se->pipe_key);
2789  pthread_mutex_destroy(&se->lock);
2790  free(se->cuse_data);
2791  if (se->fd != -1)
2792  close(se->fd);
2793  if (se->io != NULL)
2794  free(se->io);
2795  destroy_mount_opts(se->mo);
2796  free(se);
2797 }
2798 
2799 
2800 static void fuse_ll_pipe_destructor(void *data)
2801 {
2802  struct fuse_ll_pipe *llp = data;
2803  fuse_ll_pipe_free(llp);
2804 }
2805 
2806 int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
2807 {
2808  return fuse_session_receive_buf_int(se, buf, NULL);
2809 }
2810 
2811 int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
2812  struct fuse_chan *ch)
2813 {
2814  int err;
2815  ssize_t res;
2816 #ifdef HAVE_SPLICE
2817  size_t bufsize = se->bufsize;
2818  struct fuse_ll_pipe *llp;
2819  struct fuse_buf tmpbuf;
2820 
2821  if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
2822  goto fallback;
2823 
2824  llp = fuse_ll_get_pipe(se);
2825  if (llp == NULL)
2826  goto fallback;
2827 
2828  if (llp->size < bufsize) {
2829  if (llp->can_grow) {
2830  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
2831  if (res == -1) {
2832  llp->can_grow = 0;
2833  res = grow_pipe_to_max(llp->pipe[0]);
2834  if (res > 0)
2835  llp->size = res;
2836  goto fallback;
2837  }
2838  llp->size = res;
2839  }
2840  if (llp->size < bufsize)
2841  goto fallback;
2842  }
2843 
2844  if (se->io != NULL && se->io->splice_receive != NULL) {
2845  res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
2846  llp->pipe[1], NULL, bufsize, 0,
2847  se->userdata);
2848  } else {
2849  res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
2850  bufsize, 0);
2851  }
2852  err = errno;
2853 
2854  if (fuse_session_exited(se))
2855  return 0;
2856 
2857  if (res == -1) {
2858  if (err == ENODEV) {
2859  /* Filesystem was unmounted, or connection was aborted
2860  via /sys/fs/fuse/connections */
2861  fuse_session_exit(se);
2862  return 0;
2863  }
2864  if (err != EINTR && err != EAGAIN)
2865  perror("fuse: splice from device");
2866  return -err;
2867  }
2868 
2869  if (res < sizeof(struct fuse_in_header)) {
2870  fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
2871  return -EIO;
2872  }
2873 
2874  tmpbuf = (struct fuse_buf) {
2875  .size = res,
2876  .flags = FUSE_BUF_IS_FD,
2877  .fd = llp->pipe[0],
2878  };
2879 
2880  /*
2881  * Don't bother with zero copy for small requests.
2882  * fuse_loop_mt() needs to check for FORGET so this is more than
2883  * just an optimization.
2884  */
2885  if (res < sizeof(struct fuse_in_header) +
2886  sizeof(struct fuse_write_in) + pagesize) {
2887  struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
2888  struct fuse_bufvec dst = { .count = 1 };
2889 
2890  if (!buf->mem) {
2891  buf->mem = malloc(se->bufsize);
2892  if (!buf->mem) {
2893  fuse_log(FUSE_LOG_ERR,
2894  "fuse: failed to allocate read buffer\n");
2895  return -ENOMEM;
2896  }
2897  }
2898  buf->size = se->bufsize;
2899  buf->flags = 0;
2900  dst.buf[0] = *buf;
2901 
2902  res = fuse_buf_copy(&dst, &src, 0);
2903  if (res < 0) {
2904  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
2905  strerror(-res));
2906  fuse_ll_clear_pipe(se);
2907  return res;
2908  }
2909  if (res < tmpbuf.size) {
2910  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2911  fuse_ll_clear_pipe(se);
2912  return -EIO;
2913  }
2914  assert(res == tmpbuf.size);
2915 
2916  } else {
2917  /* Don't overwrite buf->mem, as that would cause a leak */
2918  buf->fd = tmpbuf.fd;
2919  buf->flags = tmpbuf.flags;
2920  }
2921  buf->size = tmpbuf.size;
2922 
2923  return res;
2924 
2925 fallback:
2926 #endif
2927  if (!buf->mem) {
2928  buf->mem = malloc(se->bufsize);
2929  if (!buf->mem) {
2930  fuse_log(FUSE_LOG_ERR,
2931  "fuse: failed to allocate read buffer\n");
2932  return -ENOMEM;
2933  }
2934  }
2935 
2936 restart:
2937  if (se->io != NULL) {
2938  /* se->io->read is never NULL if se->io is not NULL as
2939  specified by fuse_session_custom_io()*/
2940  res = se->io->read(ch ? ch->fd : se->fd, buf->mem, se->bufsize,
2941  se->userdata);
2942  } else {
2943  res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
2944  }
2945  err = errno;
2946 
2947  if (fuse_session_exited(se))
2948  return 0;
2949  if (res == -1) {
2950  /* ENOENT means the operation was interrupted, it's safe
2951  to restart */
2952  if (err == ENOENT)
2953  goto restart;
2954 
2955  if (err == ENODEV) {
2956  /* Filesystem was unmounted, or connection was aborted
2957  via /sys/fs/fuse/connections */
2958  fuse_session_exit(se);
2959  return 0;
2960  }
2961  /* Errors occurring during normal operation: EINTR (read
2962  interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
2963  umounted) */
2964  if (err != EINTR && err != EAGAIN)
2965  perror("fuse: reading device");
2966  return -err;
2967  }
2968  if ((size_t) res < sizeof(struct fuse_in_header)) {
2969  fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
2970  return -EIO;
2971  }
2972 
2973  buf->size = res;
2974 
2975  return res;
2976 }
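
/*
 * Illustrative sketch of how the receive/process pair above is driven,
 * roughly what a single-threaded fuse_session_loop() does (error handling
 * trimmed):
 *
 *     struct fuse_buf buf = { .mem = NULL };
 *     int res;
 *
 *     while (!fuse_session_exited(se)) {
 *             res = fuse_session_receive_buf(se, &buf);
 *             if (res == -EINTR)
 *                     continue;
 *             if (res <= 0)
 *                     break;
 *             fuse_session_process_buf(se, &buf);
 *     }
 *     free(buf.mem);
 */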
2977 
2978 struct fuse_session *fuse_session_new(struct fuse_args *args,
2979  const struct fuse_lowlevel_ops *op,
2980  size_t op_size, void *userdata)
2981 {
2982  int err;
2983  struct fuse_session *se;
2984  struct mount_opts *mo;
2985 
2986  if (sizeof(struct fuse_lowlevel_ops) < op_size) {
2987  fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
2988  op_size = sizeof(struct fuse_lowlevel_ops);
2989  }
2990 
2991  if (args->argc == 0) {
2992  fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
2993  return NULL;
2994  }
2995 
2996  se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
2997  if (se == NULL) {
2998  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
2999  goto out1;
3000  }
3001  se->fd = -1;
3002  se->conn.max_write = UINT_MAX;
3003  se->conn.max_readahead = UINT_MAX;
3004 
3005  /* Parse options */
3006  if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
3007  goto out2;
3008  if(se->deny_others) {
3009  /* Allowing access only by root is done by instructing
3010  * kernel to allow access by everyone, and then restricting
3011  * access to root and mountpoint owner in libfuse.
3012  */
3013  // We may be adding the option a second time, but
3014  // that doesn't hurt.
3015  if(fuse_opt_add_arg(args, "-oallow_other") == -1)
3016  goto out2;
3017  }
3018  mo = parse_mount_opts(args);
3019  if (mo == NULL)
3020  goto out3;
3021 
3022  if(args->argc == 1 &&
3023  args->argv[0][0] == '-') {
3024  fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
3025  "will be ignored\n");
3026  } else if (args->argc != 1) {
3027  int i;
3028  fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
3029  for(i = 1; i < args->argc-1; i++)
3030  fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
3031  fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
3032  goto out4;
3033  }
3034 
3035  if (se->debug)
3036  fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
3037 
3038  se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
3039  FUSE_BUFFER_HEADER_SIZE;
3040 
3041  list_init_req(&se->list);
3042  list_init_req(&se->interrupts);
3043  list_init_nreq(&se->notify_list);
3044  se->notify_ctr = 1;
3045  pthread_mutex_init(&se->lock, NULL);
3046 
3047  err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
3048  if (err) {
3049  fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
3050  strerror(err));
3051  goto out5;
3052  }
3053 
3054  memcpy(&se->op, op, op_size);
3055  se->owner = getuid();
3056  se->userdata = userdata;
3057 
3058  se->mo = mo;
3059  return se;
3060 
3061 out5:
3062  pthread_mutex_destroy(&se->lock);
3063 out4:
3064  fuse_opt_free_args(args);
3065 out3:
3066  if (mo != NULL)
3067  destroy_mount_opts(mo);
3068 out2:
3069  free(se);
3070 out1:
3071  return NULL;
3072 }
3073 
3074 int fuse_session_custom_io(struct fuse_session *se, const struct fuse_custom_io *io,
3075  int fd)
3076 {
3077  if (fd < 0) {
3078  fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3079  "fuse_session_custom_io()\n", fd);
3080  return -EBADF;
3081  }
3082  if (io == NULL) {
3083  fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3084  "fuse_session_custom_io()\n");
3085  return -EINVAL;
3086  } else if (io->read == NULL || io->writev == NULL) {
3087  /* If the user provides their own file descriptor, we can't
3088  guarantee that the default behavior of the io operations made
3089  in libfuse will function properly. Therefore, we require the
3090  user to implement these io operations when using custom io. */
3091  fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3092  "implement both io->read() and io->writev()\n");
3093  return -EINVAL;
3094  }
3095 
3096  se->io = malloc(sizeof(struct fuse_custom_io));
3097  if (se->io == NULL) {
3098  fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3099  "Error: %s\n", strerror(errno));
3100  return -errno;
3101  }
3102 
3103  se->fd = fd;
3104  *se->io = *io;
3105  return 0;
3106 }
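
/*
 * Illustrative usage sketch (example_read, example_writev and sockfd are
 * hypothetical; member signatures are assumed to follow the declaration of
 * struct fuse_custom_io in fuse_lowlevel.h): at minimum read() and
 * writev() must be provided, e.g. to run a session over an
 * already-connected file descriptor instead of /dev/fuse.
 *
 *     static ssize_t example_read(int fd, void *buf, size_t buf_len,
 *                                 void *userdata)
 *     {
 *             (void) userdata;
 *             return read(fd, buf, buf_len);
 *     }
 *
 *     static const struct fuse_custom_io example_io = {
 *             .read = example_read,
 *             .writev = example_writev,
 *     };
 *
 *     fuse_session_custom_io(se, &example_io, sockfd);
 */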
3107 
3108 int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
3109 {
3110  int fd;
3111 
3112  /*
3113  * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
3114  * would ensue.
3115  */
3116  do {
3117  fd = open("/dev/null", O_RDWR);
3118  if (fd > 2)
3119  close(fd);
3120  } while (fd >= 0 && fd <= 2);
3121 
3122  /*
3123  * To allow FUSE daemons to run without privileges, the caller may open
3124  * /dev/fuse before launching the file system and pass on the file
3125  * descriptor by specifying /dev/fd/N as the mount point. Note that the
3126  * parent process takes care of performing the mount in this case.
3127  */
3128  fd = fuse_mnt_parse_fuse_fd(mountpoint);
3129  if (fd != -1) {
3130  if (fcntl(fd, F_GETFD) == -1) {
3131  fuse_log(FUSE_LOG_ERR,
3132  "fuse: Invalid file descriptor /dev/fd/%u\n",
3133  fd);
3134  return -1;
3135  }
3136  se->fd = fd;
3137  return 0;
3138  }
3139 
3140  /* Open channel */
3141  fd = fuse_kern_mount(mountpoint, se->mo);
3142  if (fd == -1)
3143  return -1;
3144  se->fd = fd;
3145 
3146  /* Save mountpoint */
3147  se->mountpoint = strdup(mountpoint);
3148  if (se->mountpoint == NULL)
3149  goto error_out;
3150 
3151  return 0;
3152 
3153 error_out:
3154  fuse_kern_unmount(mountpoint, fd);
3155  return -1;
3156 }
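
/*
 * Illustrative sketch of the typical session lifecycle that ties these
 * entry points together (my_ops, my_data and mountpoint are placeholders;
 * error handling trimmed):
 *
 *     struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
 *     struct fuse_session *se;
 *
 *     se = fuse_session_new(&args, &my_ops, sizeof(my_ops), my_data);
 *     if (se == NULL)
 *             return 1;
 *     if (fuse_session_mount(se, mountpoint) != 0) {
 *             fuse_session_destroy(se);
 *             return 1;
 *     }
 *     fuse_session_loop(se);
 *     fuse_session_unmount(se);
 *     fuse_session_destroy(se);
 *     fuse_opt_free_args(&args);
 */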
3157 
3158 int fuse_session_fd(struct fuse_session *se)
3159 {
3160  return se->fd;
3161 }
3162 
3163 void fuse_session_unmount(struct fuse_session *se)
3164 {
3165  if (se->mountpoint != NULL) {
3166  fuse_kern_unmount(se->mountpoint, se->fd);
3167  se->fd = -1;
3168  free(se->mountpoint);
3169  se->mountpoint = NULL;
3170  }
3171 }
3172 
3173 #ifdef linux
3174 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3175 {
3176  char *buf;
3177  size_t bufsize = 1024;
3178  char path[128];
3179  int ret;
3180  int fd;
3181  unsigned long pid = req->ctx.pid;
3182  char *s;
3183 
3184  sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3185 
3186 retry:
3187  buf = malloc(bufsize);
3188  if (buf == NULL)
3189  return -ENOMEM;
3190 
3191  ret = -EIO;
3192  fd = open(path, O_RDONLY);
3193  if (fd == -1)
3194  goto out_free;
3195 
3196  ret = read(fd, buf, bufsize);
3197  close(fd);
3198  if (ret < 0) {
3199  ret = -EIO;
3200  goto out_free;
3201  }
3202 
3203  if ((size_t)ret == bufsize) {
3204  free(buf);
3205  bufsize *= 4;
3206  goto retry;
3207  }
3208 
3209  ret = -EIO;
3210  s = strstr(buf, "\nGroups:");
3211  if (s == NULL)
3212  goto out_free;
3213 
3214  s += 8;
3215  ret = 0;
3216  while (1) {
3217  char *end;
3218  unsigned long val = strtoul(s, &end, 0);
3219  if (end == s)
3220  break;
3221 
3222  s = end;
3223  if (ret < size)
3224  list[ret] = val;
3225  ret++;
3226  }
3227 
3228 out_free:
3229  free(buf);
3230  return ret;
3231 }
3232 #else /* linux */
3233 /*
3234  * This is currently not implemented on other than Linux...
3235  */
3236 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3237 {
3238  (void) req; (void) size; (void) list;
3239  return -ENOSYS;
3240 }
3241 #endif
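
/*
 * Illustrative usage sketch (called from inside some request handler): the
 * return value is the total number of supplementary groups of the
 * requesting process, so a result larger than the array size means the
 * list was truncated, and a negative value is an error (e.g. -ENOSYS on
 * non-Linux systems, as above).
 *
 *     gid_t groups[32];
 *     int n = fuse_req_getgroups(req, 32, groups);
 *
 *     if (n > 32)
 *             n = 32;
 */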
3242 
3243 /* Prevent spurious data race warning - we don't care
3244  * about races for this flag */
3245 __attribute__((no_sanitize_thread))
3246 void fuse_session_exit(struct fuse_session *se)
3247 {
3248  se->exited = 1;
3249 }
3250 
3251 __attribute__((no_sanitize_thread))
3252 void fuse_session_reset(struct fuse_session *se)
3253 {
3254  se->exited = 0;
3255  se->error = 0;
3256 }
3257 
3258 __attribute__((no_sanitize_thread))
3259 int fuse_session_exited(struct fuse_session *se)
3260 {
3261  return se->exited;
3262 }