#include "fuse_config.h"
#include "fuse_lowlevel.h"
#include "fuse_kernel.h"

#define FUSE_NODE_SLAB 1

#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)
#endif

#define FUSE_DEFAULT_INTR_SIGNAL SIGUSR1

#define FUSE_UNKNOWN_INO 0xffffffff
#define OFFSET_MAX 0x7fffffffffffffffLL

#define NODE_TABLE_MIN_SIZE 8192
struct lock_queue_element {
        struct lock_queue_element *next;
        /* ... */
};

#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - offsetof(type,member) );})

#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)

struct list_head {
        struct list_head *next;
        struct list_head *prev;
};

struct node_slab {
        struct list_head list;
        struct list_head freelist;
};

struct fuse {
        struct fuse_session *se;
        struct node_table name_table;
        struct node_table id_table;
        struct list_head lru_table;
        unsigned int generation;
        unsigned int hidectr;
        pthread_mutex_t lock;
        struct lock_queue_element *lockq;
        struct list_head partial_slabs;
        struct list_head full_slabs;
        pthread_t prune_thread;
        /* ... */
};
struct node {
        struct node *name_next;
        struct node *id_next;
        unsigned int generation;
        struct timespec stat_updated;
        struct timespec mtime;
        unsigned int is_hidden : 1;
        unsigned int cache_valid : 1;
        char inline_name[32];
        /* ... */
};

#define TREELOCK_WRITE -1
#define TREELOCK_WAIT_OFFSET INT_MIN

struct node_lru {
        struct node node;
        struct list_head lru;
        struct timespec forget_time;
};
struct fuse_direntry {
        /* ... */
        struct fuse_direntry *next;
};

struct fuse_dh {
        pthread_mutex_t lock;
        /* ... */
        struct fuse_direntry *first;
        struct fuse_direntry **last;
        /* ... */
};

struct fuse_context_i {
        /* ... */
};

static pthread_key_t fuse_context_key;
static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
static int fuse_context_ref;
static int fuse_register_module(const char *name,
                                fuse_module_factory_t factory,
                                struct fusemod_so *so)
        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate module\n");
        mod->name = strdup(name);
        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate module name\n");
        mod->factory = factory;
        mod->next = fuse_modules;

static void fuse_unregister_module(struct fuse_module *m)
        for (mp = &fuse_modules; *mp; mp = &(*mp)->next) {

static int fuse_load_so_module(const char *module)
        struct fusemod_so *so;
        tmp = malloc(strlen(module) + 64);
        fuse_log(FUSE_LOG_ERR, "fuse: memory allocation failed\n");
        sprintf(tmp, "libfusemod_%s.so", module);
        so = calloc(1, sizeof(struct fusemod_so));
        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate module so\n");
        so->handle = dlopen(tmp, RTLD_NOW);
        if (so->handle == NULL) {
                fuse_log(FUSE_LOG_ERR, "fuse: dlopen(%s) failed: %s\n",
        sprintf(tmp, "fuse_module_%s_factory", module);
        if (factory == NULL) {
                fuse_log(FUSE_LOG_ERR, "fuse: symbol <%s> not found in module: %s\n",
        ret = fuse_register_module(module, *factory, so);
static struct fuse_module *fuse_find_module(const char *module)
        for (m = fuse_modules; m; m = m->next) {
                if (strcmp(module, m->name) == 0) {

static struct fuse_module *fuse_get_module(const char *module)
        pthread_mutex_lock(&fuse_context_lock);
        m = fuse_find_module(module);
        int err = fuse_load_so_module(module);
        m = fuse_find_module(module);
        pthread_mutex_unlock(&fuse_context_lock);

        pthread_mutex_lock(&fuse_context_lock);
        if (!m->ctr && m->so) {
                struct fusemod_so *so = m->so;
                for (mp = &fuse_modules; *mp;) {
                        fuse_unregister_module(*mp);
        } else if (!m->ctr) {
                fuse_unregister_module(m);
        pthread_mutex_unlock(&fuse_context_lock);
static void init_list_head(struct list_head *list)

static int list_empty(const struct list_head *head)
        return head->next == head;

static void list_add(struct list_head *new, struct list_head *prev,
                     struct list_head *next)

static inline void list_add_head(struct list_head *new, struct list_head *head)
        list_add(new, head, head->next);

static inline void list_add_tail(struct list_head *new, struct list_head *head)
        list_add(new, head->prev, head);

static inline void list_del(struct list_head *entry)
        struct list_head *prev = entry->prev;
        struct list_head *next = entry->next;
static inline int lru_enabled(struct fuse *f)
        return f->conf.remember > 0;

static struct node_lru *node_lru(struct node *node)
        return (struct node_lru *) node;

static size_t get_node_size(struct fuse *f)
        if (lru_enabled(f))
                return sizeof(struct node_lru);
        return sizeof(struct node);

static struct node_slab *list_to_slab(struct list_head *head)
        return (struct node_slab *) head;

static struct node_slab *node_to_slab(struct fuse *f, struct node *node)
        return (struct node_slab *) (((uintptr_t) node) & ~((uintptr_t) f->pagesize - 1));
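/*
 * Editor's note (sketch, assuming slabs are allocated one page at a time
 * with mmap() and are therefore page-aligned): node_to_slab() recovers the
 * slab header from any node inside it purely by rounding the node's address
 * down to the start of its page, e.g. with a 4096-byte pagesize:
 *
 *      uintptr_t node_addr = 0x7f3a12345e70;
 *      uintptr_t slab_addr = node_addr & ~(uintptr_t) (4096 - 1);
 *      // slab_addr == 0x7f3a12345000, the mmap()ed page holding the slab
 */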
static int alloc_slab(struct fuse *f)
        struct node_slab *slab;
        size_t node_size = get_node_size(f);
        mem = mmap(NULL, f->pagesize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED)
        init_list_head(&slab->freelist);
        num = (f->pagesize - sizeof(struct node_slab)) / node_size;
        start = (char *) mem + f->pagesize - num * node_size;
        for (i = 0; i < num; i++) {
                n = (struct list_head *) (start + i * node_size);
                list_add_tail(n, &slab->freelist);
        list_add_tail(&slab->list, &f->partial_slabs);

static struct node *alloc_node(struct fuse *f)
        struct node_slab *slab;
        struct list_head *node;
        if (list_empty(&f->partial_slabs)) {
                int res = alloc_slab(f);
        slab = list_to_slab(f->partial_slabs.next);
        node = slab->freelist.next;
        if (list_empty(&slab->freelist)) {
                list_del(&slab->list);
                list_add_tail(&slab->list, &f->full_slabs);
        memset(node, 0, sizeof(struct node));
        return (struct node *) node;
static void free_slab(struct fuse *f, struct node_slab *slab)
        list_del(&slab->list);
        res = munmap(slab, f->pagesize);
        fuse_log(FUSE_LOG_WARNING, "fuse warning: munmap(%p) failed\n",

static void free_node_mem(struct fuse *f, struct node *node)
        struct node_slab *slab = node_to_slab(f, node);
        struct list_head *n = (struct list_head *) node;
        if (list_empty(&slab->freelist)) {
                list_del(&slab->list);
                list_add_tail(&slab->list, &f->partial_slabs);
        list_add_head(n, &slab->freelist);

/* Fallback allocator, used when FUSE_NODE_SLAB is not defined: */
static struct node *alloc_node(struct fuse *f)
        return (struct node *) calloc(1, get_node_size(f));

static void free_node_mem(struct fuse *f, struct node *node)
static size_t id_hash(struct fuse *f, fuse_ino_t ino)
        uint64_t hash = ((uint32_t) ino * 2654435761U) % f->id_table.size;
        uint64_t oldhash = hash % (f->id_table.size / 2);
        if (oldhash >= f->id_table.split)
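/*
 * Editor's note (sketch, not in the original source): the node tables use
 * incremental ("linear") rehashing.  While a table is being doubled, buckets
 * below `split` have already been rehashed to the new size and buckets at or
 * above `split` still live at their old position, so a lookup first hashes
 * modulo the new size and falls back to the old size when the old bucket has
 * not been split yet:
 *
 *      size_t hash = key % new_size;            // new_size == t->size
 *      size_t oldhash = key % (new_size / 2);   // the pre-resize bucket
 *      if (oldhash >= t->split)
 *              hash = oldhash;                  // bucket not yet rehashed
 */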
static struct node *get_node_nocheck(struct fuse *f, fuse_ino_t nodeid)
        size_t hash = id_hash(f, nodeid);
        for (node = f->id_table.array[hash]; node != NULL; node = node->id_next)
                if (node->nodeid == nodeid)

static struct node *get_node(struct fuse *f, fuse_ino_t nodeid)
        struct node *node = get_node_nocheck(f, nodeid);
        fuse_log(FUSE_LOG_ERR, "fuse internal error: node %llu not found\n",
                 (unsigned long long) nodeid);

static void curr_time(struct timespec *now);
static double diff_timespec(const struct timespec *t1,
                            const struct timespec *t2);

static void remove_node_lru(struct node *node)
        struct node_lru *lnode = node_lru(node);
        list_del(&lnode->lru);
        init_list_head(&lnode->lru);

static void set_forget_time(struct fuse *f, struct node *node)
        struct node_lru *lnode = node_lru(node);
        list_del(&lnode->lru);
        list_add_tail(&lnode->lru, &f->lru_table);
        curr_time(&lnode->forget_time);

static void free_node(struct fuse *f, struct node *node)
        if (node->name != node->inline_name)
        free_node_mem(f, node);
static void node_table_reduce(struct node_table *t)
        size_t newsize = t->size / 2;
        if (newsize < NODE_TABLE_MIN_SIZE)
        newarray = realloc(t->array, sizeof(struct node *) * newsize);
        if (newarray != NULL)
        t->split = t->size / 2;

static void remerge_id(struct fuse *f)
        struct node_table *t = &f->id_table;
        node_table_reduce(t);
        for (iter = 8; t->split > 0 && iter; iter--) {
                upper = &t->array[t->split + t->size / 2];
                for (nodep = &t->array[t->split]; *nodep;
                     nodep = &(*nodep)->id_next);

static void unhash_id(struct fuse *f, struct node *node)
        struct node **nodep = &f->id_table.array[id_hash(f, node->nodeid)];
        for (; *nodep != NULL; nodep = &(*nodep)->id_next)
                if (*nodep == node) {
                        *nodep = node->id_next;
        if (f->id_table.use < f->id_table.size / 4)

static int node_table_resize(struct node_table *t)
        size_t newsize = t->size * 2;
        newarray = realloc(t->array, sizeof(struct node *) * newsize);
        if (newarray == NULL)
        memset(t->array + t->size, 0, t->size * sizeof(struct node *));

static void rehash_id(struct fuse *f)
        struct node_table *t = &f->id_table;
        if (t->split == t->size / 2)
        for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
                struct node *node = *nodep;
                size_t newhash = id_hash(f, node->nodeid);
                if (newhash != hash) {
                        *nodep = node->id_next;
                        node->id_next = t->array[newhash];
                        t->array[newhash] = node;
                next = &node->id_next;
        if (t->split == t->size / 2)
                node_table_resize(t);

static void hash_id(struct fuse *f, struct node *node)
        size_t hash = id_hash(f, node->nodeid);
        node->id_next = f->id_table.array[hash];
        f->id_table.array[hash] = node;
        if (f->id_table.use >= f->id_table.size / 2)
static size_t name_hash(struct fuse *f, fuse_ino_t parent,
                        const char *name)
        uint64_t hash = parent;
        for (; *name; name++)
                hash = hash * 31 + (unsigned char) *name;
        hash %= f->name_table.size;
        oldhash = hash % (f->name_table.size / 2);
        if (oldhash >= f->name_table.split)

static void unref_node(struct fuse *f, struct node *node);

static void remerge_name(struct fuse *f)
        struct node_table *t = &f->name_table;
        node_table_reduce(t);
        for (iter = 8; t->split > 0 && iter; iter--) {
                upper = &t->array[t->split + t->size / 2];
                for (nodep = &t->array[t->split]; *nodep;
                     nodep = &(*nodep)->name_next);

static void unhash_name(struct fuse *f, struct node *node)
        size_t hash = name_hash(f, node->parent->nodeid, node->name);
        struct node **nodep = &f->name_table.array[hash];
        for (; *nodep != NULL; nodep = &(*nodep)->name_next)
                if (*nodep == node) {
                        *nodep = node->name_next;
                        node->name_next = NULL;
                        unref_node(f, node->parent);
                        if (node->name != node->inline_name)
                        if (f->name_table.use < f->name_table.size / 4)
        fuse_log(FUSE_LOG_ERR,
                 "fuse internal error: unable to unhash node: %llu\n",
                 (unsigned long long) node->nodeid);
static void rehash_name(struct fuse *f)
        struct node_table *t = &f->name_table;
        if (t->split == t->size / 2)
        for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
                struct node *node = *nodep;
                size_t newhash = name_hash(f, node->parent->nodeid, node->name);
                if (newhash != hash) {
                        *nodep = node->name_next;
                        node->name_next = t->array[newhash];
                        t->array[newhash] = node;
                next = &node->name_next;
        if (t->split == t->size / 2)
                node_table_resize(t);

static int hash_name(struct fuse *f, struct node *node, fuse_ino_t parentid,
                     const char *name)
        size_t hash = name_hash(f, parentid, name);
        struct node *parent = get_node(f, parentid);
        if (strlen(name) < sizeof(node->inline_name)) {
                strcpy(node->inline_name, name);
                node->name = node->inline_name;
        node->name = strdup(name);
        if (node->name == NULL)
        node->parent = parent;
        node->name_next = f->name_table.array[hash];
        f->name_table.array[hash] = node;
        if (f->name_table.use >= f->name_table.size / 2)

static void delete_node(struct fuse *f, struct node *node)
        fuse_log(FUSE_LOG_DEBUG, "DELETE: %llu\n",
                 (unsigned long long) node->nodeid);
        assert(node->treelock == 0);
        unhash_name(f, node);
        remove_node_lru(node);

static void unref_node(struct fuse *f, struct node *node)
        assert(node->refctr > 0);
        delete_node(f, node);

static fuse_ino_t next_id(struct fuse *f)
        do {
                f->ctr = (f->ctr + 1) & 0xffffffff;
        } while (f->ctr == 0 || f->ctr == FUSE_UNKNOWN_INO ||
                 get_node_nocheck(f, f->ctr) != NULL);
static struct node *lookup_node(struct fuse *f, fuse_ino_t parent,
                                const char *name)
        size_t hash = name_hash(f, parent, name);
        for (node = f->name_table.array[hash]; node != NULL; node = node->name_next)
                if (node->parent->nodeid == parent &&
                    strcmp(node->name, name) == 0)

static void inc_nlookup(struct node *node)

static struct node *find_node(struct fuse *f, fuse_ino_t parent,
                              const char *name)
        pthread_mutex_lock(&f->lock);
        node = get_node(f, parent);
        node = lookup_node(f, parent, name);
        node = alloc_node(f);
        node->nodeid = next_id(f);
        node->generation = f->generation;
        if (f->conf.remember)
        if (hash_name(f, node, parent, name) == -1) {
        if (lru_enabled(f)) {
                struct node_lru *lnode = node_lru(node);
                init_list_head(&lnode->lru);
        } else if (lru_enabled(f) && node->nlookup == 1) {
                remove_node_lru(node);
        pthread_mutex_unlock(&f->lock);
static int lookup_path_in_cache(struct fuse *f,
        char *tmp = strdup(path);
        pthread_mutex_lock(&f->lock);
        char *path_element = strtok_r(tmp, "/", &save_ptr);
        while (path_element != NULL) {
                struct node *node = lookup_node(f, ino, path_element);
                path_element = strtok_r(NULL, "/", &save_ptr);
        pthread_mutex_unlock(&f->lock);
static char *add_name(char **buf, unsigned *bufsize, char *s,
                      const char *name)
        size_t len = strlen(name);
        if (s - len <= *buf) {
                unsigned pathlen = *bufsize - (s - *buf);
                unsigned newbufsize = *bufsize;
                while (newbufsize < pathlen + len + 1) {
                        if (newbufsize >= 0x80000000)
                                newbufsize = 0xffffffff;
                newbuf = realloc(*buf, newbufsize);
                s = newbuf + newbufsize - pathlen;
                memmove(s, newbuf + *bufsize - pathlen, pathlen);
                *bufsize = newbufsize;
        memcpy(s, name, len);
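/*
 * Editor's sketch (not part of the original source): add_name() builds the
 * path right-to-left.  The cursor starts at the terminating NUL at the end
 * of the buffer and each call prepends "/<name>" in front of it, growing
 * (and shifting) the buffer when the component does not fit:
 *
 *      char *buf = malloc(bufsize);
 *      char *s = buf + bufsize - 1;
 *      *s = '\0';
 *      s = add_name(&buf, &bufsize, s, "file");  // buffer now ends ".../file"
 *      s = add_name(&buf, &bufsize, s, "dir");   // buffer now ends ".../dir/file"
 *      memmove(buf, s, bufsize - (s - buf));     // slide the result to the front
 */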
static void unlock_path(struct fuse *f, fuse_ino_t nodeid, struct node *wnode,
                        struct node *end)
        assert(wnode->treelock == TREELOCK_WRITE);
        for (node = get_node(f, nodeid);
             node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent) {
                assert(node->treelock != 0);
                assert(node->treelock != TREELOCK_WAIT_OFFSET);
                assert(node->treelock != TREELOCK_WRITE);
                if (node->treelock == TREELOCK_WAIT_OFFSET)

static int try_get_path(struct fuse *f, fuse_ino_t nodeid, const char *name,
                        char **path, struct node **wnodep, bool need_lock)
        unsigned bufsize = 256;
        struct node *wnode = NULL;
        buf = malloc(bufsize);
        s = buf + bufsize - 1;
        s = add_name(&buf, &bufsize, s, name);
        wnode = lookup_node(f, nodeid, name);
        if (wnode->treelock != 0) {
                if (wnode->treelock > 0)
                        wnode->treelock += TREELOCK_WAIT_OFFSET;
        wnode->treelock = TREELOCK_WRITE;
        for (node = get_node(f, nodeid); node->nodeid != FUSE_ROOT_ID;
             node = node->parent) {
                if (node->name == NULL || node->parent == NULL)
                s = add_name(&buf, &bufsize, s, node->name);
                if (node->treelock < 0)
        memmove(buf, s, bufsize - (s - buf));
        unlock_path(f, nodeid, wnode, node);
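/*
 * Editor's note (summary, not in the original source): `treelock` is a
 * per-node read/write count protecting the path from that node up to the
 * root while a request is using it.  A positive value counts readers, the
 * sentinel TREELOCK_WRITE (-1) marks an exclusive writer, and adding
 * TREELOCK_WAIT_OFFSET (INT_MIN) flags a node on which a writer is queued so
 * that new readers back off.  When try_get_path()/try_get_path2() cannot take
 * the locks it returns -EAGAIN and the request is parked on f->lockq until
 * wake_up_queued() finds the locks free again.
 */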
static int try_get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
                         fuse_ino_t nodeid2, const char *name2,
                         char **path1, char **path2,
                         struct node **wnode1, struct node **wnode2)
        err = try_get_path(f, nodeid1, name1, path1, wnode1, true);
        err = try_get_path(f, nodeid2, name2, path2, wnode2, true);
        struct node *wn1 = wnode1 ? *wnode1 : NULL;
        unlock_path(f, nodeid1, wn1, NULL);

static void queue_element_wakeup(struct fuse *f, struct lock_queue_element *qe)
        if (get_node(f, qe->nodeid1)->treelock == 0)
                pthread_cond_signal(&qe->cond);
        err = try_get_path(f, qe->nodeid1, qe->name1, qe->path1,
        err = try_get_path2(f, qe->nodeid1, qe->name1, qe->nodeid2,
                            qe->name2, qe->path1, qe->path2, qe->wnode1,
        pthread_cond_signal(&qe->cond);

static void wake_up_queued(struct fuse *f)
        struct lock_queue_element *qe;
        for (qe = f->lockq; qe != NULL; qe = qe->next)
                queue_element_wakeup(f, qe);
static void debug_path(struct fuse *f, const char *msg, fuse_ino_t nodeid,
                       const char *name, bool wr)
        if (f->conf.debug) {
                struct node *wnode = NULL;
                wnode = lookup_node(f, nodeid, name);
                fuse_log(FUSE_LOG_DEBUG, "%s %llu (w)\n",
                         msg, (unsigned long long) wnode->nodeid);
                fuse_log(FUSE_LOG_DEBUG, "%s %llu\n",
                         msg, (unsigned long long) nodeid);

static void queue_path(struct fuse *f, struct lock_queue_element *qe)
        struct lock_queue_element **qp;
        pthread_cond_init(&qe->cond, NULL);
        for (qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);

static void dequeue_path(struct fuse *f, struct lock_queue_element *qe)
        struct lock_queue_element **qp;
        pthread_cond_destroy(&qe->cond);
        for (qp = &f->lockq; *qp != qe; qp = &(*qp)->next);

static int wait_path(struct fuse *f, struct lock_queue_element *qe)
        do {
                pthread_cond_wait(&qe->cond, &f->lock);
        } while (!qe->done);
        dequeue_path(f, qe);
static int get_path_common(struct fuse *f, fuse_ino_t nodeid, const char *name,
                           char **path, struct node **wnode)
        pthread_mutex_lock(&f->lock);
        err = try_get_path(f, nodeid, name, path, wnode, true);
        if (err == -EAGAIN) {
                struct lock_queue_element qe = {
                debug_path(f, "QUEUE PATH", nodeid, name, !!wnode);
                err = wait_path(f, &qe);
                debug_path(f, "DEQUEUE PATH", nodeid, name, !!wnode);
        pthread_mutex_unlock(&f->lock);

static int get_path(struct fuse *f, fuse_ino_t nodeid, char **path)
        return get_path_common(f, nodeid, NULL, path, NULL);

static int get_path_nullok(struct fuse *f, fuse_ino_t nodeid, char **path)
        if (f->conf.nullpath_ok) {
        err = get_path_common(f, nodeid, NULL, path, NULL);

static int get_path_name(struct fuse *f, fuse_ino_t nodeid, const char *name,
                         char **path)
        return get_path_common(f, nodeid, name, path, NULL);

static int get_path_wrlock(struct fuse *f, fuse_ino_t nodeid, const char *name,
                           char **path, struct node **wnode)
        return get_path_common(f, nodeid, name, path, wnode);
#if defined(__FreeBSD__)
#define CHECK_DIR_LOOP
#endif

#if defined(CHECK_DIR_LOOP)
static int check_dir_loop(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
                          fuse_ino_t nodeid2, const char *name2)
        struct node *node, *node1, *node2;
        node1 = lookup_node(f, nodeid1, name1);
        id1 = node1 ? node1->nodeid : nodeid1;
        node2 = lookup_node(f, nodeid2, name2);
        id2 = node2 ? node2->nodeid : nodeid2;
        for (node = get_node(f, id2); node->nodeid != FUSE_ROOT_ID;
             node = node->parent) {
                if (node->name == NULL || node->parent == NULL)
                if (node->nodeid != id2 && node->nodeid == id1)
        for (node = get_node(f, id1); node->nodeid != FUSE_ROOT_ID;
             node = node->parent) {
                if (node->name == NULL || node->parent == NULL)
                if (node->nodeid != id1 && node->nodeid == id2)

static int get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
                     fuse_ino_t nodeid2, const char *name2,
                     char **path1, char **path2,
                     struct node **wnode1, struct node **wnode2)
        pthread_mutex_lock(&f->lock);
#if defined(CHECK_DIR_LOOP)
        err = check_dir_loop(f, nodeid1, name1, nodeid2, name2);
        err = try_get_path2(f, nodeid1, name1, nodeid2, name2,
                            path1, path2, wnode1, wnode2);
        if (err == -EAGAIN) {
                struct lock_queue_element qe = {
                debug_path(f, "QUEUE PATH1", nodeid1, name1, !!wnode1);
                debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
                err = wait_path(f, &qe);
                debug_path(f, "DEQUEUE PATH1", nodeid1, name1, !!wnode1);
                debug_path(f, " PATH2", nodeid2, name2, !!wnode2);
#if defined(CHECK_DIR_LOOP)
        pthread_mutex_unlock(&f->lock);
static void free_path_wrlock(struct fuse *f, fuse_ino_t nodeid,
                             struct node *wnode, char *path)
        pthread_mutex_lock(&f->lock);
        unlock_path(f, nodeid, wnode, NULL);
        pthread_mutex_unlock(&f->lock);

static void free_path(struct fuse *f, fuse_ino_t nodeid, char *path)
        free_path_wrlock(f, nodeid, NULL, path);

static void free_path2(struct fuse *f, fuse_ino_t nodeid1, fuse_ino_t nodeid2,
                       struct node *wnode1, struct node *wnode2,
                       char *path1, char *path2)
        pthread_mutex_lock(&f->lock);
        unlock_path(f, nodeid1, wnode1, NULL);
        unlock_path(f, nodeid2, wnode2, NULL);
        pthread_mutex_unlock(&f->lock);
static void forget_node(struct fuse *f, fuse_ino_t nodeid, uint64_t nlookup)
        if (nodeid == FUSE_ROOT_ID)
        pthread_mutex_lock(&f->lock);
        node = get_node(f, nodeid);
        while (node->nlookup == nlookup && node->treelock) {
                struct lock_queue_element qe = {
                debug_path(f, "QUEUE PATH (forget)", nodeid, NULL, false);
                do {
                        pthread_cond_wait(&qe.cond, &f->lock);
                } while (node->nlookup == nlookup && node->treelock);
                dequeue_path(f, &qe);
                debug_path(f, "DEQUEUE_PATH (forget)", nodeid, NULL, false);
        assert(node->nlookup >= nlookup);
        node->nlookup -= nlookup;
        if (!node->nlookup) {
                unref_node(f, node);
        } else if (lru_enabled(f) && node->nlookup == 1) {
                set_forget_time(f, node);
        pthread_mutex_unlock(&f->lock);
static void unlink_node(struct fuse *f, struct node *node)
        if (f->conf.remember) {
                assert(node->nlookup > 1);
        unhash_name(f, node);

static void remove_node(struct fuse *f, fuse_ino_t dir, const char *name)
        pthread_mutex_lock(&f->lock);
        node = lookup_node(f, dir, name);
        unlink_node(f, node);
        pthread_mutex_unlock(&f->lock);

static int rename_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
                       fuse_ino_t newdir, const char *newname, int hide)
        struct node *newnode;
        pthread_mutex_lock(&f->lock);
        node = lookup_node(f, olddir, oldname);
        newnode = lookup_node(f, newdir, newname);
        if (newnode != NULL) {
                fuse_log(FUSE_LOG_ERR, "fuse: hidden file got created during hiding\n");
                unlink_node(f, newnode);
        unhash_name(f, node);
        if (hash_name(f, node, newdir, newname) == -1) {
        node->is_hidden = 1;
        pthread_mutex_unlock(&f->lock);

static int exchange_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
                         fuse_ino_t newdir, const char *newname)
        struct node *oldnode;
        struct node *newnode;
        pthread_mutex_lock(&f->lock);
        oldnode = lookup_node(f, olddir, oldname);
        newnode = lookup_node(f, newdir, newname);
        unhash_name(f, oldnode);
        unhash_name(f, newnode);
        if (hash_name(f, oldnode, newdir, newname) == -1)
        if (hash_name(f, newnode, olddir, oldname) == -1)
        pthread_mutex_unlock(&f->lock);
static void set_stat(struct fuse *f, fuse_ino_t nodeid, struct stat *stbuf)
        if (!f->conf.use_ino)
                stbuf->st_ino = nodeid;
        if (f->conf.set_mode) {
                if (f->conf.dmask && S_ISDIR(stbuf->st_mode))
                        stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
                                         (0777 & ~f->conf.dmask);
                else if (f->conf.fmask)
                        stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
                                         (0777 & ~f->conf.fmask);
                else
                        stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
                                         (0777 & ~f->conf.umask);
        if (f->conf.set_uid)
                stbuf->st_uid = f->conf.uid;
        if (f->conf.set_gid)
                stbuf->st_gid = f->conf.gid;
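/*
 * Editor's example (not in the original source): with "-o umask=022" the
 * permission bits reported to the kernel are overridden while the file type
 * bits (S_IFMT) are preserved.  For a regular file whose backing mode is
 * 0100666:
 *
 *      mode_t mode  = 0100666;
 *      mode_t umask = 022;
 *      mode_t shown = (mode & S_IFMT) | (0777 & ~umask);
 *      // shown == 0100755
 */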
static void fuse_intr_sighandler(int sig)

struct fuse_intr_data {
        pthread_cond_t cond;
        /* ... */
};

static void fuse_interrupt(fuse_req_t req, void *d_)
        struct fuse_intr_data *d = d_;
        struct fuse *f = req_fuse(req);
        if (d->id == pthread_self())
        pthread_mutex_lock(&f->lock);
        while (!d->finished) {
                struct timespec timeout;
                pthread_kill(d->id, f->conf.intr_signal);
                gettimeofday(&now, NULL);
                timeout.tv_sec = now.tv_sec + 1;
                timeout.tv_nsec = now.tv_usec * 1000;
                pthread_cond_timedwait(&d->cond, &f->lock, &timeout);
        pthread_mutex_unlock(&f->lock);

static void fuse_do_finish_interrupt(struct fuse *f, fuse_req_t req,
                                     struct fuse_intr_data *d)
        pthread_mutex_lock(&f->lock);
        pthread_cond_broadcast(&d->cond);
        pthread_mutex_unlock(&f->lock);
        pthread_cond_destroy(&d->cond);

static void fuse_do_prepare_interrupt(fuse_req_t req, struct fuse_intr_data *d)
        d->id = pthread_self();
        pthread_cond_init(&d->cond, NULL);

static inline void fuse_finish_interrupt(struct fuse *f, fuse_req_t req,
                                         struct fuse_intr_data *d)
        fuse_do_finish_interrupt(f, req, d);

static inline void fuse_prepare_interrupt(struct fuse *f, fuse_req_t req,
                                          struct fuse_intr_data *d)
        fuse_do_prepare_interrupt(req, d);
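/*
 * Editor's note (summary, not in the original source): interrupt handling
 * brackets every blocking filesystem call.  fuse_prepare_interrupt() records
 * the worker thread and arranges for fuse_interrupt() to be called if the
 * kernel interrupts the request; that callback repeatedly sends
 * f->conf.intr_signal (SIGUSR1 by default) to the worker and waits on d->cond
 * with a one-second timeout until fuse_finish_interrupt() marks the request
 * finished and broadcasts the condition.  The signal only needs to exist so
 * that blocking syscalls inside the filesystem return EINTR.
 */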
static const char* file_info_string(struct fuse_file_info *fi,
                                    char* buf, size_t len)
        snprintf(buf, len, "%llu", (unsigned long long) fi->fh);

int fuse_fs_getattr(struct fuse_fs *fs, const char *path, struct stat *buf,
                    struct fuse_file_info *fi)
        if (fs->op.getattr) {
                fuse_log(FUSE_LOG_DEBUG, "getattr[%s] %s\n",
                         file_info_string(fi, buf, sizeof(buf)),
                return fs->op.getattr(path, buf, fi);
int fuse_fs_rename(struct fuse_fs *fs, const char *oldpath,
                   const char *newpath, unsigned int flags)
        if (fs->op.rename) {
                fuse_log(FUSE_LOG_DEBUG, "rename %s %s 0x%x\n", oldpath, newpath,
                return fs->op.rename(oldpath, newpath, flags);

int fuse_fs_unlink(struct fuse_fs *fs, const char *path)
        if (fs->op.unlink) {
                fuse_log(FUSE_LOG_DEBUG, "unlink %s\n", path);
                return fs->op.unlink(path);

int fuse_fs_rmdir(struct fuse_fs *fs, const char *path)
        fuse_log(FUSE_LOG_DEBUG, "rmdir %s\n", path);
        return fs->op.rmdir(path);

int fuse_fs_symlink(struct fuse_fs *fs, const char *linkname, const char *path)
        if (fs->op.symlink) {
                fuse_log(FUSE_LOG_DEBUG, "symlink %s %s\n", linkname, path);
                return fs->op.symlink(linkname, path);

int fuse_fs_link(struct fuse_fs *fs, const char *oldpath, const char *newpath)
        fuse_log(FUSE_LOG_DEBUG, "link %s %s\n", oldpath, newpath);
        return fs->op.link(oldpath, newpath);
int fuse_fs_release(struct fuse_fs *fs, const char *path,
                    struct fuse_file_info *fi)
        if (fs->op.release) {
                fuse_log(FUSE_LOG_DEBUG, "release%s[%llu] flags: 0x%x\n",
                         fi->flush ? "+flush" : "",
                         (unsigned long long) fi->fh, fi->flags);
                return fs->op.release(path, fi);

int fuse_fs_opendir(struct fuse_fs *fs, const char *path,
                    struct fuse_file_info *fi)
        if (fs->op.opendir) {
                fuse_log(FUSE_LOG_DEBUG, "opendir flags: 0x%x %s\n", fi->flags,
                err = fs->op.opendir(path, fi);
                if (fs->debug && !err)
                        fuse_log(FUSE_LOG_DEBUG, " opendir[%llu] flags: 0x%x %s\n",
                                 (unsigned long long) fi->fh, fi->flags, path);

int fuse_fs_open(struct fuse_fs *fs, const char *path,
                 struct fuse_file_info *fi)
        fuse_log(FUSE_LOG_DEBUG, "open flags: 0x%x %s\n", fi->flags,
        err = fs->op.open(path, fi);
        if (fs->debug && !err)
                fuse_log(FUSE_LOG_DEBUG, " open[%llu] flags: 0x%x %s\n",
                         (unsigned long long) fi->fh, fi->flags, path);
        for (i = 0; i < buf->count; i++)

int fuse_fs_read_buf(struct fuse_fs *fs, const char *path,
                     struct fuse_bufvec **bufp, size_t size, off_t off,
                     struct fuse_file_info *fi)
        if (fs->op.read || fs->op.read_buf) {
                fuse_log(FUSE_LOG_DEBUG,
                         "read[%llu] %zu bytes from %llu flags: 0x%x\n",
                         (unsigned long long) fi->fh,
                         size, (unsigned long long) off, fi->flags);
                if (fs->op.read_buf) {
                        res = fs->op.read_buf(path, bufp, size, off, fi);
                *buf = FUSE_BUFVEC_INIT(size);
                res = fs->op.read(path, mem, size, off, fi);
                if (fs->debug && res >= 0)
                        fuse_log(FUSE_LOG_DEBUG, " read[%llu] %zu bytes from %llu\n",
                                 (unsigned long long) fi->fh,
                                 (unsigned long long) off);
                fuse_log(FUSE_LOG_ERR, "fuse: read too many bytes\n");
int fuse_fs_read(struct fuse_fs *fs, const char *path, char *mem, size_t size,
                 off_t off, struct fuse_file_info *fi)
        if (fs->op.read || fs->op.read_buf) {
                fuse_log(FUSE_LOG_DEBUG,
                         "read[%llu] %zu bytes from %llu flags: 0x%x\n",
                         (unsigned long long) fi->fh,
                         size, (unsigned long long) off, fi->flags);
                if (fs->op.read_buf) {
                        res = fs->op.read_buf(path, &buf, size, off, fi);
                res = fs->op.read(path, mem, size, off, fi);
                if (fs->debug && res >= 0)
                        fuse_log(FUSE_LOG_DEBUG, " read[%llu] %u bytes from %llu\n",
                                 (unsigned long long) fi->fh,
                                 (unsigned long long) off);
                if (res >= 0 && res > (int) size)
                        fuse_log(FUSE_LOG_ERR, "fuse: read too many bytes\n");
int fuse_fs_write_buf(struct fuse_fs *fs, const char *path,
                      struct fuse_bufvec *buf, off_t off,
                      struct fuse_file_info *fi)
        if (fs->op.write_buf || fs->op.write) {
                assert(buf->idx == 0 && buf->off == 0);
                fuse_log(FUSE_LOG_DEBUG,
                         "write%s[%llu] %zu bytes to %llu flags: 0x%x\n",
                         (unsigned long long) fi->fh,
                         (unsigned long long) off,
                if (fs->op.write_buf) {
                        res = fs->op.write_buf(path, buf, off, fi);
                if (buf->count == 1 &&
                        flatbuf = &buf->buf[0];
                flatbuf = &tmp.buf[0];
                res = fs->op.write(path, flatbuf->mem, flatbuf->size,
                if (fs->debug && res >= 0)
                        fuse_log(FUSE_LOG_DEBUG, " write%s[%llu] %u bytes to %llu\n",
                                 (unsigned long long) fi->fh, res,
                                 (unsigned long long) off);
                if (res > (int) size)
                        fuse_log(FUSE_LOG_ERR, "fuse: wrote too many bytes\n");

int fuse_fs_write(struct fuse_fs *fs, const char *path, const char *mem,
                  size_t size, off_t off, struct fuse_file_info *fi)
        bufv.buf[0].mem = (void *) mem;
        return fuse_fs_write_buf(fs, path, &bufv, off, fi);
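/*
 * Editor's sketch (not in the original source): fuse_fs_write() adapts a
 * plain (mem, size, off) write into the generalized buffer-vector interface
 * used by fuse_fs_write_buf().  FUSE_BUFVEC_INIT() builds a one-element
 * vector whose single memory buffer only needs its data pointer filled in:
 *
 *      struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(size);
 *      bufv.buf[0].mem = (void *) mem;
 *      // bufv.count == 1, bufv.buf[0].size == size, flags == 0 (memory, not fd)
 *      res = fuse_fs_write_buf(fs, path, &bufv, off, fi);
 */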
int fuse_fs_fsync(struct fuse_fs *fs, const char *path, int datasync,
                  struct fuse_file_info *fi)
        fuse_log(FUSE_LOG_DEBUG, "fsync[%llu] datasync: %i\n",
                 (unsigned long long) fi->fh, datasync);
        return fs->op.fsync(path, datasync, fi);

int fuse_fs_fsyncdir(struct fuse_fs *fs, const char *path, int datasync,
                     struct fuse_file_info *fi)
        if (fs->op.fsyncdir) {
                fuse_log(FUSE_LOG_DEBUG, "fsyncdir[%llu] datasync: %i\n",
                         (unsigned long long) fi->fh, datasync);
                return fs->op.fsyncdir(path, datasync, fi);

int fuse_fs_flush(struct fuse_fs *fs, const char *path,
                  struct fuse_file_info *fi)
        fuse_log(FUSE_LOG_DEBUG, "flush[%llu]\n",
                 (unsigned long long) fi->fh);
        return fs->op.flush(path, fi);

int fuse_fs_statfs(struct fuse_fs *fs, const char *path, struct statvfs *buf)
        if (fs->op.statfs) {
                fuse_log(FUSE_LOG_DEBUG, "statfs %s\n", path);
                return fs->op.statfs(path, buf);
        buf->f_namemax = 255;

int fuse_fs_releasedir(struct fuse_fs *fs, const char *path,
                       struct fuse_file_info *fi)
        if (fs->op.releasedir) {
                fuse_log(FUSE_LOG_DEBUG, "releasedir[%llu] flags: 0x%x\n",
                         (unsigned long long) fi->fh, fi->flags);
                return fs->op.releasedir(path, fi);
int fuse_fs_readdir(struct fuse_fs *fs, const char *path, void *buf,
                    fuse_fill_dir_t filler, off_t off,
                    struct fuse_file_info *fi, enum fuse_readdir_flags flags)
        if (fs->op.readdir) {
                fuse_log(FUSE_LOG_DEBUG, "readdir%s[%llu] from %llu\n",
                         (flags & FUSE_READDIR_PLUS) ? "plus" : "",
                         (unsigned long long) fi->fh,
                         (unsigned long long) off);
                return fs->op.readdir(path, buf, filler, off, fi, flags);

int fuse_fs_create(struct fuse_fs *fs, const char *path, mode_t mode,
                   struct fuse_file_info *fi)
        if (fs->op.create) {
                fuse_log(FUSE_LOG_DEBUG,
                         "create flags: 0x%x %s 0%o umask=0%03o\n",
                         fi->flags, path, mode,
                err = fs->op.create(path, mode, fi);
                if (fs->debug && !err)
                        fuse_log(FUSE_LOG_DEBUG, " create[%llu] flags: 0x%x %s\n",
                                 (unsigned long long) fi->fh, fi->flags, path);
int fuse_fs_lock(struct fuse_fs *fs, const char *path,
                 struct fuse_file_info *fi, int cmd, struct flock *lock)
        fuse_log(FUSE_LOG_DEBUG,
                 "lock[%llu] %s %s start: %llu len: %llu pid: %llu\n",
                 (unsigned long long) fi->fh,
                 (cmd == F_GETLK ? "F_GETLK" :
                  (cmd == F_SETLK ? "F_SETLK" :
                   (cmd == F_SETLKW ? "F_SETLKW" : "???"))),
                 (lock->l_type == F_RDLCK ? "F_RDLCK" :
                  (lock->l_type == F_WRLCK ? "F_WRLCK" :
                   (lock->l_type == F_UNLCK ? "F_UNLCK" :
                 (unsigned long long) lock->l_start,
                 (unsigned long long) lock->l_len,
                 (unsigned long long) lock->l_pid);
        return fs->op.lock(path, fi, cmd, lock);

int fuse_fs_flock(struct fuse_fs *fs, const char *path,
                  struct fuse_file_info *fi, int op)
        int xop = op & ~LOCK_NB;
        fuse_log(FUSE_LOG_DEBUG, "lock[%llu] %s%s\n",
                 (unsigned long long) fi->fh,
                 xop == LOCK_SH ? "LOCK_SH" :
                 (xop == LOCK_EX ? "LOCK_EX" :
                  (xop == LOCK_UN ? "LOCK_UN" : "???")),
                 (op & LOCK_NB) ? "|LOCK_NB" : "");
        return fs->op.flock(path, fi, op);
int fuse_fs_chown(struct fuse_fs *fs, const char *path, uid_t uid,
                  gid_t gid, struct fuse_file_info *fi)
        fuse_log(FUSE_LOG_DEBUG, "chown[%s] %s %lu %lu\n",
                 file_info_string(fi, buf, sizeof(buf)),
                 path, (unsigned long) uid, (unsigned long) gid);
        return fs->op.chown(path, uid, gid, fi);

int fuse_fs_truncate(struct fuse_fs *fs, const char *path, off_t size,
                     struct fuse_file_info *fi)
        if (fs->op.truncate) {
                fuse_log(FUSE_LOG_DEBUG, "truncate[%s] %llu\n",
                         file_info_string(fi, buf, sizeof(buf)),
                         (unsigned long long) size);
                return fs->op.truncate(path, size, fi);

int fuse_fs_utimens(struct fuse_fs *fs, const char *path,
                    const struct timespec tv[2], struct fuse_file_info *fi)
        if (fs->op.utimens) {
                fuse_log(FUSE_LOG_DEBUG, "utimens[%s] %s %li.%09lu %li.%09lu\n",
                         file_info_string(fi, buf, sizeof(buf)),
                         path, tv[0].tv_sec, tv[0].tv_nsec,
                         tv[1].tv_sec, tv[1].tv_nsec);
                return fs->op.utimens(path, tv, fi);
int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
        if (fs->op.access) {
                fuse_log(FUSE_LOG_DEBUG, "access %s 0%o\n", path, mask);
                return fs->op.access(path, mask);

int fuse_fs_readlink(struct fuse_fs *fs, const char *path, char *buf,
                     size_t len)
        if (fs->op.readlink) {
                fuse_log(FUSE_LOG_DEBUG, "readlink %s %lu\n", path,
                         (unsigned long) len);
                return fs->op.readlink(path, buf, len);

int fuse_fs_mknod(struct fuse_fs *fs, const char *path, mode_t mode,
                  dev_t rdev)
        fuse_log(FUSE_LOG_DEBUG, "mknod %s 0%o 0x%llx umask=0%03o\n",
                 path, mode, (unsigned long long) rdev,
        return fs->op.mknod(path, mode, rdev);

int fuse_fs_mkdir(struct fuse_fs *fs, const char *path, mode_t mode)
        fuse_log(FUSE_LOG_DEBUG, "mkdir %s 0%o umask=0%03o\n",
        return fs->op.mkdir(path, mode);
int fuse_fs_setxattr(struct fuse_fs *fs, const char *path, const char *name,
                     const char *value, size_t size, int flags)
        if (fs->op.setxattr) {
                fuse_log(FUSE_LOG_DEBUG, "setxattr %s %s %lu 0x%x\n",
                         path, name, (unsigned long) size, flags);
                return fs->op.setxattr(path, name, value, size, flags);

int fuse_fs_getxattr(struct fuse_fs *fs, const char *path, const char *name,
                     char *value, size_t size)
        if (fs->op.getxattr) {
                fuse_log(FUSE_LOG_DEBUG, "getxattr %s %s %lu\n",
                         path, name, (unsigned long) size);
                return fs->op.getxattr(path, name, value, size);

int fuse_fs_listxattr(struct fuse_fs *fs, const char *path, char *list,
                      size_t size)
        if (fs->op.listxattr) {
                fuse_log(FUSE_LOG_DEBUG, "listxattr %s %lu\n",
                         path, (unsigned long) size);
                return fs->op.listxattr(path, list, size);

int fuse_fs_bmap(struct fuse_fs *fs, const char *path, size_t blocksize,
                 uint64_t *idx)
        fuse_log(FUSE_LOG_DEBUG, "bmap %s blocksize: %lu index: %llu\n",
                 path, (unsigned long) blocksize,
                 (unsigned long long) *idx);
        return fs->op.bmap(path, blocksize, idx);

int fuse_fs_removexattr(struct fuse_fs *fs, const char *path, const char *name)
        if (fs->op.removexattr) {
                fuse_log(FUSE_LOG_DEBUG, "removexattr %s %s\n", path, name);
                return fs->op.removexattr(path, name);
int fuse_fs_ioctl(struct fuse_fs *fs, const char *path, unsigned int cmd,
                  void *arg, struct fuse_file_info *fi, unsigned int flags,
                  void *data)
        fuse_log(FUSE_LOG_DEBUG, "ioctl[%llu] 0x%x flags: 0x%x\n",
                 (unsigned long long) fi->fh, cmd, flags);
        return fs->op.ioctl(path, cmd, arg, fi, flags, data);

int fuse_fs_poll(struct fuse_fs *fs, const char *path,
                 struct fuse_file_info *fi, struct fuse_pollhandle *ph,
                 unsigned *reventsp)
        fuse_log(FUSE_LOG_DEBUG, "poll[%llu] ph: %p, events 0x%x\n",
                 (unsigned long long) fi->fh, ph,
        res = fs->op.poll(path, fi, ph, reventsp);
        if (fs->debug && !res)
                fuse_log(FUSE_LOG_DEBUG, " poll[%llu] revents: 0x%x\n",
                         (unsigned long long) fi->fh, *reventsp);

int fuse_fs_fallocate(struct fuse_fs *fs, const char *path, int mode,
                      off_t offset, off_t length, struct fuse_file_info *fi)
        if (fs->op.fallocate) {
                fuse_log(FUSE_LOG_DEBUG,
                         "fallocate %s mode %x, offset: %llu, length: %llu\n",
                         (unsigned long long) offset,
                         (unsigned long long) length);
                return fs->op.fallocate(path, mode, offset, length, fi);

ssize_t fuse_fs_copy_file_range(struct fuse_fs *fs, const char *path_in,
                                struct fuse_file_info *fi_in, off_t off_in,
                                const char *path_out,
                                struct fuse_file_info *fi_out, off_t off_out,
                                size_t len, int flags)
        if (fs->op.copy_file_range) {
                fuse_log(FUSE_LOG_DEBUG, "copy_file_range from %s:%llu to "
                         "%s:%llu, length: %llu\n",
                         (unsigned long long) off_in,
                         (unsigned long long) off_out,
                         (unsigned long long) len);
                return fs->op.copy_file_range(path_in, fi_in, off_in, path_out,
                                              fi_out, off_out, len, flags);

off_t fuse_fs_lseek(struct fuse_fs *fs, const char *path, off_t off,
                    int whence, struct fuse_file_info *fi)
        fuse_log(FUSE_LOG_DEBUG, "lseek[%s] %llu %d\n",
                 file_info_string(fi, buf, sizeof(buf)),
                 (unsigned long long) off, whence);
        return fs->op.lseek(path, off, whence, fi);
static int is_open(struct fuse *f, fuse_ino_t dir, const char *name)
        pthread_mutex_lock(&f->lock);
        node = lookup_node(f, dir, name);
        if (node && node->open_count > 0)
        pthread_mutex_unlock(&f->lock);

static char *hidden_name(struct fuse *f, fuse_ino_t dir, const char *oldname,
                         char *newname, size_t bufsize)
        struct node *newnode;
        pthread_mutex_lock(&f->lock);
        node = lookup_node(f, dir, oldname);
        pthread_mutex_unlock(&f->lock);
        snprintf(newname, bufsize, ".fuse_hidden%08x%08x",
                 (unsigned int) node->nodeid, f->hidectr);
        newnode = lookup_node(f, dir, newname);
        res = try_get_path(f, dir, newname, &newpath, NULL, false);
        pthread_mutex_unlock(&f->lock);
        memset(&buf, 0, sizeof(buf));
        res = fuse_fs_getattr(f->fs, newpath, &buf, NULL);
        } while (res == 0 && --failctr);

static int hide_node(struct fuse *f, const char *oldpath,
                     fuse_ino_t dir, const char *oldname)
        newpath = hidden_name(f, dir, oldname, newname, sizeof(newname));
        err = fuse_fs_rename(f->fs, oldpath, newpath, 0);
        err = rename_node(f, dir, oldname, dir, newname, 1);
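/*
 * Editor's note (summary, not in the original source): when hard_remove is
 * not enabled and an open file is unlinked or renamed over, the file is kept
 * alive by renaming it to a generated ".fuse_hidden<nodeid><counter>" name in
 * the same directory, e.g. ".fuse_hidden0000002a00000001".  hidden_name()
 * retries with an incremented counter until fuse_fs_getattr() reports that
 * the candidate name does not exist; the hidden file is finally unlinked in
 * fuse_do_release() once the last open file handle is closed.
 */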
static int mtime_eq(const struct stat *stbuf, const struct timespec *ts)
        return stbuf->st_mtime == ts->tv_sec &&
               ST_MTIM_NSEC(stbuf) == ts->tv_nsec;

#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC CLOCK_REALTIME
#endif

static void curr_time(struct timespec *now)
        static clockid_t clockid = CLOCK_MONOTONIC;
        int res = clock_gettime(clockid, now);
        if (res == -1 && errno == EINVAL) {
                clockid = CLOCK_REALTIME;
                res = clock_gettime(clockid, now);
        perror("fuse: clock_gettime");

static void update_stat(struct node *node, const struct stat *stbuf)
        if (node->cache_valid && (!mtime_eq(stbuf, &node->mtime) ||
                                  stbuf->st_size != node->size))
                node->cache_valid = 0;
        node->mtime.tv_sec = stbuf->st_mtime;
        node->mtime.tv_nsec = ST_MTIM_NSEC(stbuf);
        node->size = stbuf->st_size;
        curr_time(&node->stat_updated);
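/*
 * Editor's note (summary, not in the original source): update_stat() backs
 * the auto_cache option.  Whenever fresh attributes are fetched they are
 * compared with the mtime and size remembered in the node; if either has
 * changed, cache_valid is cleared, so open_auto_cache() below will not ask
 * the kernel to keep its page cache for the file on the next open and the
 * stale cached data gets dropped.
 */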
static int do_lookup(struct fuse *f, fuse_ino_t nodeid, const char *name,
                     struct fuse_entry_param *e)
        node = find_node(f, nodeid, name);
        e->ino = node->nodeid;
        if (f->conf.auto_cache) {
                pthread_mutex_lock(&f->lock);
                update_stat(node, &e->attr);
                pthread_mutex_unlock(&f->lock);
        set_stat(f, e->ino, &e->attr);

static int lookup_path(struct fuse *f, fuse_ino_t nodeid,
                       const char *name, const char *path,
                       struct fuse_entry_param *e, struct fuse_file_info *fi)
        res = fuse_fs_getattr(f->fs, path, &e->attr, fi);
        res = do_lookup(f, nodeid, name, e);
        if (res == 0 && f->conf.debug) {
                fuse_log(FUSE_LOG_DEBUG, " NODEID: %llu\n",
                         (unsigned long long) e->ino);
static struct fuse_context_i *fuse_get_context_internal(void)
        return (struct fuse_context_i *) pthread_getspecific(fuse_context_key);

static struct fuse_context_i *fuse_create_context(struct fuse *f)
        struct fuse_context_i *c = fuse_get_context_internal();
        c = (struct fuse_context_i *) calloc(1, sizeof(struct fuse_context_i));
        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate thread specific data\n");
        pthread_setspecific(fuse_context_key, c);
        memset(c, 0, sizeof(*c));

static void fuse_freecontext(void *data)

static int fuse_create_context_key(void)
        pthread_mutex_lock(&fuse_context_lock);
        if (!fuse_context_ref) {
                err = pthread_key_create(&fuse_context_key, fuse_freecontext);
                fuse_log(FUSE_LOG_ERR,
                         "fuse: failed to create thread specific key: %s\n",
                pthread_mutex_unlock(&fuse_context_lock);
        pthread_mutex_unlock(&fuse_context_lock);

static void fuse_delete_context_key(void)
        pthread_mutex_lock(&fuse_context_lock);
        if (!fuse_context_ref) {
                free(pthread_getspecific(fuse_context_key));
                pthread_key_delete(fuse_context_key);
        pthread_mutex_unlock(&fuse_context_lock);
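/*
 * Editor's sketch (not in the original source): the fuse_context returned by
 * fuse_get_context() is stored per worker thread through a pthread key, so
 * each request handler sees the uid/gid/pid of its own caller.  The pattern
 * is the standard pthread_key_create()/pthread_getspecific() idiom:
 *
 *      static pthread_key_t key;
 *      pthread_key_create(&key, free);     // destructor frees per-thread data
 *      if (pthread_getspecific(key) == NULL)
 *              pthread_setspecific(key, calloc(1, sizeof(struct fuse_context_i)));
 *      struct fuse_context_i *c = pthread_getspecific(key);
 */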
static struct fuse *req_fuse_prepare(fuse_req_t req)
        struct fuse_context_i *c = fuse_create_context(req_fuse(req));
        c->ctx.uid = ctx->uid;
        c->ctx.gid = ctx->gid;
        c->ctx.pid = ctx->pid;
        c->ctx.umask = ctx->umask;

static inline void reply_err(fuse_req_t req, int err)

static void reply_entry(fuse_req_t req, const struct fuse_entry_param *e,
                        int err)
        struct fuse *f = req_fuse(req);
        forget_node(f, e->ino, 1);
        reply_err(req, err);
void fuse_fs_init(struct fuse_fs *fs, struct fuse_conn_info *conn,
                  struct fuse_config *cfg)
        if (!fs->op.write_buf)
        uint64_t want_ext_default = conn->want_ext;
        uint32_t want_default = fuse_lower_32_bits(conn->want_ext);
        conn->want = want_default;
        fs->user_data = fs->op.init(conn, cfg);
        rc = convert_to_conn_want_ext(conn, want_ext_default,
        fuse_log(FUSE_LOG_ERR, "fuse: Aborting due to invalid conn want flags.\n");
        _exit(EXIT_FAILURE);

static int fuse_init_intr_signal(int signum, int *installed);

static void fuse_lib_init(void *data, struct fuse_conn_info *conn)
        struct fuse *f = (struct fuse *) data;
        fuse_create_context(f);
        fuse_fs_init(f->fs, conn, &f->conf);
        if (fuse_init_intr_signal(f->conf.intr_signal,
                                  &f->intr_installed) == -1)
                fuse_log(FUSE_LOG_ERR, "fuse: failed to init interrupt signal\n");
void fuse_fs_destroy(struct fuse_fs *fs)
        fs->op.destroy(fs->user_data);

static void fuse_lib_destroy(void *data)
        struct fuse *f = (struct fuse *) data;
        fuse_create_context(f);
        fuse_fs_destroy(f->fs);

static void fuse_lib_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
        struct fuse *f = req_fuse_prepare(req);
        struct node *dot = NULL;
        if (name[0] == '.') {
                int len = strlen(name);
                if (len == 1 || (name[1] == '.' && len == 2)) {
                        pthread_mutex_lock(&f->lock);
                        fuse_log(FUSE_LOG_DEBUG, "LOOKUP-DOT\n");
                        dot = get_node_nocheck(f, parent);
                        pthread_mutex_unlock(&f->lock);
                        reply_entry(req, &e, -ESTALE);
                        fuse_log(FUSE_LOG_DEBUG, "LOOKUP-DOTDOT\n");
                        parent = get_node(f, parent)->parent->nodeid;
                        pthread_mutex_unlock(&f->lock);
        err = get_path_name(f, parent, name, &path);
        struct fuse_intr_data d;
        fuse_log(FUSE_LOG_DEBUG, "LOOKUP %s\n", path);
        fuse_prepare_interrupt(f, req, &d);
        err = lookup_path(f, parent, name, path, &e, NULL);
        if (err == -ENOENT && f->conf.negative_timeout != 0.0) {
        fuse_finish_interrupt(f, req, &d);
        free_path(f, parent, path);
        pthread_mutex_lock(&f->lock);
        pthread_mutex_unlock(&f->lock);
        reply_entry(req, &e, err);
static void do_forget(struct fuse *f, fuse_ino_t ino, uint64_t nlookup)
        fuse_log(FUSE_LOG_DEBUG, "FORGET %llu/%llu\n", (unsigned long long) ino,
                 (unsigned long long) nlookup);
        forget_node(f, ino, nlookup);

static void fuse_lib_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup)
        do_forget(req_fuse(req), ino, nlookup);

static void fuse_lib_forget_multi(fuse_req_t req, size_t count,
                                  struct fuse_forget_data *forgets)
        struct fuse *f = req_fuse(req);
        for (i = 0; i < count; i++)
                do_forget(f, forgets[i].ino, forgets[i].nlookup);
static void fuse_lib_getattr(fuse_req_t req, fuse_ino_t ino,
                             struct fuse_file_info *fi)
        struct fuse *f = req_fuse_prepare(req);
        memset(&buf, 0, sizeof(buf));
        err = get_path_nullok(f, ino, &path);
        err = get_path(f, ino, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_getattr(f->fs, path, &buf, fi);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, ino, path);
        pthread_mutex_lock(&f->lock);
        node = get_node(f, ino);
        if (node->is_hidden && buf.st_nlink > 0)
        if (f->conf.auto_cache)
                update_stat(node, &buf);
        pthread_mutex_unlock(&f->lock);
        set_stat(f, ino, &buf);
        reply_err(req, err);

int fuse_fs_chmod(struct fuse_fs *fs, const char *path, mode_t mode,
                  struct fuse_file_info *fi)
        fuse_log(FUSE_LOG_DEBUG, "chmod[%s] %s %llo\n",
                 file_info_string(fi, buf, sizeof(buf)),
                 path, (unsigned long long) mode);
        return fs->op.chmod(path, mode, fi);
static void fuse_lib_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
                             int valid, struct fuse_file_info *fi)
        struct fuse *f = req_fuse_prepare(req);
        memset(&buf, 0, sizeof(buf));
        err = get_path_nullok(f, ino, &path);
        err = get_path(f, ino, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        if (!err && (valid & FUSE_SET_ATTR_MODE))
                err = fuse_fs_chmod(f->fs, path, attr->st_mode, fi);
        if (!err && (valid & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID))) {
                uid_t uid = (valid & FUSE_SET_ATTR_UID) ?
                        attr->st_uid : (uid_t) -1;
                gid_t gid = (valid & FUSE_SET_ATTR_GID) ?
                        attr->st_gid : (gid_t) -1;
                err = fuse_fs_chown(f->fs, path, uid, gid, fi);
        if (!err && (valid & FUSE_SET_ATTR_SIZE)) {
                err = fuse_fs_truncate(f->fs, path,
#ifdef HAVE_UTIMENSAT
            (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME))) {
                struct timespec tv[2];
                tv[0].tv_nsec = UTIME_OMIT;
                tv[1].tv_nsec = UTIME_OMIT;
                if (valid & FUSE_SET_ATTR_ATIME_NOW)
                        tv[0].tv_nsec = UTIME_NOW;
                else if (valid & FUSE_SET_ATTR_ATIME)
                        tv[0] = attr->st_atim;
                if (valid & FUSE_SET_ATTR_MTIME_NOW)
                        tv[1].tv_nsec = UTIME_NOW;
                else if (valid & FUSE_SET_ATTR_MTIME)
                        tv[1] = attr->st_mtim;
                err = fuse_fs_utimens(f->fs, path, tv, fi);
            (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) ==
            (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
                struct timespec tv[2];
                tv[0].tv_sec = attr->st_atime;
                tv[0].tv_nsec = ST_ATIM_NSEC(attr);
                tv[1].tv_sec = attr->st_mtime;
                tv[1].tv_nsec = ST_MTIM_NSEC(attr);
                err = fuse_fs_utimens(f->fs, path, tv, fi);
        err = fuse_fs_getattr(f->fs, path, &buf, fi);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, ino, path);
        if (f->conf.auto_cache) {
                pthread_mutex_lock(&f->lock);
                update_stat(get_node(f, ino), &buf);
                pthread_mutex_unlock(&f->lock);
        set_stat(f, ino, &buf);
        reply_err(req, err);
static void fuse_lib_access(fuse_req_t req, fuse_ino_t ino, int mask)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path(f, ino, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_access(f->fs, path, mask);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, ino, path);
        reply_err(req, err);

static void fuse_lib_readlink(fuse_req_t req, fuse_ino_t ino)
        struct fuse *f = req_fuse_prepare(req);
        char linkname[PATH_MAX + 1];
        err = get_path(f, ino, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_readlink(f->fs, path, linkname, sizeof(linkname));
        fuse_finish_interrupt(f, req, &d);
        free_path(f, ino, path);
        linkname[PATH_MAX] = '\0';
        reply_err(req, err);
static void fuse_lib_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
                           mode_t mode, dev_t rdev)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path_name(f, parent, name, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        if (S_ISREG(mode)) {
                memset(&fi, 0, sizeof(fi));
                fi.flags = O_CREAT | O_EXCL | O_WRONLY;
                err = fuse_fs_create(f->fs, path, mode, &fi);
                err = lookup_path(f, parent, name, path, &e,
                fuse_fs_release(f->fs, path, &fi);
        if (err == -ENOSYS) {
                err = fuse_fs_mknod(f->fs, path, mode, rdev);
                err = lookup_path(f, parent, name, path, &e,
        fuse_finish_interrupt(f, req, &d);
        free_path(f, parent, path);
        reply_entry(req, &e, err);

static void fuse_lib_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
                           mode_t mode)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path_name(f, parent, name, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_mkdir(f->fs, path, mode);
        err = lookup_path(f, parent, name, path, &e, NULL);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, parent, path);
        reply_entry(req, &e, err);

static void fuse_lib_unlink(fuse_req_t req, fuse_ino_t parent, const char *name)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path_wrlock(f, parent, name, &path, &wnode);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        if (!f->conf.hard_remove && is_open(f, parent, name)) {
                err = hide_node(f, path, parent, name);
        if (!is_open(f, parent, wnode->name)) {
                if (try_get_path(f, wnode->nodeid, NULL, &unlinkpath, NULL,
                                 false) == 0) {
                        err = fuse_fs_unlink(f->fs, unlinkpath);
                        remove_node(f, parent, wnode->name);
        err = fuse_fs_unlink(f->fs, path);
        remove_node(f, parent, name);
        fuse_finish_interrupt(f, req, &d);
        free_path_wrlock(f, parent, wnode, path);
        reply_err(req, err);

static void fuse_lib_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path_wrlock(f, parent, name, &path, &wnode);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_rmdir(f->fs, path);
        fuse_finish_interrupt(f, req, &d);
        remove_node(f, parent, name);
        free_path_wrlock(f, parent, wnode, path);
        reply_err(req, err);
static void fuse_lib_symlink(fuse_req_t req, const char *linkname,
                             fuse_ino_t parent, const char *name)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path_name(f, parent, name, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_symlink(f->fs, linkname, path);
        err = lookup_path(f, parent, name, path, &e, NULL);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, parent, path);
        reply_entry(req, &e, err);

static void fuse_lib_rename(fuse_req_t req, fuse_ino_t olddir,
                            const char *oldname, fuse_ino_t newdir,
                            const char *newname, unsigned int flags)
        struct fuse *f = req_fuse_prepare(req);
        struct node *wnode1;
        struct node *wnode2;
        err = get_path2(f, olddir, oldname, newdir, newname,
                        &oldpath, &newpath, &wnode1, &wnode2);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        if (!f->conf.hard_remove && !(flags & RENAME_EXCHANGE) &&
            is_open(f, newdir, newname))
                err = hide_node(f, newpath, newdir, newname);
        err = fuse_fs_rename(f->fs, oldpath, newpath, flags);
        if (flags & RENAME_EXCHANGE) {
                err = exchange_node(f, olddir, oldname,
        err = rename_node(f, olddir, oldname,
                          newdir, newname, 0);
        fuse_finish_interrupt(f, req, &d);
        free_path2(f, olddir, newdir, wnode1, wnode2, oldpath, newpath);
        reply_err(req, err);

static void fuse_lib_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
                          const char *newname)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path2(f, ino, NULL, newparent, newname,
                        &oldpath, &newpath, NULL, NULL);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_link(f->fs, oldpath, newpath);
        err = lookup_path(f, newparent, newname, newpath,
        fuse_finish_interrupt(f, req, &d);
        free_path2(f, ino, newparent, NULL, NULL, oldpath, newpath);
        reply_entry(req, &e, err);
static void fuse_do_release(struct fuse *f, fuse_ino_t ino, const char *path,
                            struct fuse_file_info *fi)
        int unlink_hidden = 0;
        fuse_fs_release(f->fs, path, fi);
        pthread_mutex_lock(&f->lock);
        node = get_node(f, ino);
        assert(node->open_count > 0);
        if (node->is_hidden && !node->open_count) {
                node->is_hidden = 0;
        pthread_mutex_unlock(&f->lock);
        fuse_fs_unlink(f->fs, path);
        } else if (f->conf.nullpath_ok) {
                if (get_path(f, ino, &unlinkpath) == 0)
                        fuse_fs_unlink(f->fs, unlinkpath);
                free_path(f, ino, unlinkpath);
static void fuse_lib_create(fuse_req_t req, fuse_ino_t parent,
                            const char *name, mode_t mode,
                            struct fuse_file_info *fi)
        struct fuse *f = req_fuse_prepare(req);
        struct fuse_intr_data d;
        err = get_path_name(f, parent, name, &path);
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_create(f->fs, path, mode, fi);
        err = lookup_path(f, parent, name, path, &e, fi);
        fuse_fs_release(f->fs, path, fi);
        else if (!S_ISREG(e.attr.st_mode)) {
                fuse_fs_release(f->fs, path, fi);
                forget_node(f, e.ino, 1);
        if (f->conf.direct_io)
        if (f->conf.kernel_cache)
        f->conf.parallel_direct_writes)
        fuse_finish_interrupt(f, req, &d);
        pthread_mutex_lock(&f->lock);
        get_node(f, e.ino)->open_count++;
        pthread_mutex_unlock(&f->lock);
        fuse_do_release(f, e.ino, path, fi);
        forget_node(f, e.ino, 1);
        reply_err(req, err);
        free_path(f, parent, path);
static double diff_timespec(const struct timespec *t1,
                            const struct timespec *t2)
        return (t1->tv_sec - t2->tv_sec) +
               ((double) t1->tv_nsec - (double) t2->tv_nsec) / 1000000000.0;

static void open_auto_cache(struct fuse *f, fuse_ino_t ino, const char *path,
                            struct fuse_file_info *fi)
        pthread_mutex_lock(&f->lock);
        node = get_node(f, ino);
        if (node->cache_valid) {
                struct timespec now;
                if (diff_timespec(&now, &node->stat_updated) >
                    f->conf.ac_attr_timeout) {
                        pthread_mutex_unlock(&f->lock);
                        err = fuse_fs_getattr(f->fs, path, &stbuf, fi);
                        pthread_mutex_lock(&f->lock);
                        update_stat(node, &stbuf);
                        node->cache_valid = 0;
        if (node->cache_valid)
        node->cache_valid = 1;
        pthread_mutex_unlock(&f->lock);

static void fuse_lib_open(fuse_req_t req, fuse_ino_t ino,
                          struct fuse_file_info *fi)
        struct fuse *f = req_fuse_prepare(req);
        struct fuse_intr_data d;
        err = get_path(f, ino, &path);
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_open(f->fs, path, fi);
        if (f->conf.direct_io)
        if (f->conf.kernel_cache)
        if (f->conf.auto_cache)
                open_auto_cache(f, ino, path, fi);
        if (f->conf.no_rofd_flush &&
            (fi->flags & O_ACCMODE) == O_RDONLY)
        if (fi->direct_io && f->conf.parallel_direct_writes)
        fuse_finish_interrupt(f, req, &d);
        pthread_mutex_lock(&f->lock);
        get_node(f, ino)->open_count++;
        pthread_mutex_unlock(&f->lock);
        fuse_do_release(f, ino, path, fi);
        reply_err(req, err);
        free_path(f, ino, path);
static void fuse_lib_read(fuse_req_t req, fuse_ino_t ino, size_t size,
                          off_t off, struct fuse_file_info *fi)
        struct fuse *f = req_fuse_prepare(req);
        res = get_path_nullok(f, ino, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        res = fuse_fs_read_buf(f->fs, path, &buf, size, off, fi);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, ino, path);
        reply_err(req, res);

static void fuse_lib_write_buf(fuse_req_t req, fuse_ino_t ino,
                               struct fuse_bufvec *buf, off_t off,
                               struct fuse_file_info *fi)
        struct fuse *f = req_fuse_prepare(req);
        res = get_path_nullok(f, ino, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        res = fuse_fs_write_buf(f->fs, path, buf, off, fi);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, ino, path);
        reply_err(req, res);

static void fuse_lib_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
                           struct fuse_file_info *fi)
        struct fuse *f = req_fuse_prepare(req);
        err = get_path_nullok(f, ino, &path);
        struct fuse_intr_data d;
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_fsync(f->fs, path, datasync, fi);
        fuse_finish_interrupt(f, req, &d);
        free_path(f, ino, path);
        reply_err(req, err);
static struct fuse_dh *get_dirhandle(const struct fuse_file_info *llfi,
                                     struct fuse_file_info *fi)
        struct fuse_dh *dh = (struct fuse_dh *) (uintptr_t) llfi->fh;

static void fuse_lib_opendir(fuse_req_t req, fuse_ino_t ino,
                             struct fuse_file_info *llfi)
        struct fuse *f = req_fuse_prepare(req);
        struct fuse_intr_data d;
        dh = (struct fuse_dh *) malloc(sizeof(struct fuse_dh));
        reply_err(req, -ENOMEM);
        memset(dh, 0, sizeof(struct fuse_dh));
        dh->contents = NULL;
        pthread_mutex_init(&dh->lock, NULL);
        llfi->fh = (uintptr_t) dh;
        memset(&fi, 0, sizeof(fi));
        err = get_path(f, ino, &path);
        fuse_prepare_interrupt(f, req, &d);
        err = fuse_fs_opendir(f->fs, path, &fi);
        fuse_finish_interrupt(f, req, &d);
        fuse_fs_releasedir(f->fs, path, &fi);
        pthread_mutex_destroy(&dh->lock);
        reply_err(req, err);
        pthread_mutex_destroy(&dh->lock);
        free_path(f, ino, path);
static int extend_contents(struct fuse_dh *dh, unsigned minsize)
        if (minsize > dh->size) {
                unsigned newsize = dh->size;
                while (newsize < minsize) {
                        if (newsize >= 0x80000000)
                                newsize = 0xffffffff;
                newptr = (char *) realloc(dh->contents, newsize);
                dh->error = -ENOMEM;
                dh->contents = newptr;
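/*
 * Editor's sketch (not in the original source): both extend_contents() here
 * and add_name() above grow their buffers by repeated doubling, capped so
 * the size cannot overflow a 32-bit unsigned value:
 *
 *      unsigned newsize = cursize;
 *      while (newsize < minsize) {
 *              if (newsize >= 0x80000000)      // doubling would overflow
 *                      newsize = 0xffffffff;   // clamp to the maximum
 *              else
 *                      newsize *= 2;
 *      }
 *      char *p = realloc(buf, newsize);        // keep the old buffer if this fails
 */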
static int fuse_add_direntry_to_dh(struct fuse_dh *dh, const char *name,
        struct fuse_direntry *de;
        de = malloc(sizeof(struct fuse_direntry));
        dh->error = -ENOMEM;
        de->name = strdup(name);
        dh->error = -ENOMEM;
        dh->last = &de->next;

        pthread_mutex_lock(&f->lock);
        node = lookup_node(f, parent, name);
        pthread_mutex_unlock(&f->lock);

static int fill_dir(void *dh_, const char *name, const struct stat *statp,
                    off_t off, enum fuse_fill_dir_flags flags)
        struct fuse_dh *dh = (struct fuse_dh *) dh_;
        if ((flags & ~FUSE_FILL_DIR_PLUS) != 0) {
        memset(&stbuf, 0, sizeof(stbuf));
        stbuf.st_ino = FUSE_UNKNOWN_INO;
        if (!dh->fuse->conf.use_ino) {
                stbuf.st_ino = FUSE_UNKNOWN_INO;
                if (dh->fuse->conf.readdir_ino) {
                        stbuf.st_ino = (ino_t)
                                lookup_nodeid(dh->fuse, dh->nodeid, name);
        if (extend_contents(dh, dh->needlen) == -1)
                dh->needlen - dh->len, name,
        if (newlen > dh->needlen)
        if (fuse_add_direntry_to_dh(dh, name, &stbuf, flags) == -1)

static int is_dot_or_dotdot(const char *name)
        return name[0] == '.' && (name[1] == '\0' ||
                                  (name[1] == '.' && name[2] == '\0'));
3573static int fill_dir_plus(void *dh_, const char *name, const struct stat *statp,
3576 struct fuse_dh *dh = (struct fuse_dh *) dh_;
3581 struct fuse *f = dh->fuse;
3584 if ((flags & ~FUSE_FILL_DIR_PLUS) != 0) {
3589 if (statp && (flags & FUSE_FILL_DIR_PLUS)) {
3592 e.attr.st_ino = FUSE_UNKNOWN_INO;
3594 e.attr.st_mode = statp->st_mode;
3595 if (f->conf.use_ino)
3596 e.attr.st_ino = statp->st_ino;
3598 if (!f->conf.use_ino && f->conf.readdir_ino) {
3599 e.attr.st_ino = (ino_t)
3600 lookup_nodeid(f, dh->nodeid, name);
3616 if (extend_contents(dh, dh->needlen) == -1)
3619 if (statp && (flags & FUSE_FILL_DIR_PLUS)) {
3620 if (!is_dot_or_dotdot(name)) {
3621 res = do_lookup(f, dh->nodeid, name, &e);
3631 dh->needlen - dh->len, name,
3633 if (newlen > dh->needlen)
3639 if (fuse_add_direntry_to_dh(dh, name, &e.attr, flags) == -1)
3646static void free_direntries(struct fuse_direntry *de)
3649 struct fuse_direntry *next = de->next;
3657 size_t size, off_t off, struct fuse_dh *dh,
3664 if (f->fs->op.readdir)
3665 err = get_path_nullok(f, ino, &path);
3667 err = get_path(f, ino, &path);
3669 struct fuse_intr_data d;
3672 if (flags & FUSE_READDIR_PLUS)
3673 filler = fill_dir_plus;
3675 free_direntries(dh->first);
3677 dh->last = &dh->first;
3683 fuse_prepare_interrupt(f, req, &d);
3684 err = fuse_fs_readdir(f->fs, path, dh, filler, off, fi, flags);
3685 fuse_finish_interrupt(f, req, &d);
3691 free_path(f, ino, path);
3696static int readdir_fill_from_list(fuse_req_t req, struct fuse_dh *dh,
3700 struct fuse_direntry *de = dh->first;
3705 if (extend_contents(dh, dh->needlen) == -1)
3708 for (pos = 0; pos < off; pos++) {
3715 char *p = dh->contents + dh->len;
3716 unsigned rem = dh->needlen - dh->len;
3721 if (flags & FUSE_READDIR_PLUS) {
3727 if (de->flags & FUSE_FILL_DIR_PLUS &&
3728 !is_dot_or_dotdot(de->name)) {
3729 res = do_lookup(dh->fuse, dh->nodeid,
3741 de->name, &de->stat, pos);
3743 newlen = dh->len + thislen;
3744 if (newlen > dh->needlen)
3756 struct fuse *f = req_fuse_prepare(req);
3758 struct fuse_dh *dh = get_dirhandle(llfi, &fi);
3761 pthread_mutex_lock(&dh->lock);
3768 err = readdir_fill(f, req, ino, size, off, dh, &fi, flags);
3770 reply_err(req, err);
3776 err = readdir_fill_from_list(req, dh, off, flags);
3778 reply_err(req, err);
3784 pthread_mutex_unlock(&dh->lock);
3790 fuse_readdir_common(req, ino, size, off, llfi, 0);
3796 fuse_readdir_common(req, ino, size, off, llfi, FUSE_READDIR_PLUS);
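/*
 * fuse_lib_readdir() and fuse_lib_readdirplus() are thin wrappers
 * around fuse_readdir_common(); the only difference is the
 * FUSE_READDIR_PLUS flag, which selects fill_dir_plus() so each entry
 * is returned together with a full lookup and its attributes.
 */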
3802 struct fuse *f = req_fuse_prepare(req);
3803 struct fuse_intr_data d;
3805 struct fuse_dh *dh = get_dirhandle(llfi, &fi);
3808 get_path_nullok(f, ino, &path);
3810 fuse_prepare_interrupt(f, req, &d);
3811 fuse_fs_releasedir(f->fs, path, &fi);
3812 fuse_finish_interrupt(f, req, &d);
3813 free_path(f, ino, path);
3815 pthread_mutex_lock(&dh->lock);
3816 pthread_mutex_unlock(&dh->lock);
3817 pthread_mutex_destroy(&dh->lock);
3818 free_direntries(dh->first);
3827 struct fuse *f = req_fuse_prepare(req);
3832 get_dirhandle(llfi, &fi);
3834 err = get_path_nullok(f, ino, &path);
3836 struct fuse_intr_data d;
3837 fuse_prepare_interrupt(f, req, &d);
3838 err = fuse_fs_fsyncdir(f->fs, path, datasync, &fi);
3839 fuse_finish_interrupt(f, req, &d);
3840 free_path(f, ino, path);
3842 reply_err(req, err);
3847 struct fuse *f = req_fuse_prepare(req);
3852 memset(&buf, 0, sizeof(buf));
3854 err = get_path(f, ino, &path);
3857 struct fuse_intr_data d;
3858 fuse_prepare_interrupt(f, req, &d);
3859 err = fuse_fs_statfs(f->fs, path ? path : "/", &buf);
3860 fuse_finish_interrupt(f, req, &d);
3861 free_path(f, ino, path);
3867 reply_err(req, err);
3871 const char *value, size_t size, int flags)
3873 struct fuse *f = req_fuse_prepare(req);
3877 err = get_path(f, ino, &path);
3879 struct fuse_intr_data d;
3880 fuse_prepare_interrupt(f, req, &d);
3881 err = fuse_fs_setxattr(f->fs, path, name, value, size, flags);
3882 fuse_finish_interrupt(f, req, &d);
3883 free_path(f, ino, path);
3885 reply_err(req, err);
3889 const char *name, char *value, size_t size)
3894 err = get_path(f, ino, &path);
3896 struct fuse_intr_data d;
3897 fuse_prepare_interrupt(f, req, &d);
3898 err = fuse_fs_getxattr(f->fs, path, name, value, size);
3899 fuse_finish_interrupt(f, req, &d);
3900 free_path(f, ino, path);
3908 struct fuse *f = req_fuse_prepare(req);
3912 char *value = (char *) malloc(size);
3913 if (value == NULL) {
3914 reply_err(req, -ENOMEM);
3917 res = common_getxattr(f, req, ino, name, value, size);
3921 reply_err(req, res);
3924 res = common_getxattr(f, req, ino, name, NULL, 0);
3928 reply_err(req, res);
3933 char *list, size_t size)
3938 err = get_path(f, ino, &path);
3940 struct fuse_intr_data d;
3941 fuse_prepare_interrupt(f, req, &d);
3942 err = fuse_fs_listxattr(f->fs, path, list, size);
3943 fuse_finish_interrupt(f, req, &d);
3944 free_path(f, ino, path);
3951 struct fuse *f = req_fuse_prepare(req);
3955 char *list = (char *) malloc(size);
3957 reply_err(req, -ENOMEM);
3960 res = common_listxattr(f, req, ino, list, size);
3964 reply_err(req, res);
3967 res = common_listxattr(f, req, ino, NULL, 0);
3971 reply_err(req, res);
3978 struct fuse *f = req_fuse_prepare(req);
3982 err = get_path(f, ino, &path);
3984 struct fuse_intr_data d;
3985 fuse_prepare_interrupt(f, req, &d);
3986 err = fuse_fs_removexattr(f->fs, path, name);
3987 fuse_finish_interrupt(f, req, &d);
3988 free_path(f, ino, path);
3990 reply_err(req, err);
3993static struct lock *locks_conflict(struct node *node, const struct lock *lock)
3997 for (l = node->locks; l; l = l->next)
3998 if (l->owner != lock->owner &&
3999 lock->start <= l->end && l->start <= lock->end &&
4000 (l->type == F_WRLCK || lock->type == F_WRLCK))
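/*
 * locks_conflict() walks the per-node lock list and reports a lock as
 * conflicting when it belongs to a different owner, its byte range
 * overlaps the requested range, and at least one of the two locks is
 * a write lock (F_WRLCK), which is the usual POSIX advisory-locking
 * rule.
 */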
4006static void delete_lock(struct lock **lockp)
4008 struct lock *l = *lockp;
4013static void insert_lock(struct lock **pos, struct lock *lock)
4019static int locks_insert(struct node *node, struct lock *lock)
4022 struct lock *newl1 = NULL;
4023 struct lock *newl2 = NULL;
4025 if (lock->type != F_UNLCK || lock->start != 0 ||
4026 lock->end != OFFSET_MAX) {
4027 newl1 = malloc(sizeof(struct lock));
4028 newl2 = malloc(sizeof(struct lock));
4030 if (!newl1 || !newl2) {
4037 for (lp = &node->locks; *lp;) {
4038 struct lock *l = *lp;
4039 if (l->owner != lock->owner)
4042 if (lock->type == l->type) {
4043 if (l->end < lock->start - 1)
4045 if (lock->end < l->start - 1)
4047 if (l->start <= lock->start && lock->end <= l->end)
4049 if (l->start < lock->start)
4050 lock->start = l->start;
4051 if (lock->end < l->end)
4055 if (l->end < lock->start)
4057 if (lock->end < l->start)
4059 if (lock->start <= l->start && l->end <= lock->end)
4061 if (l->end <= lock->end) {
4062 l->end = lock->start - 1;
4065 if (lock->start <= l->start) {
4066 l->start = lock->end + 1;
4070 newl2->start = lock->end + 1;
4071 l->end = lock->start - 1;
4072 insert_lock(&l->next, newl2);
4082 if (lock->type != F_UNLCK) {
4084 insert_lock(lp, newl1);
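/*
 * locks_insert() keeps the per-owner lock list normalized: same-type
 * regions that overlap or touch are merged, while an overlapping
 * region of a different type is shrunk, removed, or split in two
 * using the preallocated newl1/newl2 nodes so the split cannot fail
 * halfway through. A full-range F_UNLCK therefore only removes the
 * owner's locks and allocates nothing.
 */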
4093static void flock_to_lock(struct flock *flock, struct lock *lock)
4095 memset(lock, 0, sizeof(struct lock));
4096 lock->type = flock->l_type;
4097 lock->start = flock->l_start;
4099 flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
4100 lock->pid = flock->l_pid;
4103static void lock_to_flock(struct lock *lock, struct flock *flock)
4105 flock->l_type = lock->type;
4106 flock->l_start = lock->start;
4108 (lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
4109 flock->l_pid = lock->pid;
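/*
 * flock_to_lock()/lock_to_flock() translate between struct flock,
 * which describes a region as l_start/l_len with l_len == 0 meaning
 * "to end of file", and the internal struct lock, which stores an
 * inclusive [start, end] range with OFFSET_MAX standing in for EOF.
 */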
4115 struct fuse_intr_data d;
4121 fuse_prepare_interrupt(f, req, &d);
4122 memset(&lock, 0, sizeof(lock));
4123 lock.l_type = F_UNLCK;
4124 lock.l_whence = SEEK_SET;
4125 err = fuse_fs_flush(f->fs, path, fi);
4126 errlock = fuse_fs_lock(f->fs, path, fi, F_SETLK, &lock);
4127 fuse_finish_interrupt(f, req, &d);
4129 if (errlock != -ENOSYS) {
4130 flock_to_lock(&lock, &l);
4132 pthread_mutex_lock(&f->lock);
4133 locks_insert(get_node(f, ino), &l);
4134 pthread_mutex_unlock(&f->lock);
4147 struct fuse *f = req_fuse_prepare(req);
4148 struct fuse_intr_data d;
4152 get_path_nullok(f, ino, &path);
4154 err = fuse_flush_common(f, req, ino, path, fi);
4159 fuse_prepare_interrupt(f, req, &d);
4160 fuse_do_release(f, ino, path, fi);
4161 fuse_finish_interrupt(f, req, &d);
4162 free_path(f, ino, path);
4164 reply_err(req, err);
4170 struct fuse *f = req_fuse_prepare(req);
4174 get_path_nullok(f, ino, &path);
4175 err = fuse_flush_common(f, req, ino, path, fi);
4176 free_path(f, ino, path);
4178 reply_err(req, err);
4185 struct fuse *f = req_fuse_prepare(req);
4189 err = get_path_nullok(f, ino, &path);
4191 struct fuse_intr_data d;
4192 fuse_prepare_interrupt(f, req, &d);
4193 err = fuse_fs_lock(f->fs, path, fi, cmd, lock);
4194 fuse_finish_interrupt(f, req, &d);
4195 free_path(f, ino, path);
4205 struct lock *conflict;
4206 struct fuse *f = req_fuse(req);
4208 flock_to_lock(lock, &l);
4210 pthread_mutex_lock(&f->lock);
4211 conflict = locks_conflict(get_node(f, ino), &l);
4213 lock_to_flock(conflict, lock);
4214 pthread_mutex_unlock(&f->lock);
4216 err = fuse_lock_common(req, ino, fi, lock, F_GETLK);
4223 reply_err(req, err);
4230 int err = fuse_lock_common(req, ino, fi, lock,
4231 sleep ? F_SETLKW : F_SETLK);
4233 struct fuse *f = req_fuse(req);
4235 flock_to_lock(lock, &l);
4237 pthread_mutex_lock(&f->lock);
4238 locks_insert(get_node(f, ino), &l);
4239 pthread_mutex_unlock(&f->lock);
4241 reply_err(req, err);
4247 struct fuse *f = req_fuse_prepare(req);
4251 err = get_path_nullok(f, ino, &path);
4253 struct fuse_intr_data d;
4254 fuse_prepare_interrupt(f, req, &d);
4255 err = fuse_fs_flock(f->fs, path, fi, op);
4256 fuse_finish_interrupt(f, req, &d);
4257 free_path(f, ino, path);
4259 reply_err(req, err);
4265 struct fuse *f = req_fuse_prepare(req);
4266 struct fuse_intr_data d;
4270 err = get_path(f, ino, &path);
4272 fuse_prepare_interrupt(f, req, &d);
4273 err = fuse_fs_bmap(f->fs, path, blocksize, &idx);
4274 fuse_finish_interrupt(f, req, &d);
4275 free_path(f, ino, path);
4280 reply_err(req, err);
4285 unsigned int flags, const void *in_buf,
4286 size_t in_bufsz, size_t out_bufsz)
4288 struct fuse *f = req_fuse_prepare(req);
4289 struct fuse_intr_data d;
4291 char *path, *out_buf = NULL;
4295 if (flags & FUSE_IOCTL_UNRESTRICTED)
4298 if (flags & FUSE_IOCTL_DIR)
4299 get_dirhandle(llfi, &fi);
4305 out_buf = malloc(out_bufsz);
4310 assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
4311 if (out_buf && in_bufsz)
4312 memcpy(out_buf, in_buf, in_bufsz);
4314 err = get_path_nullok(f, ino, &path);
4318 fuse_prepare_interrupt(f, req, &d);
4320 err = fuse_fs_ioctl(f->fs, path, cmd, arg, &fi, flags,
4321 out_buf ? out_buf : (void *)in_buf);
4323 fuse_finish_interrupt(f, req, &d);
4324 free_path(f, ino, path);
4331 reply_err(req, err);
4339 struct fuse *f = req_fuse_prepare(req);
4340 struct fuse_intr_data d;
4343 unsigned revents = 0;
4345 err = get_path_nullok(f, ino, &path);
4347 fuse_prepare_interrupt(f, req, &d);
4348 err = fuse_fs_poll(f->fs, path, fi, ph, &revents);
4349 fuse_finish_interrupt(f, req, &d);
4350 free_path(f, ino, path);
4355 reply_err(req, err);
4361 struct fuse *f = req_fuse_prepare(req);
4362 struct fuse_intr_data d;
4366 err = get_path_nullok(f, ino, &path);
4368 fuse_prepare_interrupt(f, req, &d);
4369 err = fuse_fs_fallocate(f->fs, path, mode, offset, length, fi);
4370 fuse_finish_interrupt(f, req, &d);
4371 free_path(f, ino, path);
4373 reply_err(req, err);
4382 struct fuse *f = req_fuse_prepare(req);
4383 struct fuse_intr_data d;
4384 char *path_in, *path_out;
4388 err = get_path_nullok(f, nodeid_in, &path_in);
4390 reply_err(req, err);
4394 err = get_path_nullok(f, nodeid_out, &path_out);
4396 free_path(f, nodeid_in, path_in);
4397 reply_err(req, err);
4401 fuse_prepare_interrupt(f, req, &d);
4402 res = fuse_fs_copy_file_range(f->fs, path_in, fi_in, off_in, path_out,
4403 fi_out, off_out, len, flags);
4404 fuse_finish_interrupt(f, req, &d);
4409 reply_err(req, res);
4411 free_path(f, nodeid_in, path_in);
4412 free_path(f, nodeid_out, path_out);
4418 struct fuse *f = req_fuse_prepare(req);
4419 struct fuse_intr_data d;
4424 err = get_path(f, ino, &path);
4426 reply_err(req, err);
4430 fuse_prepare_interrupt(f, req, &d);
4431 res = fuse_fs_lseek(f->fs, path, off, whence, fi);
4432 fuse_finish_interrupt(f, req, &d);
4433 free_path(f, ino, path);
4437 reply_err(req, res);
4440static int clean_delay(struct fuse *f)
4448 int max_sleep = 3600;
4449 int sleep_time = f->conf.remember / 10;
4451 if (sleep_time > max_sleep)
4453 if (sleep_time < min_sleep)
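/*
 * clean_delay() picks the sleep interval for the prune thread: one
 * tenth of the configured "remember" timeout, clamped to the
 * [min_sleep, max_sleep] range (max_sleep is one hour), so forgotten
 * nodes are expired with roughly 10% timing slack.
 */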
4460 struct node_lru *lnode;
4461 struct list_head *curr, *next;
4463 struct timespec now;
4465 pthread_mutex_lock(&f->lock);
4469 for (curr = f->lru_table.next; curr != &f->lru_table; curr = next) {
4473 lnode = list_entry(curr, struct node_lru, lru);
4474 node = &lnode->node;
4476 age = diff_timespec(&now, &lnode->forget_time);
4477 if (age <= f->conf.remember)
4480 assert(node->nlookup == 1);
4483 if (node->refctr > 1)
4487 unhash_name(f, node);
4488 unref_node(f, node);
4490 pthread_mutex_unlock(&f->lock);
4492 return clean_delay(f);
4496 .init = fuse_lib_init,
4497 .destroy = fuse_lib_destroy,
4498 .lookup = fuse_lib_lookup,
4499 .forget = fuse_lib_forget,
4500 .forget_multi = fuse_lib_forget_multi,
4501 .getattr = fuse_lib_getattr,
4502 .setattr = fuse_lib_setattr,
4503 .access = fuse_lib_access,
4504 .readlink = fuse_lib_readlink,
4505 .mknod = fuse_lib_mknod,
4506 .mkdir = fuse_lib_mkdir,
4507 .unlink = fuse_lib_unlink,
4508 .rmdir = fuse_lib_rmdir,
4509 .symlink = fuse_lib_symlink,
4510 .rename = fuse_lib_rename,
4511 .link = fuse_lib_link,
4512 .create = fuse_lib_create,
4513 .open = fuse_lib_open,
4514 .read = fuse_lib_read,
4515 .write_buf = fuse_lib_write_buf,
4516 .flush = fuse_lib_flush,
4517 .release = fuse_lib_release,
4518 .fsync = fuse_lib_fsync,
4519 .opendir = fuse_lib_opendir,
4520 .readdir = fuse_lib_readdir,
4521 .readdirplus = fuse_lib_readdirplus,
4522 .releasedir = fuse_lib_releasedir,
4523 .fsyncdir = fuse_lib_fsyncdir,
4524 .statfs = fuse_lib_statfs,
4525 .setxattr = fuse_lib_setxattr,
4526 .getxattr = fuse_lib_getxattr,
4527 .listxattr = fuse_lib_listxattr,
4528 .removexattr = fuse_lib_removexattr,
4529 .getlk = fuse_lib_getlk,
4530 .setlk = fuse_lib_setlk,
4531 .flock = fuse_lib_flock,
4532 .bmap = fuse_lib_bmap,
4533 .ioctl = fuse_lib_ioctl,
4534 .poll = fuse_lib_poll,
4535 .fallocate = fuse_lib_fallocate,
4536 .copy_file_range = fuse_lib_copy_file_range,
4537 .lseek = fuse_lib_lseek,
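/*
 * This low-level operations table (passed as llop to
 * fuse_session_new_versioned() further down) wires every kernel
 * request to the path-resolving fuse_lib_* handlers above, which map
 * the inode to a path (or a NULL path where permitted) before calling
 * into the user-supplied fuse_operations.
 */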
4540int fuse_notify_poll(struct fuse_pollhandle *ph)
4550static int fuse_session_loop_remember(struct fuse *f)
4552 struct fuse_session *se = f->se;
4554 struct timespec now;
4556 struct pollfd fds = {
4565 next_clean = now.tv_sec;
4570 if (now.tv_sec < next_clean)
4571 timeout = next_clean - now.tv_sec;
4575 res = poll(&fds, 1, timeout * 1000);
4581 } else if (res > 0) {
4582 res = fuse_session_receive_buf_internal(se, &fbuf,
4589 fuse_session_process_buf_internal(se, &fbuf, NULL);
4593 next_clean = now.tv_sec + timeout;
4599 return res < 0 ? -1 : 0;
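/*
 * fuse_session_loop_remember() is the single-threaded loop used when
 * "remember" is enabled: it poll()s the session descriptor with a
 * timeout derived from the next scheduled cleaning pass, so expired
 * forgotten nodes are pruned periodically even when no requests
 * arrive.
 */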
4608 return fuse_session_loop_remember(f);
4613FUSE_SYMVER("fuse_loop_mt_312", "fuse_loop_mt@@FUSE_3.12")
4628int fuse_loop_mt_32(struct fuse *f, struct fuse_loop_config_v1 *config_v1);
4629FUSE_SYMVER("fuse_loop_mt_32", "fuse_loop_mt@FUSE_3.2")
4630int fuse_loop_mt_32(struct fuse *f, struct fuse_loop_config_v1 *config_v1)
4636 fuse_loop_cfg_convert(config, config_v1);
4638 int res = fuse_loop_mt_312(f, config);
4640 fuse_loop_cfg_destroy(config);
4645int fuse_loop_mt_31(struct fuse *f, int clone_fd);
4646FUSE_SYMVER("fuse_loop_mt_31", "fuse_loop_mt@FUSE_3.0")
4647int fuse_loop_mt_31(struct fuse *f, int clone_fd)
4655 fuse_loop_cfg_set_clone_fd(config, clone_fd);
4657 err = fuse_loop_mt_312(f, config);
4659 fuse_loop_cfg_destroy(config);
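/*
 * The FUSE_SYMVER() shims above keep the older fuse_loop_mt ABI entry
 * points working: fuse_loop_mt@FUSE_3.2 converts its legacy
 * struct fuse_loop_config_v1 and fuse_loop_mt@FUSE_3.0 its plain
 * clone_fd flag into a current fuse_loop_config before forwarding to
 * fuse_loop_mt_312().
 */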
4671 struct fuse_context_i *c = fuse_get_context_internal();
4681 struct fuse_context_i *c = fuse_get_context_internal();
4690 struct fuse_context_i *c = fuse_get_context_internal();
4700 int err = lookup_path_in_cache(f, path, &ino);
4708#define FUSE_LIB_OPT(t, p, v) { t, offsetof(struct fuse_config, p), v }
4710static const struct fuse_opt fuse_lib_opts[] = {
4713 FUSE_LIB_OPT("debug", debug, 1),
4714 FUSE_LIB_OPT("-d", debug, 1),
4715 FUSE_LIB_OPT("kernel_cache", kernel_cache, 1),
4716 FUSE_LIB_OPT("auto_cache", auto_cache, 1),
4717 FUSE_LIB_OPT("noauto_cache", auto_cache, 0),
4718 FUSE_LIB_OPT("no_rofd_flush", no_rofd_flush, 1),
4719 FUSE_LIB_OPT("umask=", set_mode, 1),
4720 FUSE_LIB_OPT("umask=%o", umask, 0),
4721 FUSE_LIB_OPT("fmask=", set_mode, 1),
4722 FUSE_LIB_OPT("fmask=%o", fmask, 0),
4723 FUSE_LIB_OPT("dmask=", set_mode, 1),
4724 FUSE_LIB_OPT("dmask=%o", dmask, 0),
4725 FUSE_LIB_OPT("uid=", set_uid, 1),
4726 FUSE_LIB_OPT("uid=%d", uid, 0),
4727 FUSE_LIB_OPT("gid=", set_gid, 1),
4728 FUSE_LIB_OPT("gid=%d", gid, 0),
4729 FUSE_LIB_OPT("entry_timeout=%lf", entry_timeout, 0),
4730 FUSE_LIB_OPT("attr_timeout=%lf", attr_timeout, 0),
4731 FUSE_LIB_OPT("ac_attr_timeout=%lf", ac_attr_timeout, 0),
4732 FUSE_LIB_OPT("ac_attr_timeout=", ac_attr_timeout_set, 1),
4733 FUSE_LIB_OPT("negative_timeout=%lf", negative_timeout, 0),
4734 FUSE_LIB_OPT("noforget", remember, -1),
4735 FUSE_LIB_OPT("remember=%u", remember, 0),
4736 FUSE_LIB_OPT("modules=%s", modules, 0),
4737 FUSE_LIB_OPT("parallel_direct_write=%d", parallel_direct_writes, 0),
4741static int fuse_lib_opt_proc(void *data, const char *arg, int key,
4744 (void) arg; (void) outargs; (void) data; (void) key;
4751static const struct fuse_opt fuse_help_opts[] = {
4752 FUSE_LIB_OPT("modules=%s", modules, 1),
4757static void print_module_help(const char *name,
4764 printf("\nOptions for %s module:\n", name);
4774" -o kernel_cache cache files in kernel\n"
4775" -o [no]auto_cache enable caching based on modification times (off)\n"
4776" -o no_rofd_flush disable flushing of read-only fd on close (off)\n"
4777" -o umask=M set file permissions (octal)\n"
4778" -o fmask=M set file permissions (octal)\n"
4779" -o dmask=M set dir permissions (octal)\n"
4780" -o uid=N set file owner\n"
4781" -o gid=N set file group\n"
4782" -o entry_timeout=T cache timeout for names (1.0s)\n"
4783" -o negative_timeout=T cache timeout for deleted names (0.0s)\n"
4784" -o attr_timeout=T cache timeout for attributes (1.0s)\n"
4785" -o ac_attr_timeout=T auto cache timeout for attributes (attr_timeout)\n"
4786" -o noforget never forget cached inodes\n"
4787" -o remember=T remember cached inodes for T seconds (0s)\n"
4788" -o modules=M1[:M2...] names of modules to push onto filesystem stack\n");
4795 print_module_help("subdir", &fuse_module_subdir_factory);
4797 print_module_help("iconv", &fuse_module_iconv_factory);
4804 fuse_lib_opt_proc) == -1
4813 for (module = conf.modules; module; module = next) {
4815 for (p = module; *p && *p != ':'; p++);
4816 next = *p ? p + 1 : NULL;
4819 m = fuse_get_module(module);
4821 print_module_help(module, &m->factory);
4825static int fuse_init_intr_signal(int signum, int *installed)
4827 struct sigaction old_sa;
4829 if (sigaction(signum, NULL, &old_sa) == -1) {
4830 perror("fuse: cannot get old signal handler");
4834 if (old_sa.sa_handler == SIG_DFL) {
4835 struct sigaction sa;
4837 memset(&sa, 0, sizeof(struct sigaction));
4838 sa.sa_handler = fuse_intr_sighandler;
4839 sigemptyset(&sa.sa_mask);
4841 if (sigaction(signum, &sa, NULL) == -1) {
4842 perror("fuse: cannot set interrupt signal handler");
4850static void fuse_restore_intr_signal(int signum)
4852 struct sigaction sa;
4854 memset(&sa, 0, sizeof(struct sigaction));
4855 sa.sa_handler = SIG_DFL;
4856 sigaction(signum, &sa, NULL);
4860static int fuse_push_module(struct fuse *f, const char *module,
4863 struct fuse_fs *fs[2] = { f->fs, NULL };
4864 struct fuse_fs *newfs;
4870 newfs = m->factory(args, fs);
4885 fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
4889 fs = (struct fuse_fs *) calloc(1, sizeof(struct fuse_fs));
4891 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse_fs object\n");
4895 fs->user_data = user_data;
4897 memcpy(&fs->op, op, op_size);
4901static int node_table_init(struct node_table *t)
4903 t->size = NODE_TABLE_MIN_SIZE;
4904 t->array = (struct node **) calloc(1, sizeof(struct node *) * t->size);
4905 if (t->array == NULL) {
4906 fuse_log(FUSE_LOG_ERR, "fuse: memory allocation failed\n");
4915static void *fuse_prune_nodes(void *fuse)
4917 struct fuse *f = fuse;
4920 pthread_setname_np(pthread_self(), "fuse_prune_nodes");
4932 return fuse_start_thread(&f->prune_thread, fuse_prune_nodes, f);
4939 if (lru_enabled(f)) {
4940 pthread_mutex_lock(&f->lock);
4941 pthread_cancel(f->prune_thread);
4942 pthread_mutex_unlock(&f->lock);
4943 pthread_join(f->prune_thread, NULL);
4951struct fuse *_fuse_new_31(struct fuse_args *args,
4960 f = (struct fuse *) calloc(1, sizeof(struct fuse));
4962 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
4966 f->conf.entry_timeout = 1.0;
4967 f->conf.attr_timeout = 1.0;
4968 f->conf.negative_timeout = 0.0;
4969 f->conf.intr_signal = FUSE_DEFAULT_INTR_SIGNAL;
4973 fuse_lib_opt_proc) == -1)
4976 pthread_mutex_lock(&fuse_context_lock);
4977 static int builtin_modules_registered = 0;
4979 if (builtin_modules_registered == 0) {
4981 fuse_register_module("subdir", fuse_module_subdir_factory, NULL);
4983 fuse_register_module("iconv", fuse_module_iconv_factory, NULL);
4985 builtin_modules_registered = 1;
4987 pthread_mutex_unlock(&fuse_context_lock);
4989 if (fuse_create_context_key() == -1)
4994 goto out_delete_context_key;
5004 f->pagesize = getpagesize();
5005 init_list_head(&f->partial_slabs);
5006 init_list_head(&f->full_slabs);
5007 init_list_head(&f->lru_table);
5009 if (f->conf.modules) {
5013 for (module = f->conf.modules; module; module = next) {
5015 for (p = module; *p && *p != ':'; p++);
5016 next = *p ? p + 1 : NULL;
5019 fuse_push_module(f, module, args) == -1)
5024 if (!f->conf.ac_attr_timeout_set)
5025 f->conf.ac_attr_timeout = f->conf.attr_timeout;
5027#if defined(__FreeBSD__) || defined(__NetBSD__)
5032 f->conf.readdir_ino = 1;
5036 struct fuse_session *fuse_session_new_versioned(
5040 f->se = fuse_session_new_versioned(args, &llop, sizeof(llop), version,
5045 if (f->conf.debug) {
5046 fuse_log(FUSE_LOG_DEBUG, "nullpath_ok: %i\n", f->conf.nullpath_ok);
5050 f->fs->debug = f->conf.debug;
5053 if (node_table_init(&f->name_table) == -1)
5054 goto out_free_session;
5056 if (node_table_init(&f->id_table) == -1)
5057 goto out_free_name_table;
5059 pthread_mutex_init(&f->lock, NULL);
5061 root = alloc_node(f);
5063 fuse_log(FUSE_LOG_ERR, "fuse: memory allocation failed\n");
5064 goto out_free_id_table;
5066 if (lru_enabled(f)) {
5067 struct node_lru *lnode = node_lru(root);
5068 init_list_head(&lnode->lru);
5071 strcpy(root->inline_name, "/");
5072 root->name = root->inline_name;
5073 root->parent = NULL;
5074 root->nodeid = FUSE_ROOT_ID;
5081 free(f->id_table.array);
5083 free(f->name_table.array);
5088 free(f->conf.modules);
5089out_delete_context_key:
5090 fuse_delete_context_key();
5098FUSE_SYMVER("_fuse_new_30", "_fuse_new@FUSE_3.0")
5099struct fuse *_fuse_new_30(struct fuse_args *args,
5108 FUSE_LIB_OPT("-h", show_help, 1),
5109 FUSE_LIB_OPT("--help", show_help, 1),
5114 fuse_lib_opt_proc) == -1)
5121 return _fuse_new_31(args, op, op_size, version, user_data);
5126 size_t op_size, void *user_data);
5127FUSE_SYMVER("fuse_new_31", "fuse_new@FUSE_3.1")
5128struct fuse *fuse_new_31(struct fuse_args *args,
5130 size_t op_size, void *user_data)
5135 return _fuse_new_31(args, op, op_size, &version, user_data);
5143 size_t op_size, void *user_data);
5144FUSE_SYMVER("fuse_new_30", "fuse_new@FUSE_3.0")
5145struct fuse *fuse_new_30(struct fuse_args *args,
5147 size_t op_size, void *user_data)
5152 FUSE_LIB_OPT("-h", show_help, 1),
5153 FUSE_LIB_OPT("--help", show_help, 1),
5158 fuse_lib_opt_proc) == -1)
5165 return fuse_new_31(args, op, op_size, user_data);
5173 if (f->conf.intr && f->intr_installed)
5174 fuse_restore_intr_signal(f->conf.intr_signal);
5177 fuse_create_context(f);
5179 for (i = 0; i < f->id_table.size; i++) {
5182 for (node = f->id_table.array[i]; node != NULL;
5183 node = node->id_next) {
5184 if (node->is_hidden) {
5186 if (try_get_path(f, node->nodeid, NULL, &path, NULL, false) == 0) {
5187 fuse_fs_unlink(f->fs, path);
5194 for (i = 0; i < f->id_table.size; i++) {
5198 for (node = f->id_table.array[i]; node != NULL; node = next) {
5199 next = node->id_next;
5204 assert(list_empty(&f->partial_slabs));
5205 assert(list_empty(&f->full_slabs));
5207 while (fuse_modules) {
5208 fuse_put_module(fuse_modules);
5210 free(f->id_table.array);
5211 free(f->name_table.array);
5212 pthread_mutex_destroy(&f->lock);
5215 free(f->conf.modules);
5217 fuse_delete_context_key();
5231 return FUSE_VERSION;
5236 return PACKAGE_VERSION;