#include "fuse_config.h"
#include "fuse_lowlevel.h"
#include "fuse_misc.h"
#include "fuse_kernel.h"

#include <sys/param.h>

#define FUSE_NODE_SLAB 1

#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)
#endif

#define FUSE_DEFAULT_INTR_SIGNAL SIGUSR1

#define FUSE_UNKNOWN_INO 0xffffffff
#define OFFSET_MAX 0x7fffffffffffffffLL

#define NODE_TABLE_MIN_SIZE 8192
struct lock_queue_element {
	struct lock_queue_element *next;
#define container_of(ptr, type, member) ({				\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})

#define list_entry(ptr, type, member)				\
	container_of(ptr, type, member)
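/*
 * container_of()/list_entry() recover a pointer to the enclosing structure
 * from a pointer to one of its embedded members, so the generic list_head
 * below can be linked into lists regardless of which structure contains it.
 * Illustrative use (not taken from the original source): if struct node_lru
 * embeds a "struct list_head lru" member, then
 *	list_entry(head->next, struct node_lru, lru)
 * yields the node_lru that owns that list link.
 */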
struct list_head {
	struct list_head *next;
	struct list_head *prev;
};

	/* struct node_slab members */
	struct list_head list;
	struct list_head freelist;

	/* struct fuse members */
	struct fuse_session *se;
	struct node_table name_table;
	struct node_table id_table;
	struct list_head lru_table;
	unsigned int generation;
	unsigned int hidectr;
	pthread_mutex_t lock;
	struct lock_queue_element *lockq;
	struct list_head partial_slabs;
	struct list_head full_slabs;
	pthread_t prune_thread;
	/* struct node members */
	struct node *name_next;
	struct node *id_next;
	unsigned int generation;
	struct timespec stat_updated;
	struct timespec mtime;
	unsigned int is_hidden : 1;
	unsigned int cache_valid : 1;
	char inline_name[32];
#define TREELOCK_WRITE -1
#define TREELOCK_WAIT_OFFSET INT_MIN

	/* struct node_lru members */
	struct list_head lru;
	struct timespec forget_time;

struct fuse_direntry {
	struct fuse_direntry *next;

	/* struct fuse_dh members */
	pthread_mutex_t lock;
	struct fuse_direntry *first;
	struct fuse_direntry **last;
struct fuse_context_i {

static pthread_key_t fuse_context_key;
static pthread_mutex_t fuse_context_lock = PTHREAD_MUTEX_INITIALIZER;
static int fuse_context_ref;
static int fuse_register_module(const char *name,
				fuse_module_factory_t factory,
				struct fusemod_so *so)
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate module\n");

	mod->name = strdup(name);
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate module name\n");

	mod->factory = factory;

	mod->next = fuse_modules;
static void fuse_unregister_module(struct fuse_module *m)
	for (mp = &fuse_modules; *mp; mp = &(*mp)->next) {
static int fuse_load_so_module(const char *module)
	struct fusemod_so *so;

	tmp = malloc(strlen(module) + 64);
		fuse_log(FUSE_LOG_ERR, "fuse: memory allocation failed\n");
	sprintf(tmp, "libfusemod_%s.so", module);
	so = calloc(1, sizeof(struct fusemod_so));
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate module so\n");

	so->handle = dlopen(tmp, RTLD_NOW);
	if (so->handle == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: dlopen(%s) failed: %s\n",
			 tmp, dlerror());
	sprintf(tmp, "fuse_module_%s_factory", module);
	if (factory == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: symbol <%s> not found in module: %s\n",
			 tmp, module);
	ret = fuse_register_module(module, *factory, so);
static struct fuse_module *fuse_find_module(const char *module)
	for (m = fuse_modules; m; m = m->next) {
		if (strcmp(module, m->name) == 0) {
static struct fuse_module *fuse_get_module(const char *module)
	pthread_mutex_lock(&fuse_context_lock);
	m = fuse_find_module(module);
		int err = fuse_load_so_module(module);
			m = fuse_find_module(module);
	pthread_mutex_unlock(&fuse_context_lock);

	pthread_mutex_lock(&fuse_context_lock);
	if (!m->ctr && m->so) {
		struct fusemod_so *so = m->so;
			for (mp = &fuse_modules; *mp;) {
					fuse_unregister_module(*mp);
	} else if (!m->ctr) {
		fuse_unregister_module(m);
	pthread_mutex_unlock(&fuse_context_lock);
static void init_list_head(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}
static void list_add(struct list_head *new, struct list_head *prev,
		     struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

static inline void list_add_head(struct list_head *new, struct list_head *head)
{
	list_add(new, head, head->next);
}

static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	list_add(new, head->prev, head);
}

static inline void list_del(struct list_head *entry)
{
	struct list_head *prev = entry->prev;
	struct list_head *next = entry->next;

	next->prev = prev;
	prev->next = next;
}
static inline int lru_enabled(struct fuse *f)
{
	return f->conf.remember > 0;
}

static struct node_lru *node_lru(struct node *node)
{
	return (struct node_lru *) node;
}

static size_t get_node_size(struct fuse *f)
{
	if (lru_enabled(f))
		return sizeof(struct node_lru);
	else
		return sizeof(struct node);
}
#ifdef FUSE_NODE_SLAB
static struct node_slab *list_to_slab(struct list_head *head)
{
	return (struct node_slab *) head;
}

static struct node_slab *node_to_slab(struct fuse *f, struct node *node)
{
	return (struct node_slab *) (((uintptr_t) node) & ~((uintptr_t) f->pagesize - 1));
}
static int alloc_slab(struct fuse *f)
	struct node_slab *slab;
	size_t node_size = get_node_size(f);

	mem = mmap(NULL, f->pagesize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (mem == MAP_FAILED)
		return -1;

	init_list_head(&slab->freelist);
	num = (f->pagesize - sizeof(struct node_slab)) / node_size;

	start = (char *) mem + f->pagesize - num * node_size;
	for (i = 0; i < num; i++) {
		n = (struct list_head *) (start + i * node_size);
		list_add_tail(n, &slab->freelist);
	}
	list_add_tail(&slab->list, &f->partial_slabs);
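/*
 * Node slab allocation (enabled via FUSE_NODE_SLAB): each slab is one
 * page-aligned mmap'ed page, with the node_slab header at the front and as
 * many node objects as fit packed at the end.  Because slabs are page
 * aligned, node_to_slab() can locate a node's slab by masking off the low
 * bits of the node's address.  Slabs that still have free nodes live on
 * f->partial_slabs, completely used ones on f->full_slabs.
 */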
static struct node *alloc_node(struct fuse *f)
	struct node_slab *slab;
	struct list_head *node;

	if (list_empty(&f->partial_slabs)) {
		int res = alloc_slab(f);
	}
	slab = list_to_slab(f->partial_slabs.next);
	node = slab->freelist.next;
	if (list_empty(&slab->freelist)) {
		list_del(&slab->list);
		list_add_tail(&slab->list, &f->full_slabs);
	}
	memset(node, 0, sizeof(struct node));

	return (struct node *) node;
static void free_slab(struct fuse *f, struct node_slab *slab)
	list_del(&slab->list);
	res = munmap(slab, f->pagesize);
		fuse_log(FUSE_LOG_WARNING, "fuse warning: munmap(%p) failed\n",
			 slab);

static void free_node_mem(struct fuse *f, struct node *node)
	struct node_slab *slab = node_to_slab(f, node);
	struct list_head *n = (struct list_head *) node;

	if (list_empty(&slab->freelist)) {
		list_del(&slab->list);
		list_add_tail(&slab->list, &f->partial_slabs);
	}
	list_add_head(n, &slab->freelist);
#else /* !FUSE_NODE_SLAB */
static struct node *alloc_node(struct fuse *f)
{
	return (struct node *) calloc(1, get_node_size(f));
}

static void free_node_mem(struct fuse *f, struct node *node)
{
	(void) f;
	free(node);
}
#endif
static size_t id_hash(struct fuse *f, fuse_ino_t ino)
	uint64_t hash = ((uint32_t) ino * 2654435761U) % f->id_table.size;
	uint64_t oldhash = hash % (f->id_table.size / 2);

	if (oldhash >= f->id_table.split)
		return oldhash;
	else
		return hash;
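/*
 * id_hash() implements the incremental ("split") hashing used by both node
 * tables: while a table is being resized, buckets below table.split have
 * already been rehashed with the full-size modulus, while the rest still
 * use the old half-size one.  Rough illustration, with size = 16 and
 * split = 4: an entry whose half-size bucket is 2 (< split) hashes modulo
 * 16, while one whose half-size bucket is 6 (>= split) keeps using modulo 8
 * until rehash_id() gets to that bucket.
 */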
static struct node *get_node_nocheck(struct fuse *f, fuse_ino_t nodeid)
	size_t hash = id_hash(f, nodeid);

	for (node = f->id_table.array[hash]; node != NULL; node = node->id_next)
		if (node->nodeid == nodeid)
			return node;

static struct node *get_node(struct fuse *f, fuse_ino_t nodeid)
	struct node *node = get_node_nocheck(f, nodeid);
		fuse_log(FUSE_LOG_ERR, "fuse internal error: node %llu not found\n",
			 (unsigned long long) nodeid);
static void curr_time(struct timespec *now);
static double diff_timespec(const struct timespec *t1,
			    const struct timespec *t2);

static void remove_node_lru(struct node *node)
	struct node_lru *lnode = node_lru(node);
	list_del(&lnode->lru);
	init_list_head(&lnode->lru);
static void set_forget_time(struct fuse *f, struct node *node)
	struct node_lru *lnode = node_lru(node);

	list_del(&lnode->lru);
	list_add_tail(&lnode->lru, &f->lru_table);
	curr_time(&lnode->forget_time);

static void free_node(struct fuse *f, struct node *node)
	if (node->name != node->inline_name)
		free(node->name);
	free_node_mem(f, node);
static void node_table_reduce(struct node_table *t)
	size_t newsize = t->size / 2;

	if (newsize < NODE_TABLE_MIN_SIZE)
		return;

	newarray = realloc(t->array, sizeof(struct node *) * newsize);
	if (newarray != NULL)
		t->array = newarray;

	t->split = t->size / 2;
static void remerge_id(struct fuse *f)
	struct node_table *t = &f->id_table;

	node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		upper = &t->array[t->split + t->size / 2];
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->id_next);
static void unhash_id(struct fuse *f, struct node *node)
	struct node **nodep = &f->id_table.array[id_hash(f, node->nodeid)];

	for (; *nodep != NULL; nodep = &(*nodep)->id_next)
		if (*nodep == node) {
			*nodep = node->id_next;

	if (f->id_table.use < f->id_table.size / 4)
		remerge_id(f);
static int node_table_resize(struct node_table *t)
	size_t newsize = t->size * 2;

	newarray = realloc(t->array, sizeof(struct node *) * newsize);
	if (newarray == NULL)
		return -1;

	memset(t->array + t->size, 0, t->size * sizeof(struct node *));
static void rehash_id(struct fuse *f)
	struct node_table *t = &f->id_table;

	if (t->split == t->size / 2)
		return;

	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = id_hash(f, node->nodeid);

		if (newhash != hash) {
			*nodep = node->id_next;
			node->id_next = t->array[newhash];
			t->array[newhash] = node;
		} else
			next = &node->id_next;
	}

	if (t->split == t->size / 2)
		node_table_resize(t);
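/*
 * rehash_id()/remerge_id() grow and shrink the id table incrementally:
 * hash_id() calls rehash_id() once the table is half full, which splits one
 * old bucket per call and doubles the array when the whole split pass has
 * finished; unhash_id() calls remerge_id() when usage drops below a
 * quarter, folding bucket pairs back together before the array is halved.
 * The name table below follows the same scheme.
 */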
static void hash_id(struct fuse *f, struct node *node)
	size_t hash = id_hash(f, node->nodeid);
	node->id_next = f->id_table.array[hash];
	f->id_table.array[hash] = node;

	if (f->id_table.use >= f->id_table.size / 2)
		rehash_id(f);
static size_t name_hash(struct fuse *f, fuse_ino_t parent,
			const char *name)
	uint64_t hash = parent;

	for (; *name; name++)
		hash = hash * 31 + (unsigned char) *name;

	hash %= f->name_table.size;
	oldhash = hash % (f->name_table.size / 2);
	if (oldhash >= f->name_table.split)
		return oldhash;
	else
		return hash;
static void unref_node(struct fuse *f, struct node *node);

static void remerge_name(struct fuse *f)
	struct node_table *t = &f->name_table;

	node_table_reduce(t);

	for (iter = 8; t->split > 0 && iter; iter--) {
		upper = &t->array[t->split + t->size / 2];
			for (nodep = &t->array[t->split]; *nodep;
			     nodep = &(*nodep)->name_next);
static void unhash_name(struct fuse *f, struct node *node)
	size_t hash = name_hash(f, node->parent->nodeid, node->name);
	struct node **nodep = &f->name_table.array[hash];

	for (; *nodep != NULL; nodep = &(*nodep)->name_next)
		if (*nodep == node) {
			*nodep = node->name_next;
			node->name_next = NULL;
			unref_node(f, node->parent);
			if (node->name != node->inline_name)
				free(node->name);

			if (f->name_table.use < f->name_table.size / 4)
				remerge_name(f);
			return;
		}

	fuse_log(FUSE_LOG_ERR,
		 "fuse internal error: unable to unhash node: %llu\n",
		 (unsigned long long) node->nodeid);
static void rehash_name(struct fuse *f)
	struct node_table *t = &f->name_table;

	if (t->split == t->size / 2)
		return;

	for (nodep = &t->array[hash]; *nodep != NULL; nodep = next) {
		struct node *node = *nodep;
		size_t newhash = name_hash(f, node->parent->nodeid, node->name);

		if (newhash != hash) {
			*nodep = node->name_next;
			node->name_next = t->array[newhash];
			t->array[newhash] = node;
		} else
			next = &node->name_next;
	}

	if (t->split == t->size / 2)
		node_table_resize(t);
static int hash_name(struct fuse *f, struct node *node, fuse_ino_t parentid,
		     const char *name)
	size_t hash = name_hash(f, parentid, name);
	struct node *parent = get_node(f, parentid);
	if (strlen(name) < sizeof(node->inline_name)) {
		strcpy(node->inline_name, name);
		node->name = node->inline_name;
	} else {
		node->name = strdup(name);
		if (node->name == NULL)
			return -1;
	}

	node->parent = parent;
	node->name_next = f->name_table.array[hash];
	f->name_table.array[hash] = node;

	if (f->name_table.use >= f->name_table.size / 2)
		rehash_name(f);
static void delete_node(struct fuse *f, struct node *node)
		fuse_log(FUSE_LOG_DEBUG, "DELETE: %llu\n",
			 (unsigned long long) node->nodeid);

	assert(node->treelock == 0);
	unhash_name(f, node);
	if (lru_enabled(f))
		remove_node_lru(node);
static void unref_node(struct fuse *f, struct node *node)
	assert(node->refctr > 0);
	if (!node->refctr)
		delete_node(f, node);

static fuse_ino_t next_id(struct fuse *f)
	do {
		f->ctr = (f->ctr + 1) & 0xffffffff;
	} while (f->ctr == 0 || f->ctr == FUSE_UNKNOWN_INO ||
		 get_node_nocheck(f, f->ctr) != NULL);
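/*
 * The do/while above hands out the next free inode number, wrapping at
 * 32 bits and skipping 0, FUSE_UNKNOWN_INO and any value still present in
 * the id table; the per-fuse generation counter is what lets the kernel
 * tell a recycled number apart from the node that used it before.
 */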
static struct node *lookup_node(struct fuse *f, fuse_ino_t parent,
				const char *name)
	size_t hash = name_hash(f, parent, name);

	for (node = f->name_table.array[hash]; node != NULL; node = node->name_next)
		if (node->parent->nodeid == parent &&
		    strcmp(node->name, name) == 0)
			return node;
static void inc_nlookup(struct node *node)

static struct node *find_node(struct fuse *f, fuse_ino_t parent,
			      const char *name)
	pthread_mutex_lock(&f->lock);
		node = get_node(f, parent);
		node = lookup_node(f, parent, name);
		node = alloc_node(f);

		node->nodeid = next_id(f);
		node->generation = f->generation;
		if (f->conf.remember)
			inc_nlookup(node);

		if (hash_name(f, node, parent, name) == -1) {

		if (lru_enabled(f)) {
			struct node_lru *lnode = node_lru(node);
			init_list_head(&lnode->lru);
		}
	} else if (lru_enabled(f) && node->nlookup == 1) {
		remove_node_lru(node);
	}
	pthread_mutex_unlock(&f->lock);
static int lookup_path_in_cache(struct fuse *f,
				const char *path, fuse_ino_t *inop)
	char *tmp = strdup(path);

	pthread_mutex_lock(&f->lock);

	char *path_element = strtok_r(tmp, "/", &save_ptr);
	while (path_element != NULL) {
		struct node *node = lookup_node(f, ino, path_element);
		path_element = strtok_r(NULL, "/", &save_ptr);
	}
	pthread_mutex_unlock(&f->lock);
static char *add_name(char **buf, unsigned *bufsize, char *s,
		      const char *name)
	size_t len = strlen(name);

	if (s - len <= *buf) {
		unsigned pathlen = *bufsize - (s - *buf);
		unsigned newbufsize = *bufsize;

		while (newbufsize < pathlen + len + 1) {
			if (newbufsize >= 0x80000000)
				newbufsize = 0xffffffff;
			else
				newbufsize *= 2;
		}

		newbuf = realloc(*buf, newbufsize);

		s = newbuf + newbufsize - pathlen;
		memmove(s, newbuf + *bufsize - pathlen, pathlen);
		*bufsize = newbufsize;
	}
	s -= len;
	memcpy(s, name, len);
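/*
 * add_name() assembles the path right-to-left: 's' walks backwards from the
 * end of the buffer and each component is copied in front of what has
 * already been built.  When a component no longer fits, the buffer is grown
 * geometrically (doubling, saturating at 0xffffffff) and the already-built
 * tail is moved to the end of the new buffer so the backwards walk can
 * continue.
 */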
static void unlock_path(struct fuse *f, fuse_ino_t nodeid, struct node *wnode,
			struct node *end)
		assert(wnode->treelock == TREELOCK_WRITE);

	for (node = get_node(f, nodeid);
	     node != end && node->nodeid != FUSE_ROOT_ID; node = node->parent) {
		assert(node->treelock != 0);
		assert(node->treelock != TREELOCK_WAIT_OFFSET);
		assert(node->treelock != TREELOCK_WRITE);
		node->treelock--;
		if (node->treelock == TREELOCK_WAIT_OFFSET)
			node->treelock = 0;
	}
static int try_get_path(struct fuse *f, fuse_ino_t nodeid, const char *name,
			char **path, struct node **wnodep, bool need_lock)
	unsigned bufsize = 256;
	struct node *wnode = NULL;

	buf = malloc(bufsize);

	s = buf + bufsize - 1;

		s = add_name(&buf, &bufsize, s, name);

		wnode = lookup_node(f, nodeid, name);
			if (wnode->treelock != 0) {
				if (wnode->treelock > 0)
					wnode->treelock += TREELOCK_WAIT_OFFSET;
			}
			wnode->treelock = TREELOCK_WRITE;

	for (node = get_node(f, nodeid); node->nodeid != FUSE_ROOT_ID;
	     node = node->parent) {
		if (node->name == NULL || node->parent == NULL)

		s = add_name(&buf, &bufsize, s, node->name);

		if (node->treelock < 0)

	memmove(buf, s, bufsize - (s - buf));

	unlock_path(f, nodeid, wnode, node);
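/*
 * try_get_path() resolves a nodeid to a full path while taking the "tree
 * lock" on every node along the way: readers bump node->treelock, a writer
 * on the target sets it to TREELOCK_WRITE (-1), and a writer that finds the
 * target busy parks it by adding TREELOCK_WAIT_OFFSET (INT_MIN).
 * unlock_path() undoes exactly what a successful walk took; on contention
 * the caller gets -EAGAIN and is queued on f->lockq until the current
 * holders release the path.
 */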
static int try_get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
			 fuse_ino_t nodeid2, const char *name2,
			 char **path1, char **path2,
			 struct node **wnode1, struct node **wnode2)
	err = try_get_path(f, nodeid1, name1, path1, wnode1, true);
		err = try_get_path(f, nodeid2, name2, path2, wnode2, true);
			struct node *wn1 = wnode1 ? *wnode1 : NULL;

			unlock_path(f, nodeid1, wn1, NULL);
static void queue_element_wakeup(struct fuse *f, struct lock_queue_element *qe)
		if (get_node(f, qe->nodeid1)->treelock == 0)
			pthread_cond_signal(&qe->cond);

		err = try_get_path(f, qe->nodeid1, qe->name1, qe->path1,
				   qe->wnode1, true);
		err = try_get_path2(f, qe->nodeid1, qe->name1, qe->nodeid2,
				    qe->name2, qe->path1, qe->path2, qe->wnode1,
				    qe->wnode2);

	pthread_cond_signal(&qe->cond);
static void wake_up_queued(struct fuse *f)
	struct lock_queue_element *qe;

	for (qe = f->lockq; qe != NULL; qe = qe->next)
		queue_element_wakeup(f, qe);
static void debug_path(struct fuse *f, const char *msg, fuse_ino_t nodeid,
		       const char *name, bool wr)
	if (f->conf.debug) {
		struct node *wnode = NULL;

		if (wr)
			wnode = lookup_node(f, nodeid, name);

		if (wnode) {
			fuse_log(FUSE_LOG_DEBUG, "%s %llu (w)\n",
				 msg, (unsigned long long) wnode->nodeid);
		} else {
			fuse_log(FUSE_LOG_DEBUG, "%s %llu\n",
				 msg, (unsigned long long) nodeid);
		}
	}
static void queue_path(struct fuse *f, struct lock_queue_element *qe)
	struct lock_queue_element **qp;

	pthread_cond_init(&qe->cond, NULL);

	for (qp = &f->lockq; *qp != NULL; qp = &(*qp)->next);
	*qp = qe;

static void dequeue_path(struct fuse *f, struct lock_queue_element *qe)
	struct lock_queue_element **qp;

	pthread_cond_destroy(&qe->cond);
	for (qp = &f->lockq; *qp != qe; qp = &(*qp)->next);
	*qp = qe->next;
static int wait_path(struct fuse *f, struct lock_queue_element *qe)
	queue_path(f, qe);

	do {
		pthread_cond_wait(&qe->cond, &f->lock);
	} while (!qe->done);

	dequeue_path(f, qe);
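/*
 * The lock queue: when try_get_path()/try_get_path2() return -EAGAIN, the
 * request describes itself in a lock_queue_element, appends it to f->lockq
 * and sleeps on its condition variable in wait_path().  Whenever a tree
 * lock is dropped, wake_up_queued() retries the path resolution for every
 * queued element and signals the ones that can now make progress.
 */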
static int get_path_common(struct fuse *f, fuse_ino_t nodeid, const char *name,
			   char **path, struct node **wnode)
	pthread_mutex_lock(&f->lock);
	err = try_get_path(f, nodeid, name, path, wnode, true);
	if (err == -EAGAIN) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
			.name1 = name,
			.path1 = path,
			.wnode1 = wnode,
		};

		debug_path(f, "QUEUE PATH", nodeid, name, !!wnode);
		err = wait_path(f, &qe);
		debug_path(f, "DEQUEUE PATH", nodeid, name, !!wnode);
	}
	pthread_mutex_unlock(&f->lock);
static int get_path(struct fuse *f, fuse_ino_t nodeid, char **path)
	return get_path_common(f, nodeid, NULL, path, NULL);

static int get_path_nullok(struct fuse *f, fuse_ino_t nodeid, char **path)
	if (f->conf.nullpath_ok) {
		*path = NULL;
	} else {
		err = get_path_common(f, nodeid, NULL, path, NULL);
	}

static int get_path_name(struct fuse *f, fuse_ino_t nodeid, const char *name,
			 char **path)
	return get_path_common(f, nodeid, name, path, NULL);

static int get_path_wrlock(struct fuse *f, fuse_ino_t nodeid, const char *name,
			   char **path, struct node **wnode)
	return get_path_common(f, nodeid, name, path, wnode);
#if defined(__FreeBSD__)
#define CHECK_DIR_LOOP
#endif

#if defined(CHECK_DIR_LOOP)
static int check_dir_loop(struct fuse *f,
			  fuse_ino_t nodeid1, const char *name1,
			  fuse_ino_t nodeid2, const char *name2)
	struct node *node, *node1, *node2;

	node1 = lookup_node(f, nodeid1, name1);
	id1 = node1 ? node1->nodeid : nodeid1;

	node2 = lookup_node(f, nodeid2, name2);
	id2 = node2 ? node2->nodeid : nodeid2;

	for (node = get_node(f, id2); node->nodeid != FUSE_ROOT_ID;
	     node = node->parent) {
		if (node->name == NULL || node->parent == NULL)
			break;

		if (node->nodeid != id2 && node->nodeid == id1)
			return -EINVAL;
	}

	for (node = get_node(f, id1); node->nodeid != FUSE_ROOT_ID;
	     node = node->parent) {
		if (node->name == NULL || node->parent == NULL)
			break;

		if (node->nodeid != id1 && node->nodeid == id2)
			return -ENOTEMPTY;
	}
static int get_path2(struct fuse *f, fuse_ino_t nodeid1, const char *name1,
		     fuse_ino_t nodeid2, const char *name2,
		     char **path1, char **path2,
		     struct node **wnode1, struct node **wnode2)
	pthread_mutex_lock(&f->lock);

#if defined(CHECK_DIR_LOOP)
	err = check_dir_loop(f, nodeid1, name1, nodeid2, name2);
#endif

	err = try_get_path2(f, nodeid1, name1, nodeid2, name2,
			    path1, path2, wnode1, wnode2);
	if (err == -EAGAIN) {
		struct lock_queue_element qe = {

		debug_path(f, "QUEUE PATH1", nodeid1, name1, !!wnode1);
		debug_path(f, "      PATH2", nodeid2, name2, !!wnode2);
		err = wait_path(f, &qe);
		debug_path(f, "DEQUEUE PATH1", nodeid1, name1, !!wnode1);
		debug_path(f, "        PATH2", nodeid2, name2, !!wnode2);

#if defined(CHECK_DIR_LOOP)
#endif
	pthread_mutex_unlock(&f->lock);
static void free_path_wrlock(struct fuse *f, fuse_ino_t nodeid,
			     struct node *wnode, char *path)
	pthread_mutex_lock(&f->lock);
	unlock_path(f, nodeid, wnode, NULL);
	pthread_mutex_unlock(&f->lock);

static void free_path(struct fuse *f, fuse_ino_t nodeid, char *path)
	if (path)
		free_path_wrlock(f, nodeid, NULL, path);
static void free_path2(struct fuse *f, fuse_ino_t nodeid1, fuse_ino_t nodeid2,
		       struct node *wnode1, struct node *wnode2,
		       char *path1, char *path2)
	pthread_mutex_lock(&f->lock);
	unlock_path(f, nodeid1, wnode1, NULL);
	unlock_path(f, nodeid2, wnode2, NULL);
	pthread_mutex_unlock(&f->lock);
static void forget_node(struct fuse *f, fuse_ino_t nodeid, uint64_t nlookup)
	pthread_mutex_lock(&f->lock);
	node = get_node(f, nodeid);

	while (node->nlookup == nlookup && node->treelock) {
		struct lock_queue_element qe = {
			.nodeid1 = nodeid,
		};

		debug_path(f, "QUEUE PATH (forget)", nodeid, NULL, false);

		do {
			pthread_cond_wait(&qe.cond, &f->lock);
		} while (node->nlookup == nlookup && node->treelock);

		dequeue_path(f, &qe);
		debug_path(f, "DEQUEUE_PATH (forget)", nodeid, NULL, false);
	}

	assert(node->nlookup >= nlookup);
	node->nlookup -= nlookup;
	if (!node->nlookup) {
		unref_node(f, node);
	} else if (lru_enabled(f) && node->nlookup == 1) {
		set_forget_time(f, node);
	}
	pthread_mutex_unlock(&f->lock);
static void unlink_node(struct fuse *f, struct node *node)
	if (f->conf.remember) {
		assert(node->nlookup > 1);
		node->nlookup--;
	}
	unhash_name(f, node);

static void remove_node(struct fuse *f, fuse_ino_t dir, const char *name)
	pthread_mutex_lock(&f->lock);
	node = lookup_node(f, dir, name);
	if (node != NULL)
		unlink_node(f, node);
	pthread_mutex_unlock(&f->lock);
static int rename_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
		       fuse_ino_t newdir, const char *newname, int hide)
	struct node *newnode;

	pthread_mutex_lock(&f->lock);
	node = lookup_node(f, olddir, oldname);
	newnode = lookup_node(f, newdir, newname);

	if (newnode != NULL) {
		if (hide) {
			fuse_log(FUSE_LOG_ERR,
				 "fuse: hidden file got created during hiding\n");
		}
		unlink_node(f, newnode);
	}

	unhash_name(f, node);
	if (hash_name(f, node, newdir, newname) == -1) {

	if (hide)
		node->is_hidden = 1;

	pthread_mutex_unlock(&f->lock);
static int exchange_node(struct fuse *f, fuse_ino_t olddir, const char *oldname,
			 fuse_ino_t newdir, const char *newname)
	struct node *oldnode;
	struct node *newnode;

	pthread_mutex_lock(&f->lock);
	oldnode = lookup_node(f, olddir, oldname);
	newnode = lookup_node(f, newdir, newname);

		unhash_name(f, oldnode);
		unhash_name(f, newnode);

	if (hash_name(f, oldnode, newdir, newname) == -1)
	if (hash_name(f, newnode, olddir, oldname) == -1)

	pthread_mutex_unlock(&f->lock);
static void set_stat(struct fuse *f, fuse_ino_t nodeid, struct stat *stbuf)
	if (!f->conf.use_ino)
		stbuf->st_ino = nodeid;
	if (f->conf.set_mode)
		stbuf->st_mode = (stbuf->st_mode & S_IFMT) |
				 (0777 & ~f->conf.umask);
	if (f->conf.set_uid)
		stbuf->st_uid = f->conf.uid;
	if (f->conf.set_gid)
		stbuf->st_gid = f->conf.gid;
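/*
 * set_stat() applies the mount-wide attribute overrides before a struct
 * stat is sent to the kernel: without "use_ino" the st_ino reported by the
 * filesystem is replaced by the library's own nodeid, and the set_mode/
 * set_uid/set_gid options force the permission bits, owner and group to the
 * values given at mount time (for example "-o umask=022,uid=1000").
 */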
static void fuse_intr_sighandler(int sig)

struct fuse_intr_data {
	pthread_cond_t cond;

static void fuse_interrupt(fuse_req_t req, void *d_)
	struct fuse_intr_data *d = d_;
	struct fuse *f = req_fuse(req);

	if (d->id == pthread_self())
		return;

	pthread_mutex_lock(&f->lock);
	while (!d->finished) {
		struct timeval now;
		struct timespec timeout;

		pthread_kill(d->id, f->conf.intr_signal);
		gettimeofday(&now, NULL);
		timeout.tv_sec = now.tv_sec + 1;
		timeout.tv_nsec = now.tv_usec * 1000;
		pthread_cond_timedwait(&d->cond, &f->lock, &timeout);
	}
	pthread_mutex_unlock(&f->lock);
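/*
 * fuse_interrupt() runs when the kernel interrupts a request: it keeps
 * sending the configured interrupt signal (SIGUSR1 by default) to the
 * thread processing the request and waits, with a one second timeout,
 * until fuse_do_finish_interrupt() marks the request as finished.  The
 * signal only needs to exist so that blocking syscalls inside the
 * filesystem implementation return with EINTR.
 */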
static void fuse_do_finish_interrupt(struct fuse *f, fuse_req_t req,
				     struct fuse_intr_data *d)
	pthread_mutex_lock(&f->lock);
	pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&f->lock);
	pthread_cond_destroy(&d->cond);

static void fuse_do_prepare_interrupt(fuse_req_t req, struct fuse_intr_data *d)
	d->id = pthread_self();
	pthread_cond_init(&d->cond, NULL);

static inline void fuse_finish_interrupt(struct fuse *f, fuse_req_t req,
					 struct fuse_intr_data *d)
	if (f->conf.intr)
		fuse_do_finish_interrupt(f, req, d);

static inline void fuse_prepare_interrupt(struct fuse *f, fuse_req_t req,
					  struct fuse_intr_data *d)
	if (f->conf.intr)
		fuse_do_prepare_interrupt(req, d);
static const char *file_info_string(struct fuse_file_info *fi,
				    char *buf, size_t len)
	snprintf(buf, len, "%llu", (unsigned long long) fi->fh);
	return buf;

int fuse_fs_getattr(struct fuse_fs *fs, const char *path, struct stat *buf,
		    struct fuse_file_info *fi)
	if (fs->op.getattr) {
			fuse_log(FUSE_LOG_DEBUG, "getattr[%s] %s\n",
				 file_info_string(fi, buf, sizeof(buf)),
				 path);
		return fs->op.getattr(path, buf, fi);
int fuse_fs_rename(struct fuse_fs *fs, const char *oldpath,
		   const char *newpath, unsigned int flags)
	if (fs->op.rename) {
			fuse_log(FUSE_LOG_DEBUG, "rename %s %s 0x%x\n",
				 oldpath, newpath, flags);
		return fs->op.rename(oldpath, newpath, flags);

int fuse_fs_unlink(struct fuse_fs *fs, const char *path)
	if (fs->op.unlink) {
			fuse_log(FUSE_LOG_DEBUG, "unlink %s\n", path);
		return fs->op.unlink(path);

int fuse_fs_rmdir(struct fuse_fs *fs, const char *path)
			fuse_log(FUSE_LOG_DEBUG, "rmdir %s\n", path);
		return fs->op.rmdir(path);

int fuse_fs_symlink(struct fuse_fs *fs, const char *linkname,
		    const char *path)
	if (fs->op.symlink) {
			fuse_log(FUSE_LOG_DEBUG, "symlink %s %s\n", linkname, path);
		return fs->op.symlink(linkname, path);

int fuse_fs_link(struct fuse_fs *fs, const char *oldpath, const char *newpath)
			fuse_log(FUSE_LOG_DEBUG, "link %s %s\n", oldpath, newpath);
		return fs->op.link(oldpath, newpath);
int fuse_fs_release(struct fuse_fs *fs, const char *path,
		    struct fuse_file_info *fi)
	if (fs->op.release) {
			fuse_log(FUSE_LOG_DEBUG, "release%s[%llu] flags: 0x%x\n",
				 fi->flush ? "+flush" : "",
				 (unsigned long long) fi->fh, fi->flags);
		return fs->op.release(path, fi);

int fuse_fs_opendir(struct fuse_fs *fs, const char *path,
		    struct fuse_file_info *fi)
	if (fs->op.opendir) {
			fuse_log(FUSE_LOG_DEBUG, "opendir flags: 0x%x %s\n",
				 fi->flags, path);
		err = fs->op.opendir(path, fi);

		if (fs->debug && !err)
			fuse_log(FUSE_LOG_DEBUG, " opendir[%llu] flags: 0x%x %s\n",
				 (unsigned long long) fi->fh, fi->flags, path);

int fuse_fs_open(struct fuse_fs *fs, const char *path,
		 struct fuse_file_info *fi)
			fuse_log(FUSE_LOG_DEBUG, "open flags: 0x%x %s\n",
				 fi->flags, path);
		err = fs->op.open(path, fi);

		if (fs->debug && !err)
			fuse_log(FUSE_LOG_DEBUG, " open[%llu] flags: 0x%x %s\n",
				 (unsigned long long) fi->fh, fi->flags, path);
static void fuse_free_buf(struct fuse_bufvec *buf)
		for (i = 0; i < buf->count; i++)

int fuse_fs_read_buf(struct fuse_fs *fs, const char *path,
		     struct fuse_bufvec **bufp, size_t size, off_t off,
		     struct fuse_file_info *fi)
	if (fs->op.read || fs->op.read_buf) {
			fuse_log(FUSE_LOG_DEBUG,
				 "read[%llu] %zu bytes from %llu flags: 0x%x\n",
				 (unsigned long long) fi->fh,
				 size, (unsigned long long) off, fi->flags);
		if (fs->op.read_buf) {
			res = fs->op.read_buf(path, bufp, size, off, fi);

			*buf = FUSE_BUFVEC_INIT(size);

			res = fs->op.read(path, mem, size, off, fi);

		if (fs->debug && res >= 0)
			fuse_log(FUSE_LOG_DEBUG,
				 " read[%llu] %zu bytes from %llu\n",
				 (unsigned long long) fi->fh,
				 (unsigned long long) off);
			fuse_log(FUSE_LOG_ERR, "fuse: read too many bytes\n");
int fuse_fs_read(struct fuse_fs *fs, const char *path, char *mem, size_t size,
		 off_t off, struct fuse_file_info *fi)
	if (fs->op.read || fs->op.read_buf) {
			fuse_log(FUSE_LOG_DEBUG,
				 "read[%llu] %zu bytes from %llu flags: 0x%x\n",
				 (unsigned long long) fi->fh,
				 size, (unsigned long long) off, fi->flags);
		if (fs->op.read_buf) {
			res = fs->op.read_buf(path, &buf, size, off, fi);

			res = fs->op.read(path, mem, size, off, fi);

		if (fs->debug && res >= 0)
			fuse_log(FUSE_LOG_DEBUG,
				 " read[%llu] %u bytes from %llu\n",
				 (unsigned long long) fi->fh,
				 (unsigned long long) off);
		if (res >= 0 && res > (int) size)
			fuse_log(FUSE_LOG_ERR, "fuse: read too many bytes\n");
int fuse_fs_write_buf(struct fuse_fs *fs, const char *path,
		      struct fuse_bufvec *buf, off_t off,
		      struct fuse_file_info *fi)
	if (fs->op.write_buf || fs->op.write) {
		assert(buf->idx == 0 && buf->off == 0);

			fuse_log(FUSE_LOG_DEBUG,
				 "write%s[%llu] %zu bytes to %llu flags: 0x%x\n",
				 (unsigned long long) fi->fh,
				 (unsigned long long) off,

		if (fs->op.write_buf) {
			res = fs->op.write_buf(path, buf, off, fi);

			if (buf->count == 1 &&
				flatbuf = &buf->buf[0];

				flatbuf = &tmp.buf[0];

			res = fs->op.write(path, flatbuf->mem, flatbuf->size,
					   off, fi);

		if (fs->debug && res >= 0)
			fuse_log(FUSE_LOG_DEBUG,
				 " write%s[%llu] %u bytes to %llu\n",
				 (unsigned long long) fi->fh, res,
				 (unsigned long long) off);
		if (res > (int) size)
			fuse_log(FUSE_LOG_ERR, "fuse: wrote too many bytes\n");
int fuse_fs_write(struct fuse_fs *fs, const char *path, const char *mem,
		  size_t size, off_t off, struct fuse_file_info *fi)
	bufv.buf[0].mem = (void *) mem;

	return fuse_fs_write_buf(fs, path, &bufv, off, fi);
int fuse_fs_fsync(struct fuse_fs *fs, const char *path, int datasync,
		  struct fuse_file_info *fi)
			fuse_log(FUSE_LOG_DEBUG, "fsync[%llu] datasync: %i\n",
				 (unsigned long long) fi->fh, datasync);
		return fs->op.fsync(path, datasync, fi);

int fuse_fs_fsyncdir(struct fuse_fs *fs, const char *path, int datasync,
		     struct fuse_file_info *fi)
	if (fs->op.fsyncdir) {
			fuse_log(FUSE_LOG_DEBUG, "fsyncdir[%llu] datasync: %i\n",
				 (unsigned long long) fi->fh, datasync);
		return fs->op.fsyncdir(path, datasync, fi);

int fuse_fs_flush(struct fuse_fs *fs, const char *path,
		  struct fuse_file_info *fi)
			fuse_log(FUSE_LOG_DEBUG, "flush[%llu]\n",
				 (unsigned long long) fi->fh);
		return fs->op.flush(path, fi);

int fuse_fs_statfs(struct fuse_fs *fs, const char *path, struct statvfs *buf)
	if (fs->op.statfs) {
			fuse_log(FUSE_LOG_DEBUG, "statfs %s\n", path);
		return fs->op.statfs(path, buf);

		buf->f_namemax = 255;
int fuse_fs_releasedir(struct fuse_fs *fs, const char *path,
		       struct fuse_file_info *fi)
	if (fs->op.releasedir) {
			fuse_log(FUSE_LOG_DEBUG, "releasedir[%llu] flags: 0x%x\n",
				 (unsigned long long) fi->fh, fi->flags);
		return fs->op.releasedir(path, fi);

int fuse_fs_readdir(struct fuse_fs *fs, const char *path, void *buf,
		    fuse_fill_dir_t filler, off_t off,
		    struct fuse_file_info *fi, enum fuse_readdir_flags flags)
	if (fs->op.readdir) {
			fuse_log(FUSE_LOG_DEBUG, "readdir%s[%llu] from %llu\n",
				 (unsigned long long) fi->fh,
				 (unsigned long long) off);
		return fs->op.readdir(path, buf, filler, off, fi, flags);

int fuse_fs_create(struct fuse_fs *fs, const char *path, mode_t mode,
		   struct fuse_file_info *fi)
	if (fs->op.create) {
			fuse_log(FUSE_LOG_DEBUG,
				 "create flags: 0x%x %s 0%o umask=0%03o\n",
				 fi->flags, path, mode,
		err = fs->op.create(path, mode, fi);

		if (fs->debug && !err)
			fuse_log(FUSE_LOG_DEBUG,
				 " create[%llu] flags: 0x%x %s\n",
				 (unsigned long long) fi->fh, fi->flags, path);
int fuse_fs_lock(struct fuse_fs *fs, const char *path,
		 struct fuse_file_info *fi, int cmd, struct flock *lock)
			fuse_log(FUSE_LOG_DEBUG,
				 "lock[%llu] %s %s start: %llu len: %llu pid: %llu\n",
				 (unsigned long long) fi->fh,
				 (cmd == F_GETLK ? "F_GETLK" :
				  (cmd == F_SETLK ? "F_SETLK" :
				   (cmd == F_SETLKW ? "F_SETLKW" : "???"))),
				 (lock->l_type == F_RDLCK ? "F_RDLCK" :
				  (lock->l_type == F_WRLCK ? "F_WRLCK" :
				   (lock->l_type == F_UNLCK ? "F_UNLCK" : "???"))),
				 (unsigned long long) lock->l_start,
				 (unsigned long long) lock->l_len,
				 (unsigned long long) lock->l_pid);
		return fs->op.lock(path, fi, cmd, lock);

int fuse_fs_flock(struct fuse_fs *fs, const char *path,
		  struct fuse_file_info *fi, int op)
		int xop = op & ~LOCK_NB;

			fuse_log(FUSE_LOG_DEBUG, "lock[%llu] %s%s\n",
				 (unsigned long long) fi->fh,
				 xop == LOCK_SH ? "LOCK_SH" :
				 (xop == LOCK_EX ? "LOCK_EX" :
				  (xop == LOCK_UN ? "LOCK_UN" : "???")),
				 (op & LOCK_NB) ? "|LOCK_NB" : "");
		return fs->op.flock(path, fi, op);
int fuse_fs_chown(struct fuse_fs *fs, const char *path, uid_t uid,
		  gid_t gid, struct fuse_file_info *fi)
			fuse_log(FUSE_LOG_DEBUG, "chown[%s] %s %lu %lu\n",
				 file_info_string(fi, buf, sizeof(buf)),
				 path, (unsigned long) uid, (unsigned long) gid);
		return fs->op.chown(path, uid, gid, fi);

int fuse_fs_truncate(struct fuse_fs *fs, const char *path, off_t size,
		     struct fuse_file_info *fi)
	if (fs->op.truncate) {
			fuse_log(FUSE_LOG_DEBUG, "truncate[%s] %llu\n",
				 file_info_string(fi, buf, sizeof(buf)),
				 (unsigned long long) size);
		return fs->op.truncate(path, size, fi);

int fuse_fs_utimens(struct fuse_fs *fs, const char *path,
		    const struct timespec tv[2], struct fuse_file_info *fi)
	if (fs->op.utimens) {
			fuse_log(FUSE_LOG_DEBUG,
				 "utimens[%s] %s %li.%09lu %li.%09lu\n",
				 file_info_string(fi, buf, sizeof(buf)),
				 path, tv[0].tv_sec, tv[0].tv_nsec,
				 tv[1].tv_sec, tv[1].tv_nsec);
		return fs->op.utimens(path, tv, fi);
int fuse_fs_access(struct fuse_fs *fs, const char *path, int mask)
	if (fs->op.access) {
			fuse_log(FUSE_LOG_DEBUG, "access %s 0%o\n", path, mask);
		return fs->op.access(path, mask);

int fuse_fs_readlink(struct fuse_fs *fs, const char *path, char *buf,
		     size_t len)
	if (fs->op.readlink) {
			fuse_log(FUSE_LOG_DEBUG, "readlink %s %lu\n", path,
				 (unsigned long) len);
		return fs->op.readlink(path, buf, len);

int fuse_fs_mknod(struct fuse_fs *fs, const char *path, mode_t mode,
		  dev_t rdev)
			fuse_log(FUSE_LOG_DEBUG,
				 "mknod %s 0%o 0x%llx umask=0%03o\n",
				 path, mode, (unsigned long long) rdev,
		return fs->op.mknod(path, mode, rdev);

int fuse_fs_mkdir(struct fuse_fs *fs, const char *path, mode_t mode)
			fuse_log(FUSE_LOG_DEBUG, "mkdir %s 0%o umask=0%03o\n",
		return fs->op.mkdir(path, mode);
int fuse_fs_setxattr(struct fuse_fs *fs, const char *path, const char *name,
		     const char *value, size_t size, int flags)
	if (fs->op.setxattr) {
			fuse_log(FUSE_LOG_DEBUG, "setxattr %s %s %lu 0x%x\n",
				 path, name, (unsigned long) size, flags);
		return fs->op.setxattr(path, name, value, size, flags);

int fuse_fs_getxattr(struct fuse_fs *fs, const char *path, const char *name,
		     char *value, size_t size)
	if (fs->op.getxattr) {
			fuse_log(FUSE_LOG_DEBUG, "getxattr %s %s %lu\n",
				 path, name, (unsigned long) size);
		return fs->op.getxattr(path, name, value, size);

int fuse_fs_listxattr(struct fuse_fs *fs, const char *path, char *list,
		      size_t size)
	if (fs->op.listxattr) {
			fuse_log(FUSE_LOG_DEBUG, "listxattr %s %lu\n",
				 path, (unsigned long) size);
		return fs->op.listxattr(path, list, size);

int fuse_fs_bmap(struct fuse_fs *fs, const char *path, size_t blocksize,
		 uint64_t *idx)
			fuse_log(FUSE_LOG_DEBUG,
				 "bmap %s blocksize: %lu index: %llu\n",
				 path, (unsigned long) blocksize,
				 (unsigned long long) *idx);
		return fs->op.bmap(path, blocksize, idx);
int fuse_fs_removexattr(struct fuse_fs *fs, const char *path, const char *name)
	if (fs->op.removexattr) {
			fuse_log(FUSE_LOG_DEBUG, "removexattr %s %s\n", path, name);
		return fs->op.removexattr(path, name);

int fuse_fs_ioctl(struct fuse_fs *fs, const char *path, unsigned int cmd,
		  void *arg, struct fuse_file_info *fi, unsigned int flags,
		  void *data)
			fuse_log(FUSE_LOG_DEBUG, "ioctl[%llu] 0x%x flags: 0x%x\n",
				 (unsigned long long) fi->fh, cmd, flags);
		return fs->op.ioctl(path, cmd, arg, fi, flags, data);

int fuse_fs_poll(struct fuse_fs *fs, const char *path,
		 struct fuse_file_info *fi, struct fuse_pollhandle *ph,
		 unsigned *reventsp)
			fuse_log(FUSE_LOG_DEBUG, "poll[%llu] ph: %p, events 0x%x\n",
				 (unsigned long long) fi->fh, ph,
		res = fs->op.poll(path, fi, ph, reventsp);

		if (fs->debug && !res)
			fuse_log(FUSE_LOG_DEBUG, " poll[%llu] revents: 0x%x\n",
				 (unsigned long long) fi->fh, *reventsp);

int fuse_fs_fallocate(struct fuse_fs *fs, const char *path, int mode,
		      off_t offset, off_t length, struct fuse_file_info *fi)
	if (fs->op.fallocate) {
			fuse_log(FUSE_LOG_DEBUG,
				 "fallocate %s mode %x, offset: %llu, length: %llu\n",
				 (unsigned long long) offset,
				 (unsigned long long) length);
		return fs->op.fallocate(path, mode, offset, length, fi);
ssize_t fuse_fs_copy_file_range(struct fuse_fs *fs, const char *path_in,
				struct fuse_file_info *fi_in, off_t off_in,
				const char *path_out,
				struct fuse_file_info *fi_out, off_t off_out,
				size_t len, int flags)
	if (fs->op.copy_file_range) {
			fuse_log(FUSE_LOG_DEBUG,
				 "copy_file_range from %s:%llu to "
				 "%s:%llu, length: %llu\n",
				 (unsigned long long) off_in,
				 (unsigned long long) off_out,
				 (unsigned long long) len);
		return fs->op.copy_file_range(path_in, fi_in, off_in, path_out,
					      fi_out, off_out, len, flags);

off_t fuse_fs_lseek(struct fuse_fs *fs, const char *path, off_t off,
		    int whence, struct fuse_file_info *fi)
			fuse_log(FUSE_LOG_DEBUG, "lseek[%s] %llu %d\n",
				 file_info_string(fi, buf, sizeof(buf)),
				 (unsigned long long) off, whence);
		return fs->op.lseek(path, off, whence, fi);
static int is_open(struct fuse *f, fuse_ino_t dir, const char *name)
	pthread_mutex_lock(&f->lock);
	node = lookup_node(f, dir, name);
	if (node && node->open_count > 0)
		isopen = 1;
	pthread_mutex_unlock(&f->lock);
static char *hidden_name(struct fuse *f, fuse_ino_t dir, const char *oldname,
			 char *newname, size_t bufsize)
	struct node *newnode;

		pthread_mutex_lock(&f->lock);
		node = lookup_node(f, dir, oldname);
			pthread_mutex_unlock(&f->lock);

			snprintf(newname, bufsize, ".fuse_hidden%08x%08x",
				 (unsigned int) node->nodeid, f->hidectr);
			newnode = lookup_node(f, dir, newname);

		res = try_get_path(f, dir, newname, &newpath, NULL, false);
		pthread_mutex_unlock(&f->lock);

		memset(&buf, 0, sizeof(buf));
		res = fuse_fs_getattr(f->fs, newpath, &buf, NULL);

	} while (res == 0 && --failctr);
static int hide_node(struct fuse *f, const char *oldpath,
		     fuse_ino_t dir, const char *oldname)
	newpath = hidden_name(f, dir, oldname, newname, sizeof(newname));
		err = fuse_fs_rename(f->fs, oldpath, newpath, 0);
			err = rename_node(f, dir, oldname, dir, newname, 1);

static int mtime_eq(const struct stat *stbuf, const struct timespec *ts)
	return stbuf->st_mtime == ts->tv_sec &&
	       ST_MTIM_NSEC(stbuf) == ts->tv_nsec;
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC CLOCK_REALTIME
#endif

static void curr_time(struct timespec *now)
	static clockid_t clockid = CLOCK_MONOTONIC;
	int res = clock_gettime(clockid, now);
	if (res == -1 && errno == EINVAL) {
		clockid = CLOCK_REALTIME;
		res = clock_gettime(clockid, now);
	}
		perror("fuse: clock_gettime");
static void update_stat(struct node *node, const struct stat *stbuf)
	if (node->cache_valid && (!mtime_eq(stbuf, &node->mtime) ||
				  stbuf->st_size != node->size))
		node->cache_valid = 0;
	node->mtime.tv_sec = stbuf->st_mtime;
	node->mtime.tv_nsec = ST_MTIM_NSEC(stbuf);
	node->size = stbuf->st_size;
	curr_time(&node->stat_updated);
static int do_lookup(struct fuse *f, fuse_ino_t nodeid, const char *name,
		     struct fuse_entry_param *e)
	node = find_node(f, nodeid, name);

	e->ino = node->nodeid;
	if (f->conf.auto_cache) {
		pthread_mutex_lock(&f->lock);
		update_stat(node, &e->attr);
		pthread_mutex_unlock(&f->lock);
	}
	set_stat(f, e->ino, &e->attr);
static int lookup_path(struct fuse *f, fuse_ino_t nodeid,
		       const char *name, const char *path,
		       struct fuse_entry_param *e, struct fuse_file_info *fi)
	res = fuse_fs_getattr(f->fs, path, &e->attr, fi);
		res = do_lookup(f, nodeid, name, e);
		if (res == 0 && f->conf.debug) {
			fuse_log(FUSE_LOG_DEBUG, " NODEID: %llu\n",
				 (unsigned long long) e->ino);
		}
static struct fuse_context_i *fuse_get_context_internal(void)
	return (struct fuse_context_i *) pthread_getspecific(fuse_context_key);

static struct fuse_context_i *fuse_create_context(struct fuse *f)
	struct fuse_context_i *c = fuse_get_context_internal();
		c = (struct fuse_context_i *)
			calloc(1, sizeof(struct fuse_context_i));
			fuse_log(FUSE_LOG_ERR,
				 "fuse: failed to allocate thread specific data\n");
		pthread_setspecific(fuse_context_key, c);
		memset(c, 0, sizeof(*c));
static void fuse_freecontext(void *data)

static int fuse_create_context_key(void)
	pthread_mutex_lock(&fuse_context_lock);
	if (!fuse_context_ref) {
		err = pthread_key_create(&fuse_context_key, fuse_freecontext);
			fuse_log(FUSE_LOG_ERR,
				 "fuse: failed to create thread specific key: %s\n",
				 strerror(err));
			pthread_mutex_unlock(&fuse_context_lock);
	pthread_mutex_unlock(&fuse_context_lock);

static void fuse_delete_context_key(void)
	pthread_mutex_lock(&fuse_context_lock);
	if (!fuse_context_ref) {
		free(pthread_getspecific(fuse_context_key));
		pthread_key_delete(fuse_context_key);
	}
	pthread_mutex_unlock(&fuse_context_lock);
static struct fuse *req_fuse_prepare(fuse_req_t req)
	struct fuse_context_i *c = fuse_create_context(req_fuse(req));
	c->ctx.uid = ctx->uid;
	c->ctx.gid = ctx->gid;
	c->ctx.pid = ctx->pid;
	c->ctx.umask = ctx->umask;

static inline void reply_err(fuse_req_t req, int err)

	struct fuse *f = req_fuse(req);
			forget_node(f, e->ino, 1);
		reply_err(req, err);
void fuse_fs_init(struct fuse_fs *fs, struct fuse_conn_info *conn,
		  struct fuse_config *cfg)
	if (!fs->op.write_buf)

		fs->user_data = fs->op.init(conn, cfg);

static void fuse_lib_init(void *data, struct fuse_conn_info *conn)
	struct fuse *f = (struct fuse *) data;

	fuse_create_context(f);
	fuse_fs_init(f->fs, conn, &f->conf);

void fuse_fs_destroy(struct fuse_fs *fs)
		fs->op.destroy(fs->user_data);

static void fuse_lib_destroy(void *data)
	struct fuse *f = (struct fuse *) data;

	fuse_create_context(f);
	fuse_fs_destroy(f->fs);
	struct fuse *f = req_fuse_prepare(req);
	struct node *dot = NULL;

	if (name[0] == '.') {
		int len = strlen(name);

		if (len == 1 || (name[1] == '.' && len == 2)) {
			pthread_mutex_lock(&f->lock);
				fuse_log(FUSE_LOG_DEBUG, "LOOKUP-DOT\n");
				dot = get_node_nocheck(f, parent);
					pthread_mutex_unlock(&f->lock);
					reply_entry(req, &e, -ESTALE);
				fuse_log(FUSE_LOG_DEBUG, "LOOKUP-DOTDOT\n");
				parent = get_node(f, parent)->parent->nodeid;
			pthread_mutex_unlock(&f->lock);
		}
	}

	err = get_path_name(f, parent, name, &path);
		struct fuse_intr_data d;

			fuse_log(FUSE_LOG_DEBUG, "LOOKUP %s\n", path);
		fuse_prepare_interrupt(f, req, &d);
		err = lookup_path(f, parent, name, path, &e, NULL);
		if (err == -ENOENT && f->conf.negative_timeout != 0.0) {
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);
	pthread_mutex_lock(&f->lock);
	pthread_mutex_unlock(&f->lock);
	reply_entry(req, &e, err);
static void do_forget(struct fuse *f, fuse_ino_t ino, uint64_t nlookup)
		fuse_log(FUSE_LOG_DEBUG, "FORGET %llu/%llu\n",
			 (unsigned long long) ino,
			 (unsigned long long) nlookup);
	forget_node(f, ino, nlookup);

	do_forget(req_fuse(req), ino, nlookup);

static void fuse_lib_forget_multi(fuse_req_t req, size_t count,
				  struct fuse_forget_data *forgets)
	struct fuse *f = req_fuse(req);

	for (i = 0; i < count; i++)
		do_forget(f, forgets[i].ino, forgets[i].nlookup);
	struct fuse *f = req_fuse_prepare(req);

	memset(&buf, 0, sizeof(buf));

		err = get_path_nullok(f, ino, &path);
		err = get_path(f, ino, &path);
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_getattr(f->fs, path, &buf, fi);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);

		pthread_mutex_lock(&f->lock);
		node = get_node(f, ino);
		if (node->is_hidden && buf.st_nlink > 0)
		if (f->conf.auto_cache)
			update_stat(node, &buf);
		pthread_mutex_unlock(&f->lock);
		set_stat(f, ino, &buf);

		reply_err(req, err);
int fuse_fs_chmod(struct fuse_fs *fs, const char *path, mode_t mode,
		  struct fuse_file_info *fi)
			fuse_log(FUSE_LOG_DEBUG, "chmod[%s] %s %llo\n",
				 file_info_string(fi, buf, sizeof(buf)),
				 path, (unsigned long long) mode);
		return fs->op.chmod(path, mode, fi);
	struct fuse *f = req_fuse_prepare(req);

	memset(&buf, 0, sizeof(buf));

		err = get_path_nullok(f, ino, &path);
		err = get_path(f, ino, &path);
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);

		if (!err && (valid & FUSE_SET_ATTR_MODE))
			err = fuse_fs_chmod(f->fs, path, attr->st_mode, fi);
		if (!err && (valid & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID))) {
			uid_t uid = (valid & FUSE_SET_ATTR_UID) ?
				attr->st_uid : (uid_t) -1;
			gid_t gid = (valid & FUSE_SET_ATTR_GID) ?
				attr->st_gid : (gid_t) -1;
			err = fuse_fs_chown(f->fs, path, uid, gid, fi);
		}
		if (!err && (valid & FUSE_SET_ATTR_SIZE)) {
			err = fuse_fs_truncate(f->fs, path,
					       attr->st_size, fi);
		}
#ifdef HAVE_UTIMENSAT
		if (!err &&
		    (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME))) {
			struct timespec tv[2];

			tv[0].tv_nsec = UTIME_OMIT;
			tv[1].tv_nsec = UTIME_OMIT;

			if (valid & FUSE_SET_ATTR_ATIME_NOW)
				tv[0].tv_nsec = UTIME_NOW;
			else if (valid & FUSE_SET_ATTR_ATIME)
				tv[0] = attr->st_atim;

			if (valid & FUSE_SET_ATTR_MTIME_NOW)
				tv[1].tv_nsec = UTIME_NOW;
			else if (valid & FUSE_SET_ATTR_MTIME)
				tv[1] = attr->st_mtim;

			err = fuse_fs_utimens(f->fs, path, tv, fi);
		} else
#endif
		if (!err &&
		    (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) ==
		    (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
			struct timespec tv[2];
			tv[0].tv_sec = attr->st_atime;
			tv[0].tv_nsec = ST_ATIM_NSEC(attr);
			tv[1].tv_sec = attr->st_mtime;
			tv[1].tv_nsec = ST_MTIM_NSEC(attr);
			err = fuse_fs_utimens(f->fs, path, tv, fi);
		}
		if (!err)
			err = fuse_fs_getattr(f->fs, path, &buf, fi);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);

		if (f->conf.auto_cache) {
			pthread_mutex_lock(&f->lock);
			update_stat(get_node(f, ino), &buf);
			pthread_mutex_unlock(&f->lock);
		}
		set_stat(f, ino, &buf);

		reply_err(req, err);
	struct fuse *f = req_fuse_prepare(req);

	err = get_path(f, ino, &path);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_access(f->fs, path, mask);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);

	reply_err(req, err);

	struct fuse *f = req_fuse_prepare(req);
	char linkname[PATH_MAX + 1];

	err = get_path(f, ino, &path);
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_readlink(f->fs, path, linkname, sizeof(linkname));
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);

		linkname[PATH_MAX] = '\0';

	reply_err(req, err);
static void fuse_lib_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
			   mode_t mode, dev_t rdev)
	struct fuse *f = req_fuse_prepare(req);

	err = get_path_name(f, parent, name, &path);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		if (S_ISREG(mode)) {
			struct fuse_file_info fi;

			memset(&fi, 0, sizeof(fi));
			fi.flags = O_CREAT | O_EXCL | O_WRONLY;
			err = fuse_fs_create(f->fs, path, mode, &fi);
				err = lookup_path(f, parent, name, path, &e,
						  &fi);
				fuse_fs_release(f->fs, path, &fi);
		}
		if (err == -ENOSYS) {
			err = fuse_fs_mknod(f->fs, path, mode, rdev);
				err = lookup_path(f, parent, name, path, &e,
						  NULL);
		}
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);

	reply_entry(req, &e, err);

	struct fuse *f = req_fuse_prepare(req);

	err = get_path_name(f, parent, name, &path);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_mkdir(f->fs, path, mode);
			err = lookup_path(f, parent, name, path, &e, NULL);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);

	reply_entry(req, &e, err);
	struct fuse *f = req_fuse_prepare(req);

	err = get_path_wrlock(f, parent, name, &path, &wnode);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		if (!f->conf.hard_remove && is_open(f, parent, name)) {
			err = hide_node(f, path, parent, name);
		} else {
			err = fuse_fs_unlink(f->fs, path);
			if (!err)
				remove_node(f, parent, name);
		}
		fuse_finish_interrupt(f, req, &d);
		free_path_wrlock(f, parent, wnode, path);

	reply_err(req, err);

	struct fuse *f = req_fuse_prepare(req);

	err = get_path_wrlock(f, parent, name, &path, &wnode);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_rmdir(f->fs, path);
		fuse_finish_interrupt(f, req, &d);
		if (!err)
			remove_node(f, parent, name);
		free_path_wrlock(f, parent, wnode, path);

	reply_err(req, err);
static void fuse_lib_symlink(fuse_req_t req, const char *linkname,
			     fuse_ino_t parent, const char *name)
	struct fuse *f = req_fuse_prepare(req);

	err = get_path_name(f, parent, name, &path);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_symlink(f->fs, linkname, path);
			err = lookup_path(f, parent, name, path, &e, NULL);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, parent, path);

	reply_entry(req, &e, err);

static void fuse_lib_rename(fuse_req_t req, fuse_ino_t olddir,
			    const char *oldname, fuse_ino_t newdir,
			    const char *newname, unsigned int flags)
	struct fuse *f = req_fuse_prepare(req);
	struct node *wnode1;
	struct node *wnode2;

	err = get_path2(f, olddir, oldname, newdir, newname,
			&oldpath, &newpath, &wnode1, &wnode2);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		if (!f->conf.hard_remove && !(flags & RENAME_EXCHANGE) &&
		    is_open(f, newdir, newname))
			err = hide_node(f, newpath, newdir, newname);
			err = fuse_fs_rename(f->fs, oldpath, newpath, flags);
			if (flags & RENAME_EXCHANGE) {
				err = exchange_node(f, olddir, oldname,
						    newdir, newname);
			} else {
				err = rename_node(f, olddir, oldname,
						  newdir, newname, 0);
			}
		fuse_finish_interrupt(f, req, &d);
		free_path2(f, olddir, newdir, wnode1, wnode2, oldpath, newpath);

	reply_err(req, err);
static void fuse_lib_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
			  const char *newname)
	struct fuse *f = req_fuse_prepare(req);

	err = get_path2(f, ino, NULL, newparent, newname,
			&oldpath, &newpath, NULL, NULL);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_link(f->fs, oldpath, newpath);
			err = lookup_path(f, newparent, newname, newpath,
					  &e, NULL);
		fuse_finish_interrupt(f, req, &d);
		free_path2(f, ino, newparent, NULL, NULL, oldpath, newpath);

	reply_entry(req, &e, err);
static void fuse_do_release(struct fuse *f, fuse_ino_t ino, const char *path,
			    struct fuse_file_info *fi)
	int unlink_hidden = 0;

	fuse_fs_release(f->fs, path, fi);

	pthread_mutex_lock(&f->lock);
	node = get_node(f, ino);
	assert(node->open_count > 0);
	if (node->is_hidden && !node->open_count) {
		node->is_hidden = 0;
	}
	pthread_mutex_unlock(&f->lock);

			fuse_fs_unlink(f->fs, path);
	} else if (f->conf.nullpath_ok) {
			if (get_path(f, ino, &unlinkpath) == 0)
				fuse_fs_unlink(f->fs, unlinkpath);

			free_path(f, ino, unlinkpath);
static void fuse_lib_create(fuse_req_t req, fuse_ino_t parent,
			    const char *name, mode_t mode,
			    struct fuse_file_info *fi)
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;

	err = get_path_name(f, parent, name, &path);
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_create(f->fs, path, mode, fi);
			err = lookup_path(f, parent, name, path, &e, fi);
				fuse_fs_release(f->fs, path, fi);
			else if (!S_ISREG(e.attr.st_mode)) {
				fuse_fs_release(f->fs, path, fi);
				forget_node(f, e.ino, 1);
			if (f->conf.direct_io)
			if (f->conf.kernel_cache)
			    f->conf.parallel_direct_writes)
		fuse_finish_interrupt(f, req, &d);

		pthread_mutex_lock(&f->lock);
		get_node(f, e.ino)->open_count++;
		pthread_mutex_unlock(&f->lock);

			fuse_do_release(f, e.ino, path, fi);
			forget_node(f, e.ino, 1);

		reply_err(req, err);

	free_path(f, parent, path);
static double diff_timespec(const struct timespec *t1,
			    const struct timespec *t2)
	return (t1->tv_sec - t2->tv_sec) +
		((double) t1->tv_nsec - (double) t2->tv_nsec) / 1000000000.0;
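/*
 * diff_timespec() returns (t1 - t2) in seconds as a double, e.g. for
 * t1 = 5.500000000 and t2 = 4.250000000 it yields 1.25; open_auto_cache()
 * below compares this against f->conf.ac_attr_timeout to decide whether a
 * cached stat is still fresh.
 */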
static void open_auto_cache(struct fuse *f, fuse_ino_t ino, const char *path,
			    struct fuse_file_info *fi)
	pthread_mutex_lock(&f->lock);
	node = get_node(f, ino);
	if (node->cache_valid) {
		struct timespec now;

		if (diff_timespec(&now, &node->stat_updated) >
		    f->conf.ac_attr_timeout) {
			pthread_mutex_unlock(&f->lock);
			err = fuse_fs_getattr(f->fs, path, &stbuf, fi);
			pthread_mutex_lock(&f->lock);
				update_stat(node, &stbuf);
				node->cache_valid = 0;
		}
	}
	if (node->cache_valid)

	node->cache_valid = 1;
	pthread_mutex_unlock(&f->lock);
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;

	err = get_path(f, ino, &path);
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_open(f->fs, path, fi);
			if (f->conf.direct_io)
			if (f->conf.kernel_cache)

			if (f->conf.auto_cache)
				open_auto_cache(f, ino, path, fi);

			if (f->conf.no_rofd_flush &&
			    (fi->flags & O_ACCMODE) == O_RDONLY)

			if (fi->direct_io && f->conf.parallel_direct_writes)

		fuse_finish_interrupt(f, req, &d);

		pthread_mutex_lock(&f->lock);
		get_node(f, ino)->open_count++;
		pthread_mutex_unlock(&f->lock);

		fuse_do_release(f, ino, path, fi);

		reply_err(req, err);

	free_path(f, ino, path);
	struct fuse *f = req_fuse_prepare(req);

	res = get_path_nullok(f, ino, &path);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		res = fuse_fs_read_buf(f->fs, path, &buf, size, off, fi);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);

		reply_err(req, res);

	struct fuse *f = req_fuse_prepare(req);

	res = get_path_nullok(f, ino, &path);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		res = fuse_fs_write_buf(f->fs, path, buf, off, fi);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);

		reply_err(req, res);

	struct fuse *f = req_fuse_prepare(req);

	err = get_path_nullok(f, ino, &path);
		struct fuse_intr_data d;

		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_fsync(f->fs, path, datasync, fi);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);

	reply_err(req, err);
static struct fuse_dh *get_dirhandle(const struct fuse_file_info *llfi,
				     struct fuse_file_info *fi)
	struct fuse_dh *dh = (struct fuse_dh *) (uintptr_t) llfi->fh;

	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;

	dh = (struct fuse_dh *) malloc(sizeof(struct fuse_dh));
		reply_err(req, -ENOMEM);
	memset(dh, 0, sizeof(struct fuse_dh));
	dh->contents = NULL;
	pthread_mutex_init(&dh->lock, NULL);

	llfi->fh = (uintptr_t) dh;

	memset(&fi, 0, sizeof(fi));

	err = get_path(f, ino, &path);
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_opendir(f->fs, path, &fi);
		fuse_finish_interrupt(f, req, &d);
			fuse_fs_releasedir(f->fs, path, &fi);
			pthread_mutex_destroy(&dh->lock);
		reply_err(req, err);
		pthread_mutex_destroy(&dh->lock);
	free_path(f, ino, path);
static int extend_contents(struct fuse_dh *dh, unsigned minsize)
	if (minsize > dh->size) {
		unsigned newsize = dh->size;

		while (newsize < minsize) {
			if (newsize >= 0x80000000)
				newsize = 0xffffffff;
			else
				newsize *= 2;
		}

		newptr = (char *) realloc(dh->contents, newsize);
		if (newptr == NULL) {
			dh->error = -ENOMEM;
			return -1;
		}
		dh->contents = newptr;
		dh->size = newsize;
	}
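/*
 * extend_contents() grows the directory handle's contents buffer
 * geometrically: the size is doubled until it covers the requested minimum,
 * saturating at 0xffffffff so the 32-bit size cannot overflow, and the data
 * already stored in the buffer is preserved across the realloc().
 */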
static int fuse_add_direntry_to_dh(struct fuse_dh *dh, const char *name,
				   struct stat *st)
	struct fuse_direntry *de;

	de = malloc(sizeof(struct fuse_direntry));
		dh->error = -ENOMEM;
	de->name = strdup(name);
		dh->error = -ENOMEM;

	dh->last = &de->next;

	pthread_mutex_lock(&f->lock);
	node = lookup_node(f, parent, name);
	pthread_mutex_unlock(&f->lock);
static int fill_dir(void *dh_, const char *name, const struct stat *statp,
		    off_t off, enum fuse_fill_dir_flags flags)
	struct fuse_dh *dh = (struct fuse_dh *) dh_;

		memset(&stbuf, 0, sizeof(stbuf));
		stbuf.st_ino = FUSE_UNKNOWN_INO;

	if (!dh->fuse->conf.use_ino) {
		stbuf.st_ino = FUSE_UNKNOWN_INO;
		if (dh->fuse->conf.readdir_ino) {
			stbuf.st_ino = (ino_t)
				lookup_nodeid(dh->fuse, dh->nodeid, name);
		}
	}

		if (extend_contents(dh, dh->needlen) == -1)

					 dh->needlen - dh->len, name,
		if (newlen > dh->needlen)

		if (fuse_add_direntry_to_dh(dh, name, &stbuf) == -1)
static int is_dot_or_dotdot(const char *name)
	return name[0] == '.' && (name[1] == '\0' ||
				  (name[1] == '.' && name[2] == '\0'));
static int fill_dir_plus(void *dh_, const char *name, const struct stat *statp,
			 off_t off, enum fuse_fill_dir_flags flags)
	struct fuse_dh *dh = (struct fuse_dh *) dh_;
	struct fuse *f = dh->fuse;

	if (!is_dot_or_dotdot(name)) {
		res = do_lookup(f, dh->nodeid, name, &e);

		e.attr.st_ino = FUSE_UNKNOWN_INO;
		e.attr.st_mode = statp->st_mode;
		if (f->conf.use_ino)
			e.attr.st_ino = statp->st_ino;
		if (!f->conf.use_ino && f->conf.readdir_ino) {
			e.attr.st_ino = (ino_t)
				lookup_nodeid(f, dh->nodeid, name);
		}

		if (extend_contents(dh, dh->needlen) == -1)

					 dh->needlen - dh->len, name,
		if (newlen > dh->needlen)

		if (fuse_add_direntry_to_dh(dh, name, &e.attr) == -1)
3587 static void free_direntries(
struct fuse_direntry *de)
3590 struct fuse_direntry *next = de->next;
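/*
 * readdir is served in one of two ways.  readdir_fill() re-runs the
 * filesystem's readdir: filesystems that supply their own offsets have
 * their entries packed straight into the reply buffer, while the common
 * offset-less case caches the entries in dh->first and marks the handle as
 * filled.  Later requests (including rewinds) are then sliced out of that
 * cached list by readdir_fill_from_list() without calling into the
 * filesystem again.
 */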
static int readdir_fill(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			size_t size, off_t off, struct fuse_dh *dh,
			struct fuse_file_info *fi,
			enum fuse_readdir_flags flags)
{
	char *path;
	int err;

	if (f->fs->op.readdir)
		err = get_path_nullok(f, ino, &path);
	else
		err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_fill_dir_t filler = fill_dir;

		if (flags & FUSE_READDIR_PLUS)
			filler = fill_dir_plus;

		free_direntries(dh->first);
		dh->first = NULL;
		dh->last = &dh->first;
		dh->len = 0;
		dh->error = 0;
		dh->needlen = size;
		dh->filled = 0;
		dh->req = req;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_readdir(f->fs, path, dh, filler, off, fi, flags);
		fuse_finish_interrupt(f, req, &d);
		dh->req = NULL;
		if (!err)
			err = dh->error;
		free_path(f, ino, path);
	}
	return err;
}

static int readdir_fill_from_list(fuse_req_t req, struct fuse_dh *dh,
				  off_t off, enum fuse_readdir_flags flags)
{
	off_t pos;
	struct fuse_direntry *de = dh->first;
	unsigned thislen, newlen;

	dh->len = 0;

	if (extend_contents(dh, dh->needlen) == -1)
		return dh->error;

	/* skip entries already consumed by earlier reads */
	for (pos = 0; pos < off; pos++) {
		if (!de)
			break;
		de = de->next;
	}
	while (de) {
		char *p = dh->contents + dh->len;
		unsigned rem = dh->needlen - dh->len;

		pos++;
		/* (readdirplus packs each entry with fuse_add_direntry_plus
		   instead) */
		thislen = fuse_add_direntry(req, p, rem,
					    de->name, &de->stat, pos);
		newlen = dh->len + thislen;
		if (newlen > dh->needlen)
			break;
		dh->len = newlen;
		de = de->next;
	}
	return 0;
}
static void fuse_readdir_common(fuse_req_t req, fuse_ino_t ino, size_t size,
				off_t off, struct fuse_file_info *llfi,
				enum fuse_readdir_flags flags)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_file_info fi;
	struct fuse_dh *dh = get_dirhandle(llfi, &fi);
	int err;

	pthread_mutex_lock(&dh->lock);
	if (!dh->filled) {
		err = readdir_fill(f, req, ino, size, off, dh, &fi, flags);
		if (err) {
			reply_err(req, err);
			goto out;
		}
	}
	if (dh->filled) {
		dh->needlen = size;
		err = readdir_fill_from_list(req, dh, off, flags);
		if (err) {
			reply_err(req, err);
			goto out;
		}
	}
	fuse_reply_buf(req, dh->contents, dh->len);
out:
	pthread_mutex_unlock(&dh->lock);
}

static void fuse_lib_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
			     off_t off, struct fuse_file_info *llfi)
{
	fuse_readdir_common(req, ino, size, off, llfi, 0);
}

static void fuse_lib_releasedir(fuse_req_t req, fuse_ino_t ino,
				struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	struct fuse_dh *dh = get_dirhandle(llfi, &fi);
	char *path;

	get_path_nullok(f, ino, &path);

	fuse_prepare_interrupt(f, req, &d);
	fuse_fs_releasedir(f->fs, path, &fi);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	pthread_mutex_lock(&dh->lock);
	pthread_mutex_unlock(&dh->lock);
	pthread_mutex_destroy(&dh->lock);
	free_direntries(dh->first);
	free(dh->contents);
	free(dh);
	reply_err(req, 0);
}
static void fuse_lib_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
			      struct fuse_file_info *llfi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_file_info fi;
	char *path;
	int err;

	get_dirhandle(llfi, &fi);

	err = get_path_nullok(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_fsyncdir(f->fs, path, datasync, &fi);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	reply_err(req, err);
}

static void fuse_lib_statfs(fuse_req_t req, fuse_ino_t ino)
{
	struct fuse *f = req_fuse_prepare(req);
	struct statvfs buf;
	char *path = NULL;
	int err = 0;

	memset(&buf, 0, sizeof(buf));
	if (ino)
		err = get_path(f, ino, &path);

	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_statfs(f->fs, path ? path : "/", &buf);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}

	if (!err)
		fuse_reply_statfs(req, &buf);
	else
		reply_err(req, err);
}
static void fuse_lib_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
			      const char *value, size_t size, int flags)
{
	struct fuse *f = req_fuse_prepare(req);
	char *path;
	int err;

	err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_setxattr(f->fs, path, name, value, size, flags);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	reply_err(req, err);
}

static int common_getxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			   const char *name, char *value, size_t size)
{
	char *path;
	int err;

	err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_getxattr(f->fs, path, name, value, size);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	return err;
}

static void fuse_lib_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
			      size_t size)
{
	struct fuse *f = req_fuse_prepare(req);
	int res;

	if (size) {
		char *value = (char *) malloc(size);
		if (value == NULL) {
			reply_err(req, -ENOMEM);
			return;
		}
		res = common_getxattr(f, req, ino, name, value, size);
		if (res > 0)
			fuse_reply_buf(req, value, res);
		else
			reply_err(req, res);
		free(value);
	} else {
		res = common_getxattr(f, req, ino, name, NULL, 0);
		if (res >= 0)
			fuse_reply_xattr(req, res);
		else
			reply_err(req, res);
	}
}
static int common_listxattr(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			    char *list, size_t size)
{
	char *path;
	int err;

	err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_listxattr(f->fs, path, list, size);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	return err;
}

static void fuse_lib_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
{
	struct fuse *f = req_fuse_prepare(req);
	int res;

	if (size) {
		char *list = (char *) malloc(size);
		if (list == NULL) {
			reply_err(req, -ENOMEM);
			return;
		}
		res = common_listxattr(f, req, ino, list, size);
		if (res > 0)
			fuse_reply_buf(req, list, res);
		else
			reply_err(req, res);
		free(list);
	} else {
		res = common_listxattr(f, req, ino, NULL, 0);
		if (res >= 0)
			fuse_reply_xattr(req, res);
		else
			reply_err(req, res);
	}
}

static void fuse_lib_removexattr(fuse_req_t req, fuse_ino_t ino,
				 const char *name)
{
	struct fuse *f = req_fuse_prepare(req);
	char *path;
	int err;

	err = get_path(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_removexattr(f->fs, path, name);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	reply_err(req, err);
}
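/*
 * POSIX record locks are tracked per node so that F_GETLK can report
 * conflicts and so that locks held by a closing file can be dropped on
 * flush.  locks_conflict() implements the usual overlap test: two locks
 * collide when they belong to different owners, their byte ranges overlap
 * and at least one of them is a write lock.
 */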
static struct lock *locks_conflict(struct node *node, const struct lock *lock)
{
	struct lock *l;

	for (l = node->locks; l; l = l->next)
		if (l->owner != lock->owner &&
		    lock->start <= l->end && l->start <= lock->end &&
		    (l->type == F_WRLCK || lock->type == F_WRLCK))
			break;

	return l;
}

static void delete_lock(struct lock **lockp)
{
	struct lock *l = *lockp;
	*lockp = l->next;
	free(l);
}

static void insert_lock(struct lock **pos, struct lock *lock)
{
	lock->next = *pos;
	*pos = lock;
}
static int locks_insert(struct node *node, struct lock *lock)
{
	struct lock **lp;
	struct lock *newl1 = NULL;
	struct lock *newl2 = NULL;

	if (lock->type != F_UNLCK || lock->start != 0 ||
	    lock->end != OFFSET_MAX) {
		newl1 = malloc(sizeof(struct lock));
		newl2 = malloc(sizeof(struct lock));

		if (!newl1 || !newl2) {
			free(newl1);
			free(newl2);
			return -ENOLCK;
		}
	}

	for (lp = &node->locks; *lp;) {
		struct lock *l = *lp;
		if (l->owner != lock->owner)
			goto skip;

		if (lock->type == l->type) {
			if (l->end < lock->start - 1)
				goto skip;
			if (lock->end < l->start - 1)
				break;
			if (l->start <= lock->start && lock->end <= l->end)
				goto out;
			if (l->start < lock->start)
				lock->start = l->start;
			if (lock->end < l->end)
				lock->end = l->end;
			goto delete;
		} else {
			if (l->end < lock->start)
				goto skip;
			if (lock->end < l->start)
				break;
			if (lock->start <= l->start && l->end <= lock->end)
				goto delete;
			if (l->end <= lock->end) {
				l->end = lock->start - 1;
				goto skip;
			}
			if (lock->start <= l->start) {
				l->start = lock->end + 1;
				break;
			}
			/* the existing lock straddles the new one: split it */
			*newl2 = *l;
			newl2->start = lock->end + 1;
			l->end = lock->start - 1;
			insert_lock(&l->next, newl2);
			newl2 = NULL;
		}
	skip:
		lp = &l->next;
		continue;

	delete:
		delete_lock(lp);
		continue;
	}
	if (lock->type != F_UNLCK) {
		*newl1 = *lock;
		insert_lock(lp, newl1);
		newl1 = NULL;
	}
out:
	free(newl1);
	free(newl2);
	return 0;
}
static void flock_to_lock(struct flock *flock, struct lock *lock)
{
	memset(lock, 0, sizeof(struct lock));
	lock->type = flock->l_type;
	lock->start = flock->l_start;
	lock->end =
		flock->l_len ? flock->l_start + flock->l_len - 1 : OFFSET_MAX;
	lock->pid = flock->l_pid;
}

static void lock_to_flock(struct lock *lock, struct flock *flock)
{
	flock->l_type = lock->type;
	flock->l_start = lock->start;
	flock->l_len =
		(lock->end == OFFSET_MAX) ? 0 : lock->end - lock->start + 1;
	flock->l_pid = lock->pid;
}
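/*
 * Conversion between struct flock and the internal struct lock: a zero
 * l_len means "to end of file", which is stored internally as
 * end == OFFSET_MAX.  For example, l_start = 100, l_len = 10 becomes the
 * inclusive range [100, 109], and the reverse conversion restores
 * l_len = 10.
 */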
static int fuse_flush_common(struct fuse *f, fuse_req_t req, fuse_ino_t ino,
			     const char *path, struct fuse_file_info *fi)
{
	struct fuse_intr_data d;
	struct flock lock;
	struct lock l;
	int err;
	int errlock;

	fuse_prepare_interrupt(f, req, &d);
	memset(&lock, 0, sizeof(lock));
	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	err = fuse_fs_flush(f->fs, path, fi);
	errlock = fuse_fs_lock(f->fs, path, fi, F_SETLK, &lock);
	fuse_finish_interrupt(f, req, &d);

	if (errlock != -ENOSYS) {
		flock_to_lock(&lock, &l);
		l.owner = fi->lock_owner;
		pthread_mutex_lock(&f->lock);
		locks_insert(get_node(f, ino), &l);
		pthread_mutex_unlock(&f->lock);
	}
	return err;
}

static void fuse_lib_release(fuse_req_t req, fuse_ino_t ino,
			     struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err = 0;

	get_path_nullok(f, ino, &path);
	if (fi->flush) {
		err = fuse_flush_common(f, req, ino, path, fi);
		if (err == -ENOSYS)
			err = 0;
	}

	fuse_prepare_interrupt(f, req, &d);
	fuse_do_release(f, ino, path, fi);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	reply_err(req, err);
}

static void fuse_lib_flush(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	char *path;
	int err;

	get_path_nullok(f, ino, &path);
	err = fuse_flush_common(f, req, ino, path, fi);
	free_path(f, ino, path);

	reply_err(req, err);
}
static int fuse_lock_common(fuse_req_t req, fuse_ino_t ino,
			    struct fuse_file_info *fi, struct flock *lock,
			    int cmd)
{
	struct fuse *f = req_fuse_prepare(req);
	char *path;
	int err;

	err = get_path_nullok(f, ino, &path);
	if (!err) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_lock(f->fs, path, fi, cmd, lock);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	return err;
}

static void fuse_lib_getlk(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, struct flock *lock)
{
	int err;
	struct lock l;
	struct lock *conflict;
	struct fuse *f = req_fuse(req);

	flock_to_lock(lock, &l);
	l.owner = fi->lock_owner;
	pthread_mutex_lock(&f->lock);
	conflict = locks_conflict(get_node(f, ino), &l);
	if (conflict)
		lock_to_flock(conflict, lock);
	pthread_mutex_unlock(&f->lock);
	if (!conflict)
		err = fuse_lock_common(req, ino, fi, lock, F_GETLK);
	else
		err = 0;

	if (!err)
		fuse_reply_lock(req, lock);
	else
		reply_err(req, err);
}

static void fuse_lib_setlk(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, struct flock *lock,
			   int sleep)
{
	int err = fuse_lock_common(req, ino, fi, lock,
				   sleep ? F_SETLKW : F_SETLK);
	if (!err) {
		struct fuse *f = req_fuse(req);
		struct lock l;

		flock_to_lock(lock, &l);
		l.owner = fi->lock_owner;
		pthread_mutex_lock(&f->lock);
		locks_insert(get_node(f, ino), &l);
		pthread_mutex_unlock(&f->lock);
	}
	reply_err(req, err);
}
static void fuse_lib_flock(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi, int op)
{
	struct fuse *f = req_fuse_prepare(req);
	char *path;
	int err;

	err = get_path_nullok(f, ino, &path);
	if (err == 0) {
		struct fuse_intr_data d;
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_flock(f->fs, path, fi, op);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	reply_err(req, err);
}

static void fuse_lib_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
			  uint64_t idx)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err;

	err = get_path(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_bmap(f->fs, path, blocksize, &idx);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err)
		fuse_reply_bmap(req, idx);
	else
		reply_err(req, err);
}
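/*
 * For restricted ioctls the kernel already knows the data direction and
 * size, so a single buffer can serve as both input and output: it is
 * allocated to out_bufsz, pre-filled with the input data and handed to the
 * filesystem in place.  FUSE_IOCTL_UNRESTRICTED requests are rejected,
 * because the high-level API cannot perform the retry protocol they
 * require.
 */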
static void fuse_lib_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
			   void *arg, struct fuse_file_info *llfi,
			   unsigned int flags, const void *in_buf,
			   size_t in_bufsz, size_t out_bufsz)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	struct fuse_file_info fi;
	char *path, *out_buf = NULL;
	int err;

	err = -EPERM;
	if (flags & FUSE_IOCTL_UNRESTRICTED)
		goto err;

	if (flags & FUSE_IOCTL_DIR)
		get_dirhandle(llfi, &fi);
	else
		fi = *llfi;

	if (out_bufsz) {
		err = -ENOMEM;
		out_buf = malloc(out_bufsz);
		if (!out_buf)
			goto err;
	}

	assert(!in_bufsz || !out_bufsz || in_bufsz == out_bufsz);
	if (out_buf && in_bufsz)
		memcpy(out_buf, in_buf, in_bufsz);

	err = get_path_nullok(f, ino, &path);
	if (err)
		goto err;

	fuse_prepare_interrupt(f, req, &d);
	err = fuse_fs_ioctl(f->fs, path, cmd, arg, &fi, flags,
			    out_buf ? out_buf : (void *)in_buf);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);

	fuse_reply_ioctl(req, err, out_buf, out_bufsz);
	goto out;
err:
	reply_err(req, err);
out:
	free(out_buf);
}
static void fuse_lib_poll(fuse_req_t req, fuse_ino_t ino,
			  struct fuse_file_info *fi,
			  struct fuse_pollhandle *ph)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err;
	unsigned revents = 0;

	err = get_path_nullok(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_poll(f->fs, path, fi, ph, &revents);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	if (!err)
		fuse_reply_poll(req, revents);
	else
		reply_err(req, err);
}

static void fuse_lib_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
			       off_t offset, off_t length,
			       struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err;

	err = get_path_nullok(f, ino, &path);
	if (!err) {
		fuse_prepare_interrupt(f, req, &d);
		err = fuse_fs_fallocate(f->fs, path, mode, offset, length, fi);
		fuse_finish_interrupt(f, req, &d);
		free_path(f, ino, path);
	}
	reply_err(req, err);
}
static void fuse_lib_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in,
				     off_t off_in,
				     struct fuse_file_info *fi_in,
				     fuse_ino_t nodeid_out, off_t off_out,
				     struct fuse_file_info *fi_out,
				     size_t len, int flags)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path_in, *path_out;
	int err;
	ssize_t res;

	err = get_path_nullok(f, nodeid_in, &path_in);
	if (err) {
		reply_err(req, err);
		return;
	}

	err = get_path_nullok(f, nodeid_out, &path_out);
	if (err) {
		free_path(f, nodeid_in, path_in);
		reply_err(req, err);
		return;
	}

	fuse_prepare_interrupt(f, req, &d);
	res = fuse_fs_copy_file_range(f->fs, path_in, fi_in, off_in, path_out,
				      fi_out, off_out, len, flags);
	fuse_finish_interrupt(f, req, &d);

	if (res >= 0)
		fuse_reply_write(req, res);
	else
		reply_err(req, res);

	free_path(f, nodeid_in, path_in);
	free_path(f, nodeid_out, path_out);
}

static void fuse_lib_lseek(fuse_req_t req, fuse_ino_t ino, off_t off,
			   int whence, struct fuse_file_info *fi)
{
	struct fuse *f = req_fuse_prepare(req);
	struct fuse_intr_data d;
	char *path;
	int err;
	off_t res;

	err = get_path(f, ino, &path);
	if (err) {
		reply_err(req, err);
		return;
	}

	fuse_prepare_interrupt(f, req, &d);
	res = fuse_fs_lseek(f->fs, path, off, whence, fi);
	fuse_finish_interrupt(f, req, &d);
	free_path(f, ino, path);
	if (res >= 0)
		fuse_reply_lseek(req, res);
	else
		reply_err(req, res);
}
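/*
 * With -o remember=T, forgotten nodes are kept on an LRU list for T seconds
 * so that their node IDs stay valid.  fuse_clean_cache() walks that list
 * and evicts entries older than T; clean_delay() spaces the clean-up runs
 * so that roughly ten of them happen per remember window, clamped to at
 * most 3600 seconds and at least a fixed minimum (e.g. remember=600 gives a
 * 60 second delay).
 */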
static int clean_delay(struct fuse *f)
{
	/* Do roughly ten clean-up runs per remember window */
	int min_sleep = 60;
	int max_sleep = 3600;
	int sleep_time = f->conf.remember / 10;

	if (sleep_time > max_sleep)
		return max_sleep;
	if (sleep_time < min_sleep)
		return min_sleep;
	return sleep_time;
}

int fuse_clean_cache(struct fuse *f)
{
	struct node_lru *lnode;
	struct list_head *curr, *next;
	struct node *node;
	struct timespec now;

	pthread_mutex_lock(&f->lock);

	curr_time(&now);

	for (curr = f->lru_table.next; curr != &f->lru_table; curr = next) {
		double age;

		next = curr->next;
		lnode = list_entry(curr, struct node_lru, lru);
		node = &lnode->node;

		age = diff_timespec(&now, &lnode->forget_time);
		if (age <= f->conf.remember)
			break;

		assert(node->nlookup == 1);

		/* Don't forget active directories */
		if (node->refctr > 1)
			continue;

		node->nlookup = 0;
		unhash_name(f, node);
		unref_node(f, node);
	}
	pthread_mutex_unlock(&f->lock);

	return clean_delay(f);
}
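/*
 * The low-level operation table: every kernel request is routed through
 * these fuse_lib_* handlers, which resolve the inode to a path and then
 * call the corresponding path-based operation of the user filesystem.
 */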
static struct fuse_lowlevel_ops fuse_path_ops = {
	.init = fuse_lib_init,
	.destroy = fuse_lib_destroy,
	.lookup = fuse_lib_lookup,
	.forget = fuse_lib_forget,
	.forget_multi = fuse_lib_forget_multi,
	.getattr = fuse_lib_getattr,
	.setattr = fuse_lib_setattr,
	.access = fuse_lib_access,
	.readlink = fuse_lib_readlink,
	.mknod = fuse_lib_mknod,
	.mkdir = fuse_lib_mkdir,
	.unlink = fuse_lib_unlink,
	.rmdir = fuse_lib_rmdir,
	.symlink = fuse_lib_symlink,
	.rename = fuse_lib_rename,
	.link = fuse_lib_link,
	.create = fuse_lib_create,
	.open = fuse_lib_open,
	.read = fuse_lib_read,
	.write_buf = fuse_lib_write_buf,
	.flush = fuse_lib_flush,
	.release = fuse_lib_release,
	.fsync = fuse_lib_fsync,
	.opendir = fuse_lib_opendir,
	.readdir = fuse_lib_readdir,
	.readdirplus = fuse_lib_readdirplus,
	.releasedir = fuse_lib_releasedir,
	.fsyncdir = fuse_lib_fsyncdir,
	.statfs = fuse_lib_statfs,
	.setxattr = fuse_lib_setxattr,
	.getxattr = fuse_lib_getxattr,
	.listxattr = fuse_lib_listxattr,
	.removexattr = fuse_lib_removexattr,
	.getlk = fuse_lib_getlk,
	.setlk = fuse_lib_setlk,
	.flock = fuse_lib_flock,
	.bmap = fuse_lib_bmap,
	.ioctl = fuse_lib_ioctl,
	.poll = fuse_lib_poll,
	.fallocate = fuse_lib_fallocate,
	.copy_file_range = fuse_lib_copy_file_range,
	.lseek = fuse_lib_lseek,
};
int fuse_notify_poll(struct fuse_pollhandle *ph)
{
	return fuse_lowlevel_notify_poll(ph);
}

static int fuse_session_loop_remember(struct fuse *f)
{
	struct fuse_session *se = f->se;
	int res = 0;
	struct timespec now;
	time_t next_clean;
	struct pollfd fds = {
		.fd = se->fd,
		.events = POLLIN,
	};
	struct fuse_buf fbuf = {
		.mem = NULL,
	};

	curr_time(&now);
	next_clean = now.tv_sec;
	while (!fuse_session_exited(se)) {
		unsigned timeout;

		curr_time(&now);
		if (now.tv_sec < next_clean)
			timeout = next_clean - now.tv_sec;
		else
			timeout = 0;

		res = poll(&fds, 1, timeout * 1000);
		if (res == -1) {
			if (errno == EINTR)
				continue;
			break;
		} else if (res > 0) {
			res = fuse_session_receive_buf_int(se, &fbuf, NULL);
			if (res == -EINTR)
				continue;
			if (res <= 0)
				break;
			fuse_session_process_buf_int(se, &fbuf, NULL);
		} else {
			/* timeout expired: prune remembered nodes */
			timeout = fuse_clean_cache(f);
			curr_time(&now);
			next_clean = now.tv_sec + timeout;
		}
	}

	free(fbuf.mem);
	fuse_session_reset(se);
	return res < 0 ? -1 : 0;
}

int fuse_loop(struct fuse *f)
{
	if (!f)
		return -1;
	if (lru_enabled(f))
		return fuse_session_loop_remember(f);
	return fuse_session_loop(f->se);
}
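/*
 * ABI compatibility: fuse_loop_mt() has grown new configuration parameters
 * over time.  FUSE_SYMVER() exports fuse_loop_mt_312() as the default
 * fuse_loop_mt@@FUSE_3.12 symbol while keeping the FUSE_3.2 and FUSE_3.0
 * variants as versioned aliases, so binaries linked against older libfuse 3
 * releases keep working.
 */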
FUSE_SYMVER("fuse_loop_mt_312", "fuse_loop_mt@@FUSE_3.12")
int fuse_loop_mt_312(struct fuse *f, struct fuse_loop_config *config)
{
	/* ... start the LRU cleanup thread, run the multi-threaded session
	   loop with the given config, then stop the cleanup thread ... */
}

int fuse_loop_mt_32(struct fuse *f, struct fuse_loop_config_v1 *config_v1);
FUSE_SYMVER("fuse_loop_mt_32", "fuse_loop_mt@FUSE_3.2")
int fuse_loop_mt_32(struct fuse *f, struct fuse_loop_config_v1 *config_v1)
{
	/* convert the legacy v1 config and forward to the current entry point */
	struct fuse_loop_config *config = fuse_loop_cfg_create();
	if (config == NULL)
		return ENOMEM;

	fuse_loop_cfg_convert(config, config_v1);

	int res = fuse_loop_mt_312(f, config);

	fuse_loop_cfg_destroy(config);
	return res;
}

int fuse_loop_mt_31(struct fuse *f, int clone_fd);
FUSE_SYMVER("fuse_loop_mt_31", "fuse_loop_mt@FUSE_3.0")
int fuse_loop_mt_31(struct fuse *f, int clone_fd)
{
	int err;
	struct fuse_loop_config *config = fuse_loop_cfg_create();
	if (config == NULL)
		return ENOMEM;

	fuse_loop_cfg_set_clone_fd(config, clone_fd);

	err = fuse_loop_mt_312(f, config);

	fuse_loop_cfg_destroy(config);
	return err;
}
struct fuse_context *fuse_get_context(void)
{
	struct fuse_context_i *c = fuse_get_context_internal();
	return c ? &c->ctx : NULL;
}

int fuse_getgroups(int size, gid_t list[])
{
	struct fuse_context_i *c = fuse_get_context_internal();
	return c ? fuse_req_getgroups(c->req, size, list) : -EINVAL;
}

int fuse_interrupted(void)
{
	struct fuse_context_i *c = fuse_get_context_internal();
	return c ? fuse_req_interrupted(c->req) : 0;
}

int fuse_invalidate_path(struct fuse *f, const char *path)
{
	fuse_ino_t ino;
	int err = lookup_path_in_cache(f, path, &ino);
	if (err)
		return err;
	return fuse_lowlevel_notify_inval_inode(f->se, ino, 0, 0);
}
#define FUSE_LIB_OPT(t, p, v) { t, offsetof(struct fuse_config, p), v }

static const struct fuse_opt fuse_lib_opts[] = {
	FUSE_OPT_KEY("debug", FUSE_OPT_KEY_KEEP),
	FUSE_OPT_KEY("-d", FUSE_OPT_KEY_KEEP),
	FUSE_LIB_OPT("debug", debug, 1),
	FUSE_LIB_OPT("-d", debug, 1),
	FUSE_LIB_OPT("kernel_cache", kernel_cache, 1),
	FUSE_LIB_OPT("auto_cache", auto_cache, 1),
	FUSE_LIB_OPT("noauto_cache", auto_cache, 0),
	FUSE_LIB_OPT("no_rofd_flush", no_rofd_flush, 1),
	FUSE_LIB_OPT("umask=", set_mode, 1),
	FUSE_LIB_OPT("umask=%o", umask, 0),
	FUSE_LIB_OPT("uid=", set_uid, 1),
	FUSE_LIB_OPT("uid=%d", uid, 0),
	FUSE_LIB_OPT("gid=", set_gid, 1),
	FUSE_LIB_OPT("gid=%d", gid, 0),
	FUSE_LIB_OPT("entry_timeout=%lf", entry_timeout, 0),
	FUSE_LIB_OPT("attr_timeout=%lf", attr_timeout, 0),
	FUSE_LIB_OPT("ac_attr_timeout=%lf", ac_attr_timeout, 0),
	FUSE_LIB_OPT("ac_attr_timeout=", ac_attr_timeout_set, 1),
	FUSE_LIB_OPT("negative_timeout=%lf", negative_timeout, 0),
	FUSE_LIB_OPT("noforget", remember, -1),
	FUSE_LIB_OPT("remember=%u", remember, 0),
	FUSE_LIB_OPT("modules=%s", modules, 0),
	FUSE_LIB_OPT("parallel_direct_write=%d", parallel_direct_writes, 0),
	FUSE_OPT_END
};
static int fuse_lib_opt_proc(void *data, const char *arg, int key,
			     struct fuse_args *outargs)
{
	(void) arg; (void) outargs; (void) data; (void) key;

	/* Pass through unknown options */
	return 1;
}

static const struct fuse_opt fuse_help_opts[] = {
	FUSE_LIB_OPT("modules=%s", modules, 1),
	FUSE_OPT_END
};
static void print_module_help(const char *name,
			      fuse_module_factory_t *fac)
{
	struct fuse_args a = FUSE_ARGS_INIT(0, NULL);

	if (fuse_opt_add_arg(&a, "") == -1 ||
	    fuse_opt_add_arg(&a, "-h") == -1)
		return;
	printf("\nOptions for %s module:\n", name);
	(*fac)(&a, NULL);
	fuse_opt_free_args(&a);
}

void fuse_lib_help(struct fuse_args *args)
{
	/* Not all options, only the ones of interest to an end user */
	printf(
"    -o kernel_cache        cache files in kernel\n"
"    -o [no]auto_cache      enable caching based on modification times (off)\n"
"    -o no_rofd_flush       disable flushing of read-only fd on close (off)\n"
"    -o umask=M             set file permissions (octal)\n"
"    -o uid=N               set file owner\n"
"    -o gid=N               set file group\n"
"    -o entry_timeout=T     cache timeout for names (1.0s)\n"
"    -o negative_timeout=T  cache timeout for deleted names (0.0s)\n"
"    -o attr_timeout=T      cache timeout for attributes (1.0s)\n"
"    -o ac_attr_timeout=T   auto cache timeout for attributes (attr_timeout)\n"
"    -o noforget            never forget cached inodes\n"
"    -o remember=T          remember cached inodes for T seconds (0s)\n"
"    -o modules=M1[:M2...]  names of modules to push onto filesystem stack\n");

	/* Print low-level help */
	fuse_lowlevel_help();

	/* Print help for the builtin modules */
	print_module_help("subdir", &fuse_module_subdir_factory);
	print_module_help("iconv", &fuse_module_iconv_factory);

	/* Parse the command line in case more modules need to be activated */
	struct fuse_config conf = { .modules = NULL };
	if (fuse_opt_parse(args, &conf, fuse_help_opts,
			   fuse_lib_opt_proc) == -1
	    || !conf.modules)
		return;

	char *module;
	char *next;
	struct fuse_module *m;

	for (module = conf.modules; module; module = next) {
		char *p;
		for (p = module; *p && *p != ':'; p++);
		next = *p ? p + 1 : NULL;
		*p = '\0';

		m = fuse_get_module(module);
		if (m)
			print_module_help(module, &m->factory);
	}
	free(conf.modules);
}
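/*
 * With -o intr, a request that the kernel interrupts gets the configured
 * interrupt signal (intr_signal=N) delivered to the thread executing it, so
 * that blocking system calls inside the filesystem return EINTR.  The
 * handler is only installed if the signal is still at its default
 * disposition, and it is restored when the filesystem is destroyed.
 */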
static int fuse_init_intr_signal(int signum, int *installed)
{
	struct sigaction old_sa;

	if (sigaction(signum, NULL, &old_sa) == -1) {
		perror("fuse: cannot get old signal handler");
		return -1;
	}

	if (old_sa.sa_handler == SIG_DFL) {
		struct sigaction sa;

		memset(&sa, 0, sizeof(struct sigaction));
		sa.sa_handler = fuse_intr_sighandler;
		sigemptyset(&sa.sa_mask);

		if (sigaction(signum, &sa, NULL) == -1) {
			perror("fuse: cannot set interrupt signal handler");
			return -1;
		}
		*installed = 1;
	}
	return 0;
}

static void fuse_restore_intr_signal(int signum)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(struct sigaction));
	sa.sa_handler = SIG_DFL;
	sigaction(signum, &sa, NULL);
}
static int fuse_push_module(struct fuse *f, const char *module,
			    struct fuse_args *args)
{
	struct fuse_fs *fs[2] = { f->fs, NULL };
	struct fuse_fs *newfs;
	struct fuse_module *m = fuse_get_module(module);

	if (!m)
		return -1;

	newfs = m->factory(args, fs);
	if (!newfs) {
		fuse_put_module(m);
		return -1;
	}
	newfs->m = m;
	f->fs = newfs;
	return 0;
}

struct fuse_fs *fuse_fs_new(const struct fuse_operations *op, size_t op_size,
			    void *user_data)
{
	struct fuse_fs *fs;

	if (sizeof(struct fuse_operations) < op_size) {
		fuse_log(FUSE_LOG_ERR,
			 "fuse: warning: library too old, some operations may not work\n");
		op_size = sizeof(struct fuse_operations);
	}

	fs = (struct fuse_fs *) calloc(1, sizeof(struct fuse_fs));
	if (!fs) {
		fuse_log(FUSE_LOG_ERR,
			 "fuse: failed to allocate fuse_fs object\n");
		return NULL;
	}

	fs->user_data = user_data;
	if (op)
		memcpy(&fs->op, op, op_size);
	return fs;
}
static int node_table_init(struct node_table *t)
{
	t->size = NODE_TABLE_MIN_SIZE;
	t->array = (struct node **) calloc(1, sizeof(struct node *) * t->size);
	if (t->array == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: memory allocation failed\n");
		return -1;
	}
	t->use = 0;
	t->split = 0;
	return 0;
}

static void *fuse_prune_nodes(void *fuse)
{
	struct fuse *f = fuse;
	int sleep_time;

	while (1) {
		sleep_time = fuse_clean_cache(f);
		sleep(sleep_time);
	}
	return NULL;
}

int fuse_start_cleanup_thread(struct fuse *f)
{
	if (lru_enabled(f))
		return fuse_start_thread(&f->prune_thread, fuse_prune_nodes, f);
	return 0;
}

void fuse_stop_cleanup_thread(struct fuse *f)
{
	if (lru_enabled(f)) {
		pthread_mutex_lock(&f->lock);
		pthread_cancel(f->prune_thread);
		pthread_mutex_unlock(&f->lock);
		pthread_join(f->prune_thread, NULL);
	}
}
FUSE_SYMVER("fuse_new_31", "fuse_new@@FUSE_3.1")
struct fuse *fuse_new_31(struct fuse_args *args,
			 const struct fuse_operations *op,
			 size_t op_size, void *user_data)
{
	struct fuse *f;
	struct node *root;
	struct fuse_fs *fs;
	struct fuse_lowlevel_ops llop = fuse_path_ops;

	f = (struct fuse *) calloc(1, sizeof(struct fuse));
	if (f == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
		goto out;
	}

	f->conf.entry_timeout = 1.0;
	f->conf.attr_timeout = 1.0;
	f->conf.negative_timeout = 0.0;
	f->conf.intr_signal = FUSE_DEFAULT_INTR_SIGNAL;

	/* Parse the library options */
	if (fuse_opt_parse(args, &f->conf, fuse_lib_opts,
			   fuse_lib_opt_proc) == -1)
		goto out_free;

	pthread_mutex_lock(&fuse_context_lock);
	static int builtin_modules_registered = 0;
	/* Register the builtin modules exactly once */
	if (builtin_modules_registered == 0) {
		fuse_register_module("subdir", fuse_module_subdir_factory,
				     NULL);
		fuse_register_module("iconv", fuse_module_iconv_factory,
				     NULL);
		builtin_modules_registered = 1;
	}
	pthread_mutex_unlock(&fuse_context_lock);

	if (fuse_create_context_key() == -1)
		goto out_free;

	fs = fuse_fs_new(op, op_size, user_data);
	if (!fs)
		goto out_delete_context_key;

	f->fs = fs;

	f->pagesize = getpagesize();
	init_list_head(&f->partial_slabs);
	init_list_head(&f->full_slabs);
	init_list_head(&f->lru_table);

	if (f->conf.modules) {
		char *module;
		char *next;

		for (module = f->conf.modules; module; module = next) {
			char *p;
			for (p = module; *p && *p != ':'; p++);
			next = *p ? p + 1 : NULL;
			*p = '\0';
			if (module[0] &&
			    fuse_push_module(f, module, args) == -1)
				goto out_free_fs;
		}
	}

	if (!f->conf.ac_attr_timeout_set)
		f->conf.ac_attr_timeout = f->conf.attr_timeout;

#if defined(__FreeBSD__) || defined(__NetBSD__)
	/* Inode numbers are required to make getcwd(3) work */
	f->conf.readdir_ino = 1;
#endif

	f->se = fuse_session_new(args, &llop, sizeof(llop), f);
	if (f->se == NULL)
		goto out_free_fs;

	if (f->conf.debug) {
		fuse_log(FUSE_LOG_DEBUG, "nullpath_ok: %i\n",
			 f->conf.nullpath_ok);
	}

	/* Trace the topmost layer by default */
	f->fs->debug = f->conf.debug;

	if (node_table_init(&f->name_table) == -1)
		goto out_free_session;

	if (node_table_init(&f->id_table) == -1)
		goto out_free_name_table;

	pthread_mutex_init(&f->lock, NULL);

	root = alloc_node(f);
	if (root == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: memory allocation failed\n");
		goto out_free_id_table;
	}
	if (lru_enabled(f)) {
		struct node_lru *lnode = node_lru(root);
		init_list_head(&lnode->lru);
	}

	strcpy(root->inline_name, "/");
	root->name = root->inline_name;

	if (f->conf.intr &&
	    fuse_init_intr_signal(f->conf.intr_signal,
				  &f->intr_installed) == -1)
		goto out_free_root;

	root->parent = NULL;
	root->nodeid = FUSE_ROOT_ID;
	inc_nlookup(root);
	hash_id(f, root);

	return f;

out_free_root:
	free(root);
out_free_id_table:
	free(f->id_table.array);
out_free_name_table:
	free(f->name_table.array);
out_free_session:
	fuse_session_destroy(f->se);
out_free_fs:
	free(f->conf.modules);
out_delete_context_key:
	fuse_delete_context_key();
out_free:
	free(f);
out:
	return NULL;
}
/* Emulates the 3.0-style fuse_new(), which still handled --help */
struct fuse *fuse_new_30(struct fuse_args *args, const struct fuse_operations *op,
			 size_t op_size, void *user_data);
FUSE_SYMVER("fuse_new_30", "fuse_new@FUSE_3.0")
struct fuse *fuse_new_30(struct fuse_args *args,
			 const struct fuse_operations *op,
			 size_t op_size, void *user_data)
{
	struct fuse_config conf;

	memset(&conf, 0, sizeof(conf));

	const struct fuse_opt opts[] = {
		FUSE_LIB_OPT("-h", show_help, 1),
		FUSE_LIB_OPT("--help", show_help, 1),
		FUSE_OPT_END
	};

	if (fuse_opt_parse(args, &conf, opts,
			   fuse_lib_opt_proc) == -1)
		return NULL;

	if (conf.show_help) {
		fuse_lib_help(args);
		return NULL;
	} else
		return fuse_new_31(args, op, op_size, user_data);
}
void fuse_destroy(struct fuse *f)
{
	size_t i;

	if (f->conf.intr && f->intr_installed)
		fuse_restore_intr_signal(f->conf.intr_signal);

	if (f->fs) {
		fuse_create_context(f);

		/* Remove any files that were hidden instead of unlinked */
		for (i = 0; i < f->id_table.size; i++) {
			struct node *node;

			for (node = f->id_table.array[i]; node != NULL;
			     node = node->id_next) {
				if (node->is_hidden) {
					char *path;
					if (try_get_path(f, node->nodeid, NULL,
							 &path, NULL,
							 false) == 0) {
						fuse_fs_unlink(f->fs, path);
						free(path);
					}
				}
			}
		}
	}
	for (i = 0; i < f->id_table.size; i++) {
		struct node *node;
		struct node *next;

		for (node = f->id_table.array[i]; node != NULL; node = next) {
			next = node->id_next;
			free_node(f, node);
			f->id_table.use--;
		}
	}
	assert(list_empty(&f->partial_slabs));
	assert(list_empty(&f->full_slabs));

	while (fuse_modules)
		fuse_put_module(fuse_modules);

	free(f->id_table.array);
	free(f->name_table.array);
	pthread_mutex_destroy(&f->lock);
	free(f->conf.modules);
	free(f);
	fuse_delete_context_key();
}

int fuse_version(void)
{
	return FUSE_VERSION;
}

const char *fuse_pkgversion(void)
{
	return PACKAGE_VERSION;
}