diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 58c855a..3656c9a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -43,5 +43,6 @@ jobs: RDM_NODE_ID: ${{ secrets.RDM_NODE_ID }} RDM_TOKEN: ${{ secrets.RDM_TOKEN }} RDM_STORAGE: ${{ secrets.RDM_STORAGE || 'osfstorage' }} + SKIP_TEST_DOCKER: ${{ github.event_name == 'pull_request' && '1' || '' }} run: | pytest --cov diff --git a/README.md b/README.md index 53a7006..10b564a 100644 --- a/README.md +++ b/README.md @@ -6,10 +6,9 @@ RDMFS is a FUSE filesystem that allows you to mount your GakuNin RDM project as RDMFS requires libfuse-dev to be installed on your system. -## Run RDMFS on Docker +## Mount a single project -You can easily try out RDMFS by using a Docker container with libfuse-dev installed. -You can try RDMFS by executing the following commands. +You can easily try out RDMFS by using a Docker container with libfuse-dev installed. The following example mounts a single project by supplying its node ID via environment variables. ``` $ docker build -t rcosdp/cs-rdmfs . @@ -28,6 +27,25 @@ googledrive osfstorage file1.txt file2.txt ``` +## Mount all accessible projects + +Omit `RDM_NODE_ID` when you launch the Docker container (or pass `--all-projects` to the CLI) to expose every project you can access under the mount root. +This layout adds `.children` / `.linked` directories that contain symbolic links to related projects, while single-project mounts keep the previous structure and hide them. + +``` +$ docker run -it -v $(pwd)/mnt:/mnt -e RDM_TOKEN=YOUR_PERSONAL_TOKEN -e RDM_API_URL=http://192.168.168.167:8000/v2/ -e MOUNT_PATH=/mnt/all --name rdmfs --privileged rcosdp/cs-rdmfs +$ docker exec -it rdmfs bash +# cd /mnt/all +# ls +abcde fghij klmno +# ls abcde +googledrive osfstorage +# ls abcde/.linked +klmno -> ../../klmno/ +``` + +> Links to projects where you are not a contributor (public projects) are not supported; GakuNin RDM deployments do not expose publicly accessible nodes. + # Run Tests on Docker You can run the tests on a Docker container by executing the following commands. 
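The `.linked` listing above shows `klmno -> ../../klmno/`. As a quick illustration of why that relative target resolves to the sibling project directory under the mount root (plain `os.path` arithmetic using the paths from the example above, not RDMFS code):

```python
import os.path

# A .linked entry lives two levels below the mount root
# (<root>/<project>/.linked/<name>), so a target of "../../<id>/"
# climbs back to the root and descends into the linked project.
link = "/mnt/all/abcde/.linked/klmno"   # path from the README example
target = "../../klmno/"                 # target stored in the symlink

resolved = os.path.normpath(os.path.join(os.path.dirname(link), target))
print(resolved)  # /mnt/all/klmno
```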
diff --git a/bin/start.sh b/bin/start.sh index 0d0d95b..cb3ee6d 100644 --- a/bin/start.sh +++ b/bin/start.sh @@ -2,16 +2,47 @@ set -ue -export RDM_MOUNT_PATH=${MOUNT_PATH:-/mnt} -mkdir -p ${RDM_MOUNT_PATH} +RDM_MOUNT_PATH=${MOUNT_PATH:-/mnt} +mkdir -p "${RDM_MOUNT_PATH}" -export RDM_MOUNT_FILE_MODE=${MOUNT_FILE_MODE:-0666} -export RDM_MOUNT_DIR_MODE=${MOUNT_DIR_MODE:-0777} +RDM_MOUNT_FILE_MODE=${MOUNT_FILE_MODE:-0666} +RDM_MOUNT_DIR_MODE=${MOUNT_DIR_MODE:-0777} +RDM_API_URL=${RDM_API_URL:-https://api.rdm.nii.ac.jp/v2/} +RDM_NODE_ID=${RDM_NODE_ID:-} -export DEBUG=--debug +DEBUG=--debug export OSF_TOKEN=${RDM_TOKEN} -python3 -m rdmfs.__main__ \ - --file-mode ${RDM_MOUNT_FILE_MODE} \ - --dir-mode ${RDM_MOUNT_DIR_MODE} \ - --allow-other -p ${RDM_NODE_ID} \ - --base-url ${RDM_API_URL} ${DEBUG} ${RDM_MOUNT_PATH} $@ + + +ALL_PROJECTS=false +EXTRA_ARGS=() +for arg in "$@"; do + if [ "$arg" = "--all-projects" ]; then + ALL_PROJECTS=true + else + EXTRA_ARGS+=("$arg") + fi +done + +if [ -z "${RDM_NODE_ID}" ]; then + ALL_PROJECTS=true +fi + +CMD=(python3 -m rdmfs + --file-mode "${RDM_MOUNT_FILE_MODE}" + --dir-mode "${RDM_MOUNT_DIR_MODE}" + --allow-other + --base-url "${RDM_API_URL}" + ${DEBUG} +) + +if [ "${ALL_PROJECTS}" = true ]; then + CMD+=(--all-projects) +else + CMD+=(--project "${RDM_NODE_ID}") +fi + +CMD+=("${RDM_MOUNT_PATH}") +CMD+=("${EXTRA_ARGS[@]}") + +exec "${CMD[@]}" diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 0000000..0bff495 --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,113 @@ +# CS-rdmfs Usage Guide + +## Overview + +CS-rdmfs mounts projects from the Open Science Framework (OSF) as a FUSE filesystem. You can mount either a single project by ID or expose every accessible project beneath the mount root. Each project directory provides additional virtual entries that surface API metadata and related projects. + +## Running via `bin/start.sh` + +The recommended entrypoint (used by the Docker image) is `bin/start.sh`. It +reads environment variables, prepares the mount directory, and invokes +`python -m rdmfs` with the appropriate options. + +Environment variables: + +- `RDM_NODE_ID` – project GUID to mount; omit or leave it empty to mount all projects. +- `RDM_TOKEN` – personal access token (required; forwarded to `OSF_TOKEN`). +- `RDM_API_URL` – API base URL (defaults to `https://api.rdm.nii.ac.jp/v2/`). +- `MOUNT_PATH` – mountpoint inside the container (default `/mnt`). + +Optional overrides: + +- `MOUNT_FILE_MODE` / `MOUNT_DIR_MODE` – forwarded to `--file-mode` and + `--dir-mode`. +- Any extra arguments appended to `start.sh` are passed through to the Python + module, enabling `--debug` or additional FUSE options. + +If `RDM_NODE_ID` is unset (and you do not explicitly pass `--all-projects`), +`start.sh` automatically enables the all-projects mode.
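+
+A minimal Python sketch of the mode selection that `start.sh` performs
+(assumed semantics for illustration; this helper is not part of CS-rdmfs):
+
+```python
+import os
+
+def build_rdmfs_args(extra_args):
+    """Choose between --all-projects and --project the way bin/start.sh
+    does: an explicit --all-projects flag, or an empty RDM_NODE_ID,
+    selects the all-projects mode; other arguments pass through."""
+    node_id = os.environ.get("RDM_NODE_ID", "")
+    all_projects = "--all-projects" in extra_args or not node_id
+    passthrough = [a for a in extra_args if a != "--all-projects"]
+    mode = ["--all-projects"] if all_projects else ["--project", node_id]
+    return mode + passthrough
+
+# With RDM_NODE_ID unset, build_rdmfs_args(["--debug"])
+# returns ["--all-projects", "--debug"].
+```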
+ +### Docker Examples + +Mount a single project inside `/mnt/osf`: + +```bash +docker run --rm -it --privileged \ + -v "$(pwd)/mnt":/mnt \ + -e RDM_NODE_ID=abc123 \ + -e RDM_TOKEN=$RDM_TOKEN \ + -e RDM_API_URL=https://api.rdm.nii.ac.jp/v2/ \ + -e MOUNT_PATH=/mnt/osf \ + rcosdp/cs-rdmfs +``` + +Mount every accessible project by omitting `RDM_NODE_ID`: + +```bash +docker run --rm -it --privileged \ + -v "$(pwd)/mnt":/mnt \ + -e RDM_TOKEN=$RDM_TOKEN \ + -e RDM_API_URL=https://api.rdm.nii.ac.jp/v2/ \ + rcosdp/cs-rdmfs +``` + +## Direct CLI Usage + +When running outside Docker (or bypassing `start.sh`), set `OSF_TOKEN` manually: + +```bash +export OSF_TOKEN=your_token +python -m rdmfs [mountpoint] \ + (--project PROJECT_ID | --all-projects) \ + [--base-url https://api.rdm.nii.ac.jp/v2/] \ + [--file-mode 0644] [--dir-mode 0755] \ + [--allow-other] [--debug] [--debug-fuse] +``` + +`--project` and `--all-projects` are mutually exclusive. Remaining options match +those used by `start.sh`. + +## Virtual Directory Layout + +Each mounted project directory contains virtual entries alongside its storage providers: + +``` +/project-id/ + .attributes.json # live view of OSF node attributes (nodes_read) + .children/ # child projects returned by nodes_children_list + .linked/ # linked projects from collections_linked_nodes_list + osfstorage/ # standard storage providers follow the virtual entries + ... +``` + +- `.attributes.json` is read-only. Every read triggers `GET /v2/nodes/{id}/` and + returns `data.attributes` formatted as indented JSON. +- `.children/` lists child node IDs; each entry is a symbolic link that resolves + to the corresponding project directory under the mount root. +- `.linked/` lists linked nodes as symbolic links; each resolves to a project + directory that exposes the same `.attributes.json`, `.children`, `.linked`, and storages. +- `.children/` and `.linked/` appear only in all-projects mode; single-project + mounts keep the previous layout and hide them. + +## API Endpoints + +The filesystem relies on: + +- `GET /v2/nodes/{id}/` (nodes_read) for node attributes. +- `GET /v2/nodes/{id}/children/` (nodes_children_list) for child projects. +- `GET /v2/nodes/{id}/linked_nodes/` (collections_linked_nodes_list) for linked + projects. + +All collection requests apply `page[size]=100` to reduce page churn and follow +`links.next` until completion. + +## Testing + +Run the repository tests inside Docker. Supplying `RDM_NODE_ID` and `RDM_TOKEN` +allows the Docker-specific test to execute; omitting them results in a single +expected failure while the rest succeed. + +```bash +docker run --rm -v "$(pwd)":/code -w /code \ + -e RDM_NODE_ID=your_project_id \ + -e RDM_TOKEN=$RDM_TOKEN \ + rcosdp/cs-rdmfs py.test --cov +``` diff --git a/rdmfs/__main__.py b/rdmfs/__main__.py index 662269d..fd73622 100644 --- a/rdmfs/__main__.py +++ b/rdmfs/__main__.py @@ -1,6 +1,7 @@ from argparse import ArgumentParser import asyncio import logging +import os import grp import pwd import re @@ -45,8 +46,11 @@ def parse_args(): 'OSF_PASSWORD environment variable')) parser.add_argument('--base-url', default=None, help='OSF API URL (Default is https://api.osf.io/v2/)') - parser.add_argument('-p', '--project', default=None, - help='OSF project ID') + project_group = parser.add_mutually_exclusive_group() + project_group.add_argument('-p', '--project', default=None, + help='OSF project ID') + project_group.add_argument('--all-projects', action='store_true', default=False, + help='Mount all accessible projects under the root directory') parser.add_argument('--file-mode', default='0644', help='Mode of files.
default: 0644') parser.add_argument('--dir-mode', default='0755', @@ -85,7 +89,17 @@ def main(): options = parse_args() init_logging(options.debug) + placeholder_project = None + if options.all_projects and options.project is None: + placeholder_project = '__all_projects__' + options.project = placeholder_project + osf = cli._setup_osf(options) + resolved_project = None if options.all_projects else options.project + + if placeholder_project is not None: + options.project = None + file_mode = parse_mode(options.file_mode) dir_mode = parse_mode(options.dir_mode) uid = parse_uid(options.owner) @@ -94,7 +108,10 @@ def main(): if options.writable_whitelist is not None: with open(options.writable_whitelist, 'r') as f: writable_whitelist = whitelist.Whitelist(f) - rdmfs = fs.RDMFileSystem(osf, options.project, + if not options.all_projects and resolved_project is None: + raise SystemExit('either --project or --all-projects must be specified') + rdmfs = fs.RDMFileSystem(osf, resolved_project, + list_all_projects=options.all_projects, file_mode=file_mode, dir_mode=dir_mode, uid=uid, gid=gid, writable_whitelist=writable_whitelist) diff --git a/rdmfs/fs.py b/rdmfs/fs.py index 1fb7f3a..0e8f33c 100644 --- a/rdmfs/fs.py +++ b/rdmfs/fs.py @@ -18,9 +18,10 @@ class RDMFileSystem(pyfuse3.Operations): def __init__(self, osf, project, dir_mode=0o755, file_mode=0o644, uid=None, gid=None, - writable_whitelist: Optional[Whitelist]=None): + writable_whitelist: Optional[Whitelist]=None, + list_all_projects: bool=False): super(RDMFileSystem, self).__init__() - self.inodes = Inodes(osf, project) + self.inodes = Inodes(osf, project, list_all_projects=list_all_projects) self.file_handlers = FileHandlers() self.dir_mode = dir_mode self.file_mode = file_mode @@ -36,7 +37,10 @@ async def getattr(self, inode_num, ctx=None): if inode is None: raise pyfuse3.FUSEError(errno.ENOENT) await inode.refresh(self.inodes) - if inode.has_children(): + if inode.is_symlink: + entry.st_mode = (stat.S_IFLNK | 0o777) + entry.st_size = len(inode.target) + elif inode.has_children(): entry.st_mode = (stat.S_IFDIR | self.dir_mode) entry.st_size = 0 else: @@ -49,15 +53,15 @@ async def getattr(self, inode_num, ctx=None): if self.writable_whitelist is not None and \ not self.writable_whitelist.includes(inode): entry.st_mode = entry.st_mode & (~0o200) - stamp = 0 - mstamp = stamp + ctime_stamp = 0 + mtime_stamp = 0 if inode.date_created is not None: - stamp = fromisoformat(inode.date_created) + ctime_stamp = fromisoformat(inode.date_created) if inode.date_modified is not None: - mstamp = fromisoformat(inode.date_modified) - entry.st_atime_ns = stamp - entry.st_ctime_ns = stamp - entry.st_mtime_ns = mstamp + mtime_stamp = fromisoformat(inode.date_modified) + entry.st_atime_ns = mtime_stamp + entry.st_ctime_ns = ctime_stamp + entry.st_mtime_ns = mtime_stamp entry.st_gid = self.gid entry.st_uid = self.uid entry.st_ino = inode.id @@ -101,6 +105,21 @@ async def lookup(self, parent_inode_num, bname, ctx=None): except BaseException as e: reraise_fuse_error(e) + async def readlink(self, inode_num, ctx): + log.info('readlink: inode={}'.format(inode_num)) + try: + inode = await self.inodes.get(inode_num) + if inode is None: + raise pyfuse3.FUSEError(errno.ENOENT) + if not inode.is_symlink: + raise pyfuse3.FUSEError(errno.EINVAL) + target = inode.target + return os.fsencode(target) + except pyfuse3.FUSEError as e: + raise e + except BaseException as e: + reraise_fuse_error(e) + async def opendir(self, inode_num, ctx): log.info('opendir: 
inode={inode}'.format(inode=inode_num)) try: @@ -147,6 +166,8 @@ async def open(self, inode_num, flags, ctx): inode = await self.inodes.get(inode_num) if inode is None: raise pyfuse3.FUSEError(errno.ENOENT) + if flags_can_write(flags) and getattr(inode, 'readonly', False): + raise pyfuse3.FUSEError(errno.EACCES) if flags_can_write(flags) and \ self.writable_whitelist is not None and \ not self.writable_whitelist.includes(inode): diff --git a/rdmfs/inode.py b/rdmfs/inode.py index c75a811..b4d0cce 100644 --- a/rdmfs/inode.py +++ b/rdmfs/inode.py @@ -4,6 +4,8 @@ import logging import errno import time +import inspect +from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse from typing import Optional, Union, List, Dict, AsyncGenerator, Any import pyfuse3 @@ -15,6 +17,7 @@ log = logging.getLogger(__name__) FILE_ATTRIBUTE_CACHE_TTL = 60 # 1 minute LIST_CACHE_TTL = 180 # 3 minutes +NODE_PAGE_SIZE = 100 def fromisoformat(datestr): @@ -89,16 +92,33 @@ def can_create(self) -> bool: def can_move(self) -> bool: return False + @property + def is_symlink(self) -> bool: + return False + class ProjectInode(BaseInode): """The class for managing single project inode.""" - def __init__(self, id: int, project: Project): + def __init__( + self, + id: int, + project: Project, + parent: Optional[BaseInode] = None, + name: Optional[str] = None, + metadata: Optional[Dict[str, Any]] = None, + ): super(ProjectInode, self).__init__(id) self.project = project + self._parent = parent + default_name = getattr(project, 'id', None) + if default_name is None: + default_name = getattr(project, 'title', '') + self._name = str(name or default_name or '') + self.metadata: Dict[str, Any] = metadata or {} @property def parent(self) -> Optional[BaseInode]: - return None + return self._parent @property def storage(self): @@ -110,15 +130,359 @@ def object(self): @property def name(self): - return self.project.title + return self._name def has_children(self): return True @property def path(self): + if self.parent is not None: + return f'{self.parent.path}{self.name}/' return f'/{self.project.id}/' + @property + def display_path(self): + if self.parent is not None: + return f'{self.parent.display_path}{self.name}/' + return '/' + + def update_metadata(self, metadata: Dict[str, Any]): + self.metadata = metadata or {} + setattr(self.project, '_rdmfs_attributes', self.metadata) + + @property + def date_created(self) -> Optional[str]: + return self.metadata.get('date_created') + + @property + def date_modified(self) -> Optional[str]: + return self.metadata.get('date_modified') + + +class ProjectAttributesEntry: + """Virtual file representing project attributes as JSON.""" + + def __init__( + self, + project_inode: 'ProjectInode', + fetcher, + attributes: Optional[Dict[str, Any]] = None, + ): + self.project_inode = project_inode + self._fetcher = fetcher + self.attributes = attributes or {} + self.name = '.attributes.json' + + @property + def path(self) -> str: + return f'{self.project_inode.path}{self.name}' + + @property + def display_path(self) -> str: + return f'{self.project_inode.display_path}{self.name}' + + @property + def date_created(self) -> Optional[str]: + return self.attributes.get('date_created') + + @property + def date_modified(self) -> Optional[str]: + return self.attributes.get('date_modified') + + @property + def size(self) -> int: + text = json.dumps(self.attributes, sort_keys=True, indent=2) + return len(text.encode('utf-8')) + + async def write_to(self, fp): + attrs = await self._fetcher() + 
self.attributes = attrs or {} + self.project_inode.update_metadata(self.attributes) + text = json.dumps(self.attributes, sort_keys=True, indent=2) + data = text.encode('utf-8') + result = fp.write(data) + if inspect.isawaitable(result): + await result + + def invalidate(self): + self.attributes = self.project_inode.metadata + + +class ProjectAttributesInode(BaseInode): + """Inode exposing project attributes as a read-only JSON file.""" + + def __init__(self, id: int, project_inode: ProjectInode, content: ProjectAttributesEntry): + super(ProjectAttributesInode, self).__init__(id) + self.project_inode = project_inode + self._content = content + self.readonly = True + + def invalidate(self, name: Optional[str] = None): + self._content.invalidate() + + def set_content(self, content: ProjectAttributesEntry): + self._content = content + + @property + def parent(self) -> Optional[BaseInode]: + return self.project_inode + + @property + def storage(self): + raise ValueError('Metadata inode does not have storage') + + @property + def object(self): + return self._content + + @property + def name(self): + return self._content.name + + def has_children(self): + return False + + @property + def path(self): + return self._content.path + + @property + def display_path(self): + return self._content.display_path + + @property + def size(self) -> Optional[int]: + return self._content.size + + @property + def date_created(self) -> Optional[str]: + return self._content.date_created + + @property + def date_modified(self) -> Optional[str]: + return self._content.date_modified + + @property + def can_move(self) -> bool: + return False + + +class ProjectChildrenEntry: + """Virtual directory exposing project child nodes.""" + + def __init__(self, project_inode: 'ProjectInode'): + self.project_inode = project_inode + self.name = '.children' + + @property + def path(self) -> str: + return f'{self.project_inode.path}{self.name}/' + + @property + def display_path(self) -> str: + return f'{self.project_inode.display_path}{self.name}/' + + +class ProjectChildrenInode(BaseInode): + """Inode representing the `.children` directory for a project.""" + + def __init__(self, id: int, project_inode: ProjectInode): + super(ProjectChildrenInode, self).__init__(id) + self.project_inode = project_inode + + @property + def parent(self) -> Optional[BaseInode]: + return self.project_inode + + @property + def storage(self): + raise ValueError('Children inode does not have storage') + + @property + def object(self): + return self + + @property + def name(self) -> str: + return '.children' + + def has_children(self) -> bool: + return True + + def invalidate(self, name: Optional[str] = None): + pass + + @property + def path(self) -> str: + return f'{self.parent.path}{self.name}/' + + @property + def display_path(self) -> str: + return f'{self.parent.display_path}{self.name}/' + + +class ProjectLinkedEntry: + """Virtual directory exposing project linked nodes.""" + + def __init__(self, project_inode: 'ProjectInode'): + self.project_inode = project_inode + self.name = '.linked' + + @property + def path(self) -> str: + return f'{self.project_inode.path}{self.name}/' + + @property + def display_path(self) -> str: + return f'{self.project_inode.display_path}{self.name}/' + + +class ProjectLinkedInode(BaseInode): + """Inode representing the `.linked` directory for a project.""" + + def __init__(self, id: int, project_inode: ProjectInode):
super(ProjectLinkedInode, self).__init__(id) + self.project_inode = project_inode + + @property + def parent(self) -> Optional[BaseInode]: + return self.project_inode + + @property + def storage(self): + raise ValueError('Linked inode does not have storage') + + @property + def object(self): + return self + + @property + def name(self) -> str: + return '.linked' + + def has_children(self) -> bool: + return True + + def invalidate(self, name: Optional[str] = None): + pass + + @property + def path(self) -> str: + return f'{self.parent.path}{self.name}/' + + @property + def display_path(self) -> str: + return f'{self.parent.display_path}{self.name}/' + + +class ProjectSymlinkEntry: + """Symlink entry pointing to a child or linked project.""" + + def __init__(self, parent_inode: BaseInode, target_project: Project): + self.parent_inode = parent_inode + self.target_project = target_project + self._name = getattr(target_project, 'id', None) + + @property + def name(self) -> str: + return self._name + + @property + def path(self) -> str: + return f'{self.parent_inode.path}{self.name}' + + @property + def display_path(self) -> str: + return f'{self.parent_inode.display_path}{self.name}' + + @property + def target(self) -> str: + """Calculate relative path to target project.""" + target_id = getattr(self.target_project, 'id', None) + return f'../../{target_id}/' + + +class SymlinkInode(BaseInode): + """Inode representing a symbolic link to a project.""" + + def __init__(self, id: int, parent: BaseInode, entry: ProjectSymlinkEntry): + super(SymlinkInode, self).__init__(id) + self._parent = parent + self._entry = entry + + @property + def parent(self) -> Optional[BaseInode]: + return self._parent + + @property + def storage(self): + raise ValueError('Symlink inode does not have storage') + + @property + def object(self): + return self._entry + + @property + def name(self) -> str: + return self._entry.name + + def has_children(self) -> bool: + return False + + @property + def path(self) -> str: + return self._entry.path + + @property + def display_path(self) -> str: + return self._entry.display_path + + @property + def target(self) -> str: + """Return the symlink target path.""" + return self._entry.target + + @property + def is_symlink(self) -> bool: + return True + + @property + def can_move(self) -> bool: + return False + + +class ProjectsRootInode(BaseInode): + """The virtual root inode when mounting all accessible projects.""" + def __init__(self, id: int): + super(ProjectsRootInode, self).__init__(id) + + @property + def parent(self) -> Optional[BaseInode]: + return None + + @property + def storage(self) -> 'StorageInode': + raise ValueError('Root inode does not have storage') + + @property + def object(self): + return None + + @property + def name(self): + return '' + + def has_children(self): + return True + + @property + def path(self): + return '/' + @property def display_path(self): return '/' @@ -404,26 +768,36 @@ class Inodes: """The class for managing multiple inodes.""" INODE_DUMMY = -1 osf: OSF - project: str + project: Optional[str] + list_all_projects: bool osfproject: Optional[Project] _inodes: Dict[int, BaseInode] _child_relations: Cache - def __init__(self, osf: OSF, project: str): + def __init__(self, osf: OSF, project: Optional[str], list_all_projects: bool=False): """Initialize Inodes object.""" super(Inodes, self).__init__() self.osf = osf self.project = project + self.list_all_projects = list_all_projects self.osfproject = None self.offset_inode = pyfuse3.ROOT_INODE + 1 
self._inodes = {} self._child_relations = Cache(maxsize=256, ttl=LIST_CACHE_TTL, timer=time.time, default=None) + self._projects_cache: Optional[List[Project]] = None + self._projects_cache_loaded_at = 0.0 + self._project_children_cache = Cache(maxsize=256, ttl=LIST_CACHE_TTL, timer=time.time, default=None) + self._project_linked_cache = Cache(maxsize=256, ttl=LIST_CACHE_TTL, timer=time.time, default=None) async def _get_osfproject(self): """Get OSF project object.""" if self.osfproject is not None: return self.osfproject + if self.project is None: + raise ValueError('Project ID is not specified') self.osfproject = await self.osf.project(self.project) + metadata = await self._fetch_node_attributes(self.osfproject.id) + setattr(self.osfproject, '_rdmfs_attributes', metadata) return self.osfproject async def register(self, parent_inode: BaseInode, name: str): @@ -451,8 +825,12 @@ async def get(self, inode_num: int) -> Optional[BaseInode]: if inode_num in self._inodes: return self._inodes[inode_num] if inode_num == pyfuse3.ROOT_INODE: - project = await self._get_osfproject() - inode = ProjectInode(pyfuse3.ROOT_INODE, project) + if self.list_all_projects: + inode = ProjectsRootInode(pyfuse3.ROOT_INODE) + else: + project = await self._get_osfproject() + metadata = getattr(project, '_rdmfs_attributes', None) + inode = ProjectInode(pyfuse3.ROOT_INODE, project, metadata=metadata) self._inodes[inode_num] = inode return inode return None @@ -493,21 +871,53 @@ async def get_children_of(self, parent: BaseInode) -> AsyncGenerator[Union[Stora log.debug(f'get_children_of: parent={parent}') if not parent.has_children(): raise pyfuse3.FUSEError(errno.ENOTDIR) + if isinstance(parent, ProjectsRootInode): + for project in await self._list_projects(): + yield project + return if isinstance(parent, ProjectInode): project = parent.object + initial_attributes = getattr(project, '_rdmfs_attributes', None) or parent.metadata + if initial_attributes: + parent.update_metadata(initial_attributes) + + async def fetch_metadata(): + attrs = await self._fetch_node_attributes(project.id) + parent.update_metadata(attrs) + setattr(project, '_rdmfs_attributes', attrs) + return attrs + + yield ProjectAttributesEntry(parent, fetch_metadata, initial_attributes) + if self.list_all_projects: + yield ProjectChildrenEntry(parent) + yield ProjectLinkedEntry(parent) async for storage in project.storages: yield storage return + if isinstance(parent, ProjectChildrenInode): + project = parent.project_inode.object + for child in await self._list_child_projects(project.id): + yield ProjectSymlinkEntry(parent, child) + return + if isinstance(parent, ProjectLinkedInode): + project = parent.project_inode.object + for linked in await self._list_linked_projects(project.id): + yield ProjectSymlinkEntry(parent, linked) + return async for child in parent.object.children: yield child - async def _get_object_inode(self, parent: BaseInode, object: Union[Storage, File, Folder]) -> BaseInode: + async def _get_object_inode(self, parent: BaseInode, object: Union[Project, Storage, File, Folder, ProjectAttributesEntry, ProjectChildrenEntry, ProjectLinkedEntry, ProjectSymlinkEntry]) -> BaseInode: """Get inode for the object.""" dummy_inode = self._create_object_inode(self.INODE_DUMMY, parent, object) for inode in self._inodes.values(): if inode.removed: continue if inode.path == dummy_inode.path: + if isinstance(inode, ProjectAttributesInode) and isinstance(dummy_inode, ProjectAttributesInode): + 
inode.project_inode.update_metadata(dummy_inode.object.attributes) + inode.set_content(dummy_inode.object) + inode.invalidate() return inode new_file_inode = await self._find_new_file_by_name(parent, dummy_inode.name) if new_file_inode is not None: @@ -524,8 +934,19 @@ async def _get_object_inode(self, parent: BaseInode, object: Union[Storage, File log.debug(f'new inode: inode={r}') return r - def _create_object_inode(self, inode_num: int, parent: BaseInode, object: Union[Storage, File, Folder]) -> BaseInode: + def _create_object_inode(self, inode_num: int, parent: BaseInode, object: Union[Project, Storage, File, Folder, ProjectAttributesEntry, ProjectChildrenEntry, ProjectLinkedEntry, ProjectSymlinkEntry]) -> BaseInode: """Create inode object for the object.""" + if isinstance(object, Project): + metadata = getattr(object, '_rdmfs_attributes', None) + return ProjectInode(inode_num, object, parent, metadata=metadata) + if isinstance(object, ProjectAttributesEntry): + return ProjectAttributesInode(inode_num, object.project_inode, object) + if isinstance(object, ProjectChildrenEntry): + return ProjectChildrenInode(inode_num, object.project_inode) + if isinstance(object, ProjectLinkedEntry): + return ProjectLinkedInode(inode_num, object.project_inode) + if isinstance(object, ProjectSymlinkEntry): + return SymlinkInode(inode_num, parent, object) if isinstance(object, Storage): return StorageInode(inode_num, parent, object) if isinstance(object, Folder): @@ -549,3 +970,146 @@ async def _find_new_file_by_name(self, parent: BaseInode, name: str) -> Optional log.debug(f'Failed to refresh: {inode}', exc_info=True) return inode return None + + async def _list_projects(self) -> List[Project]: + """Fetch and cache projects available to the authenticated user.""" + if not self.list_all_projects: + raise ValueError('Listing projects is only available when mounting all projects') + + now = time.time() + if self._projects_cache is not None and (now - self._projects_cache_loaded_at) < LIST_CACHE_TTL: + return self._projects_cache + + url = self.osf._build_url('users', 'me', 'nodes') + projects: Dict[str, Project] = {} + async for node in self._paginate_nodes(url, page_size=NODE_PAGE_SIZE): + project = self._build_project_from_node(node) + if project is None: + continue + project_id = getattr(project, 'id', None) + if not project_id: + continue + attributes = node.get('attributes', {}) or {} + setattr(project, '_rdmfs_attributes', attributes) + if 'title' not in attributes and hasattr(project, 'title'): + attributes['title'] = project.title + projects[project_id] = project + + ordered = sorted(projects.values(), key=lambda p: getattr(p, 'id', '')) + self._projects_cache = ordered + self._projects_cache_loaded_at = now + return ordered + + async def _list_child_projects(self, project_id: str) -> List[Project]: + cached = self._project_children_cache.get(project_id) + if cached is not None: + return cached + url = self.osf._build_url('nodes', project_id, 'children') + children: Dict[str, Project] = {} + async for node in self._paginate_nodes(url, page_size=NODE_PAGE_SIZE): + project = self._build_project_from_node(node) + if project is None: + continue + child_id = getattr(project, 'id', None) + if not child_id: + continue + attributes = node.get('attributes', {}) or {} + setattr(project, '_rdmfs_attributes', attributes) + children[child_id] = project + ordered = sorted(children.values(), key=lambda p: getattr(p, 'id', '')) + self._project_children_cache.set(project_id, ordered) + return ordered + + async def 
_list_linked_projects(self, project_id: str) -> List[Project]: + cached = self._project_linked_cache.get(project_id) + if cached is not None: + return cached + url = self.osf._build_url('nodes', project_id, 'linked_nodes') + linked: Dict[str, Project] = {} + async for node in self._paginate_nodes(url, page_size=NODE_PAGE_SIZE): + project = self._build_project_from_node(node) + if project is None: + continue + linked_id = getattr(project, 'id', None) + if not linked_id: + continue + attributes = node.get('attributes', {}) or {} + setattr(project, '_rdmfs_attributes', attributes) + linked[linked_id] = project + ordered = sorted(linked.values(), key=lambda p: getattr(p, 'id', '')) + self._project_linked_cache.set(project_id, ordered) + return ordered + + async def _fetch_node_attributes(self, project_id: str) -> Dict[str, Any]: + url = self.osf._build_url('nodes', project_id) + response = await self.osf._get(url) + payload = self.osf._json(response, 200) + data = payload.get('data', {}) or {} + attributes = data.get('attributes', {}) or {} + return attributes + + def _build_project_from_node(self, node: Dict[str, Any]) -> Optional[Project]: + if not isinstance(node, dict): + return None + if node.get('type') != 'nodes': + return None + attributes = node.get('attributes', {}) + # Skip registrations because they are read-only and do not expose files in the same way + if attributes.get('registration'): + return None + node_id = node.get('id') + if not node_id: + return None + related = ( + node.get('relationships', {}) + .get('files', {}) + .get('links', {}) + .get('related', {}) + .get('href') + ) + if not related: + related = self.osf._build_url('nodes', node_id, 'files') + project_payload = { + 'data': { + 'id': node_id, + 'relationships': { + 'files': { + 'links': { + 'related': { + 'href': related + } + } + } + } + } + } + project = Project(project_payload, self.osf.session) + title = attributes.get('title') + if title is not None: + setattr(project, 'title', title) + setattr(project, 'name', node_id) + setattr(project, '_rdmfs_attributes', attributes) + return project + + async def _paginate_nodes(self, url: str, page_size: Optional[int] = None) -> AsyncGenerator[Dict[str, Any], None]: + """Iterate through paginated OSF node listings following `links.next`.""" + next_url = self._with_page_size(url, page_size) if page_size is not None else url + visited: set[str] = set() + while next_url: + response = await self.osf._get(next_url) + payload = self.osf._json(response, 200) + for node in payload.get('data', []) or []: + yield node + links = payload.get('links') or {} + next_link = links.get('next') + if not next_link or next_link in visited: + break + visited.add(next_url) + next_url = next_link + + def _with_page_size(self, url: str, page_size: int) -> str: + parsed = urlparse(url) + query = dict(parse_qsl(parsed.query, keep_blank_values=True)) + query['page[size]'] = str(page_size) + new_query = urlencode(query, doseq=True) + return urlunparse(parsed._replace(query=new_query)) diff --git a/rdmfs/tests/conftest.py b/rdmfs/tests/conftest.py index 4e05823..4ed3e06 100644 --- a/rdmfs/tests/conftest.py +++ b/rdmfs/tests/conftest.py @@ -7,6 +7,9 @@ def rdm_storage(): @pytest.fixture def docker_container(): + if os.getenv("SKIP_TEST_DOCKER"): + pytest.skip("SKIP_TEST_DOCKER is set.") + # Retrieve RDM_NODE_ID and RDM_TOKEN from environment variables rdm_node_id = os.getenv("RDM_NODE_ID", "") rdm_token = os.getenv("RDM_TOKEN", "") diff --git a/rdmfs/tests/mocks.py b/rdmfs/tests/mocks.py index 
d8a60db..250c154 100644 --- a/rdmfs/tests/mocks.py +++ b/rdmfs/tests/mocks.py @@ -109,6 +109,12 @@ def MockProject(name): return_value=FutureMockStorage('osfstorage')) type(mock).storage = storage mock._storage_mock = storage + project_id = PropertyMock(return_value=name) + type(mock).id = project_id + mock._id_mock = project_id + project_title = PropertyMock(return_value=name) + type(mock).title = project_title + mock._title_mock = project_title return mock diff --git a/rdmfs/tests/test_inode.py b/rdmfs/tests/test_inode.py index 355833f..1ed400f 100644 --- a/rdmfs/tests/test_inode.py +++ b/rdmfs/tests/test_inode.py @@ -1,9 +1,12 @@ +import io +import json + import pytest -from mock import MagicMock, patch +from mock import MagicMock, patch, AsyncMock import pyfuse3 -from rdmfs.inode import Inodes, FolderInode, FileInode, StorageInode +from rdmfs.inode import Inodes, FolderInode, FileInode, StorageInode, ProjectsRootInode from .mocks import FutureWrapper, MockProject @@ -13,6 +16,18 @@ async def test_find_by_name(mock_create_object_inode): MockOSF = MagicMock() MockOSF.project = MagicMock(side_effect=lambda p: FutureWrapper(MockProject(p))) MockOSF.aclose = lambda: FutureWrapper() + MockOSF._build_url = MagicMock(side_effect=lambda *parts: 'https://api.test/' + '/'.join(parts) + '/') + metadata_response = MagicMock() + metadata_response.status_code = 200 + metadata_response.json.return_value = { + 'data': { + 'attributes': { + 'title': 'Project Test', + } + } + } + MockOSF._get = AsyncMock(return_value=metadata_response) + MockOSF._json = MagicMock(side_effect=lambda resp, status: resp.json()) def _create_object_inode(i, p, o): if o.name.startswith('Folder-'): return FolderInode(i, p, o) @@ -35,3 +50,289 @@ def _create_object_inode(i, p, o): file_inode = await inodes.find_by_name(sub_folder_inode, 'b') assert file_inode.name == 'b' + + +@pytest.mark.asyncio +async def test_list_all_projects_creates_virtual_root(): + mock_osf = MagicMock() + mock_osf.session = MagicMock() + mock_osf._build_url = MagicMock(side_effect=lambda *parts: 'https://api.test/' + '/'.join(parts) + '/') + node_payload = { + 'id': 'proj1', + 'type': 'nodes', + 'attributes': { + 'title': 'Project One', + 'registration': False, + }, + 'relationships': { + 'files': { + 'links': { + 'related': { + 'href': 'https://api.test/nodes/proj1/files/' + } + } + } + } + } + response_payload = { + 'data': [node_payload], + 'links': {'next': None}, + } + metadata_payload = { + 'data': { + 'attributes': { + 'title': 'Project One (updated)', + 'date_created': '2020-01-01T00:00:00Z', + } + } + } + children_payload = { + 'data': [ + { + 'id': 'child1', + 'type': 'nodes', + 'attributes': { + 'title': 'Child Project', + 'registration': False, + }, + 'relationships': { + 'files': { + 'links': { + 'related': { + 'href': 'https://api.test/nodes/child1/files/' + } + } + } + } + } + ], + 'links': {'next': None}, + } + child_metadata_payload = { + 'data': { + 'attributes': { + 'title': 'Child Project Latest', + } + } + } + linked_payload = { + 'data': [ + { + 'id': 'linked1', + 'type': 'nodes', + 'attributes': { + 'title': 'Linked Project', + 'registration': False, + }, + 'relationships': { + 'files': { + 'links': { + 'related': { + 'href': 'https://api.test/nodes/linked1/files/' + } + } + } + } + } + ], + 'links': {'next': None}, + } + linked_metadata_payload = { + 'data': { + 'attributes': { + 'title': 'Linked Project Latest', + } + } + } + response_list = MagicMock() + response_list.status_code = 200 + response_list.json.return_value = 
response_payload + response_meta = MagicMock() + response_meta.status_code = 200 + response_meta.json.return_value = metadata_payload + response_children = MagicMock() + response_children.status_code = 200 + response_children.json.return_value = children_payload + response_child_meta = MagicMock() + response_child_meta.status_code = 200 + response_child_meta.json.return_value = child_metadata_payload + response_linked = MagicMock() + response_linked.status_code = 200 + response_linked.json.return_value = linked_payload + response_linked_meta = MagicMock() + response_linked_meta.status_code = 200 + response_linked_meta.json.return_value = linked_metadata_payload + storage_response = MagicMock() + storage_response.status_code = 200 + storage_response.json.return_value = {'data': []} + mock_osf.session.get = AsyncMock(return_value=storage_response) + + responses = [ + response_list, + response_meta, + response_children, + response_linked, + ] + response_index = [0] + async def get_response(*args, **kwargs): + if response_index[0] < len(responses): + resp = responses[response_index[0]] + response_index[0] += 1 + return resp + # Default response for any additional calls + default_resp = MagicMock() + default_resp.status_code = 200 + default_resp.json.return_value = {'data': [], 'links': {'next': None}} + return default_resp + + mock_osf._get = AsyncMock(side_effect=get_response) + mock_osf._json = MagicMock(side_effect=lambda resp, status: resp.json()) + + inodes = Inodes(mock_osf, None, list_all_projects=True) + root_inode = await inodes.get(pyfuse3.ROOT_INODE) + + assert isinstance(root_inode, ProjectsRootInode) + + project_inode = await inodes.find_by_name(root_inode, 'proj1') + assert project_inode is not None + assert project_inode.name == 'proj1' + assert project_inode.display_path == '/proj1/' + + # second call should use cache + metadata_inode = await inodes.find_by_name(project_inode, '.attributes.json') + assert metadata_inode.name == '.attributes.json' + assert metadata_inode.readonly + + buffer = io.BytesIO() + await metadata_inode.object.write_to(buffer) + payload = json.loads(buffer.getvalue().decode('utf-8')) + assert payload['title'] == 'Project One (updated)' + assert metadata_inode.object.attributes['title'] == 'Project One (updated)' + + children_dir = await inodes.find_by_name(project_inode, '.children') + assert children_dir.name == '.children' + + child_symlink = await inodes.find_by_name(children_dir, 'child1') + assert child_symlink.name == 'child1' + assert child_symlink.is_symlink + assert child_symlink.target == '../../child1/' + + linked_dir = await inodes.find_by_name(project_inode, '.linked') + assert linked_dir.name == '.linked' + + linked_symlink = await inodes.find_by_name(linked_dir, 'linked1') + assert linked_symlink is not None + assert linked_symlink.name == 'linked1' + assert linked_symlink.is_symlink + assert linked_symlink.target == '../../linked1/' + + project_inode_again = await inodes.find_by_name(root_inode, 'proj1') + assert project_inode_again.id == project_inode.id + + +@pytest.mark.asyncio +async def test_list_all_projects_handles_pagination(): + mock_osf = MagicMock() + mock_osf.session = MagicMock() + mock_osf._build_url = MagicMock(return_value='https://api.test/users/me/nodes/') + + node_payload_page1 = { + 'id': 'proj1', + 'type': 'nodes', + 'attributes': { + 'title': 'Project One', + 'registration': False, + }, + 'relationships': { + 'files': { + 'links': { + 'related': { + 'href': 'https://api.test/nodes/proj1/files/' + } + } + } + } + } + 
node_payload_page2 = { + 'id': 'proj2', + 'type': 'nodes', + 'attributes': { + 'title': 'Project Two', + 'registration': False, + }, + 'relationships': { + 'files': { + 'links': { + 'related': { + 'href': 'https://api.test/nodes/proj2/files/' + } + } + } + } + } + + page1_payload = { + 'data': [node_payload_page1], + 'links': {'next': 'https://api.test/users/me/nodes/?page=2'}, + } + page2_payload = { + 'data': [node_payload_page2], + 'links': {'next': None}, + } + + metadata_payload_proj1 = { + 'data': { + 'attributes': { + 'title': 'Project One latest', + } + } + } + metadata_payload_proj2 = { + 'data': { + 'attributes': { + 'title': 'Project Two latest', + } + } + } + + response_page1 = MagicMock() + response_page1.status_code = 200 + response_page1.json.return_value = page1_payload + response_page2 = MagicMock() + response_page2.status_code = 200 + response_page2.json.return_value = page2_payload + response_meta1 = MagicMock() + response_meta1.status_code = 200 + response_meta1.json.return_value = metadata_payload_proj1 + response_meta2 = MagicMock() + response_meta2.status_code = 200 + response_meta2.json.return_value = metadata_payload_proj2 + + storage_response = MagicMock() + storage_response.status_code = 200 + storage_response.json.return_value = {'data': []} + mock_osf.session.get = AsyncMock(return_value=storage_response) + mock_osf._get = AsyncMock(side_effect=[response_page1, response_page2, response_meta1, response_meta2]) + mock_osf._json = MagicMock(side_effect=lambda resp, status: resp.json()) + + inodes = Inodes(mock_osf, None, list_all_projects=True) + root_inode = await inodes.get(pyfuse3.ROOT_INODE) + + first_project = await inodes.find_by_name(root_inode, 'proj1') + assert first_project is not None + + second_project = await inodes.find_by_name(root_inode, 'proj2') + assert second_project is not None + + metadata_first = await inodes.find_by_name(first_project, '.attributes.json') + metadata_second = await inodes.find_by_name(second_project, '.attributes.json') + + buffer1 = io.BytesIO() + buffer2 = io.BytesIO() + await metadata_first.object.write_to(buffer1) + await metadata_second.object.write_to(buffer2) + + assert json.loads(buffer1.getvalue().decode('utf-8'))['title'] == 'Project One latest' + assert json.loads(buffer2.getvalue().decode('utf-8'))['title'] == 'Project Two latest' + + assert mock_osf._get.await_count == 4 diff --git a/rdmfs/tests/test_node.py b/rdmfs/tests/test_node.py index f20cd79..b5d255a 100644 --- a/rdmfs/tests/test_node.py +++ b/rdmfs/tests/test_node.py @@ -3,8 +3,20 @@ import pyfuse3 -from rdmfs.inode import Inodes, FolderInode, FileInode, StorageInode -from .mocks import FutureWrapper, MockProject, MockFolder +from rdmfs.inode import ( + Inodes, + ProjectInode, + FolderInode, + FileInode, + StorageInode, + ProjectAttributesInode, + ProjectAttributesEntry, + ProjectChildrenInode, + ProjectChildrenEntry, + ProjectLinkedInode, + ProjectLinkedEntry, +) +from .mocks import FutureWrapper, MockProject, MockFolder, AsyncIterator from rdmfs.node import FileContext @@ -18,14 +30,47 @@ async def test_readdir_from_project( MockOSF = MagicMock() MockOSF.project = MagicMock(side_effect=lambda p: FutureWrapper(MockProject(p))) MockOSF.aclose = lambda: FutureWrapper() + MockOSF._build_url = MagicMock(side_effect=lambda *parts: 'https://api.test/' + '/'.join(parts) + '/') + metadata_response = MagicMock() + metadata_response.status_code = 200 + metadata_response.json.return_value = { + 'data': { + 'attributes': { + 'title': 'Project Test', + } + } + } + 
MockOSF._get = AsyncMock(return_value=metadata_response) + MockOSF._json = MagicMock(side_effect=lambda resp, status: resp.json()) def _create_object_inode(i, p, o): + if isinstance(o, ProjectAttributesEntry): + return ProjectAttributesInode(i, o.project_inode, o) + if isinstance(o, ProjectChildrenEntry): + return ProjectChildrenInode(i, o.project_inode) + if isinstance(o, ProjectLinkedEntry): + return ProjectLinkedInode(i, o.project_inode) if o.name.startswith('Folder-'): return FolderInode(i, p, o) if o.name.startswith('File-'): return FileInode(i, p, o) return StorageInode(i, p, o) mock_create_object_inode.side_effect = _create_object_inode + async def _metadata_fetcher(): + return {} + def _get_children_of(p): + if isinstance(p, ProjectChildrenInode): + return AsyncIterator([]) + if isinstance(p, ProjectLinkedInode): + return AsyncIterator([]) + if isinstance(p, ProjectInode): + metadata = ProjectAttributesEntry(p, _metadata_fetcher, {}) + children = ProjectChildrenEntry(p) + linked = ProjectLinkedEntry(p) + storages = [] + if hasattr(p.object, 'storages'): + storages = list(p.object.storages.__aiter__.return_value) + return AsyncIterator([metadata, children, linked, *storages]) return p.object.storages mock_get_children_of.side_effect = _get_children_of @@ -41,7 +86,7 @@ def _get_children_of(p): await fc.readdir(0, 'token_a') mock_readdir_reply.assert_called_once_with( - 'token_a', b'osfstorage', { + 'token_a', b'.attributes.json', { 'text': 'test metadata' }, 1 ) @@ -50,10 +95,37 @@ def _get_children_of(p): await fc.readdir(1, 'token_b') mock_readdir_reply.assert_called_once_with( - 'token_b', b'gh', { + 'token_b', b'.children', { 'text': 'test metadata' }, 2 ) + mock_readdir_reply.reset_mock() + + await fc.readdir(2, 'token_c') + + mock_readdir_reply.assert_called_once_with( + 'token_c', b'.linked', { + 'text': 'test metadata' + }, 3 + ) + mock_readdir_reply.reset_mock() + + await fc.readdir(3, 'token_d') + + mock_readdir_reply.assert_called_once_with( + 'token_d', b'osfstorage', { + 'text': 'test metadata' + }, 4 + ) + mock_readdir_reply.reset_mock() + + await fc.readdir(4, 'token_e') + + mock_readdir_reply.assert_called_once_with( + 'token_e', b'gh', { + 'text': 'test metadata' + }, 5 + ) @pytest.mark.asyncio @@ -65,7 +137,25 @@ async def test_readdir_from_storage( MockOSF = MagicMock() MockOSF.project = MagicMock(side_effect=lambda p: FutureWrapper(MockProject(p))) MockOSF.aclose = lambda: FutureWrapper() + MockOSF._build_url = MagicMock(side_effect=lambda *parts: 'https://api.test/' + '/'.join(parts) + '/') + metadata_response = MagicMock() + metadata_response.status_code = 200 + metadata_response.json.return_value = { + 'data': { + 'attributes': { + 'title': 'Project Test', + } + } + } + MockOSF._get = AsyncMock(return_value=metadata_response) + MockOSF._json = MagicMock(side_effect=lambda resp, status: resp.json()) def _create_object_inode(i, p, o): + if isinstance(o, ProjectAttributesEntry): + return ProjectAttributesInode(i, o.project_inode, o) + if isinstance(o, ProjectChildrenEntry): + return ProjectChildrenInode(i, o.project_inode) + if isinstance(o, ProjectLinkedEntry): + return ProjectLinkedInode(i, o.project_inode) if o.name.startswith('Folder-'): return FolderInode(i, p, o) if o.name.startswith('File-'): @@ -109,3 +199,52 @@ def _create_object_inode(i, p, o): 'text': 'test metadata' }, 3 ) + + +@pytest.mark.asyncio +@patch.object(pyfuse3, 'readdir_reply') +async def test_readdir_from_all_projects_root(mock_readdir_reply): + mock_osf = MagicMock() + mock_osf.session = 
MagicMock() + mock_osf._build_url = MagicMock(side_effect=lambda *parts: 'https://api.test/' + '/'.join(parts) + '/') + node_payload = { + 'id': 'proj1', + 'type': 'nodes', + 'attributes': { + 'title': 'Project One', + 'registration': False, + }, + 'relationships': { + 'files': { + 'links': { + 'related': { + 'href': 'https://api.test/nodes/proj1/files/' + } + } + } + } + } + response_payload = { + 'data': [node_payload], + 'links': {'next': None}, + } + response = MagicMock() + response.status_code = 200 + response.json.return_value = response_payload + mock_osf._get = AsyncMock(side_effect=[response]) + mock_osf._json = MagicMock(side_effect=lambda resp, status: resp.json()) + + inodes = Inodes(mock_osf, None, list_all_projects=True) + root_inode = await inodes.get(pyfuse3.ROOT_INODE) + + context = MagicMock() + context.inodes = inodes + context.getattr = AsyncMock(return_value={'text': 'meta'}) + + fc = FileContext(context, root_inode) + await fc.readdir(0, 'token_proj') + + mock_readdir_reply.assert_called_once_with( + 'token_proj', b'proj1', {'text': 'meta'}, 1 + ) + mock_osf._get.assert_awaited_once()
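For reference, the pagination strategy behind `_paginate_nodes` and `_with_page_size` reduces to the following synchronous sketch (an assumed simplification of the async implementation above; the canned `pages` dict stands in for the OSF API):

```python
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

def with_page_size(url: str, page_size: int) -> str:
    # Merge page[size] into the query string, as the helper in rdmfs/inode.py does.
    parsed = urlparse(url)
    query = dict(parse_qsl(parsed.query, keep_blank_values=True))
    query["page[size]"] = str(page_size)
    return urlunparse(parsed._replace(query=urlencode(query, doseq=True)))

def paginate(fetch, url, page_size=100):
    # Yield nodes page by page, following links.next and guarding against cycles.
    # `fetch` is any callable that returns the parsed JSON body for a URL.
    next_url, seen = with_page_size(url, page_size), set()
    while next_url:
        payload = fetch(next_url)
        yield from payload.get("data", []) or []
        next_link = (payload.get("links") or {}).get("next")
        if not next_link or next_link in seen:
            break
        seen.add(next_url)
        next_url = next_link

# Demo with canned pages instead of a live API:
start = "https://api.test/users/me/nodes/"
pages = {
    with_page_size(start, 100): {"data": [{"id": "proj1"}],
                                 "links": {"next": "https://api.test/p2"}},
    "https://api.test/p2": {"data": [{"id": "proj2"}], "links": {"next": None}},
}
print([n["id"] for n in paginate(pages.__getitem__, start)])  # ['proj1', 'proj2']
```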