#!python
# Copyright 2019-2026 DADoES, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the root directory in the "LICENSE" file or at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rendered.ai CLI - A comprehensive command-line interface for the Rendered.ai Platform.

This CLI provides JSON output for all commands, making it suitable for automation
and integration with AI agents.

Environment Variables:
    RENDEREDAI_API_KEY      - API key for authentication
    RENDEREDAI_ENVIRONMENT  - Environment: prod, test, or dev (default: prod)
    RENDEREDAI_ENDPOINT     - Custom API endpoint URL

Usage:
    renderedai <resource> <action> [options]

Examples:
    renderedai workspaces get --orgid abc123
    renderedai datasets get --workspaceid xyz789 --limit 10
    renderedai volumes create --name "My Volume" --orgid abc123
    renderedai graphs get --workspaceid xyz789 --graphid graph123
"""

import argparse
import json
import os
import sys
from typing import Any, Dict, List, Optional

import yaml


def get_client():
    """Initialize and return an authenticated anatools client.

    Configuration is read from the environment:
        RENDEREDAI_API_KEY      - API key (required unless an endpoint is set)
        RENDEREDAI_ENVIRONMENT  - prod/test/dev, defaults to prod
        RENDEREDAI_ENDPOINT     - custom API endpoint URL

    Exits the process (status 1) with a JSON error if authentication is not
    configured or the client cannot be created.
    """
    import anatools

    api_key = os.environ.get('RENDEREDAI_API_KEY')
    environment = os.environ.get('RENDEREDAI_ENVIRONMENT', 'prod')
    endpoint = os.environ.get('RENDEREDAI_ENDPOINT')

    # A custom endpoint may carry its own auth, so the API key is only
    # mandatory when no endpoint override is present.
    if not api_key and not endpoint:
        output_error("Authentication required. Set RENDEREDAI_API_KEY environment variable.", "AUTH_REQUIRED")
        sys.exit(1)

    try:
        client = anatools.client(
            APIKey=api_key,
            environment=environment,
            endpoint=endpoint,
            interactive=False,
            verbose=None
        )
        return client
    except Exception as e:
        # Fix: emit a specific error code (previously fell back to the
        # generic default "ERROR") so automation can distinguish auth
        # failures from other failures, consistent with the rest of the CLI.
        output_error(f"Authentication failed: {str(e)}", "AUTH_FAILED")
        sys.exit(1)


def output_json(data: Any, pretty: bool = True):
    """Serialize *data* to stdout as JSON.

    Args:
        data: Any JSON-serializable value; objects that are not natively
            serializable are stringified via ``default=str``.
        pretty: When True (default), emit 2-space-indented JSON; otherwise
            a compact single line.
    """
    indent = 2 if pretty else None
    print(json.dumps(data, indent=indent, default=str))

def output_error(message: str, code: str = "ERROR"):
    """Emit a machine-readable error payload as JSON on stdout."""
    payload = {"error": code, "message": message}
    output_json(payload)


def require_arg(args, name: str, display_name: str) -> str:
    """Return a required CLI argument, or exit with a JSON error.

    Args:
        args: Parsed argparse namespace.
        name: Attribute name on *args* (also the CLI flag name).
        display_name: Human-readable name used in the error message.
    """
    value = getattr(args, name, None)
    if value:
        return value
    output_error(f"{display_name} is required. Use --{name}", f"MISSING_{name.upper()}")
    sys.exit(1)


def parse_json_arg(value: str) -> Any:
    """Decode *value* as JSON; exit with a JSON error if it is malformed."""
    try:
        parsed = json.loads(value)
    except json.JSONDecodeError as e:
        output_error(f"Invalid JSON: {str(e)}")
        sys.exit(1)
    return parsed


def parse_list_arg(value: str) -> List[str]:
    """Split a comma-separated string into a list of whitespace-trimmed items.

    An empty or falsy input yields an empty list. Empty items between
    consecutive commas are preserved (as empty strings), matching split().
    """
    if not value:
        return []
    return list(map(str.strip, value.split(',')))


def load_graph_file(filepath: str) -> Optional[Dict[str, Any]]:
    """Load a graph from a YAML or JSON file.

    Args:
        filepath: Path to the graph file (.yaml, .yml, or .json)

    Returns:
        Dict containing the graph data, or None if loading failed
        (a JSON error payload is printed to stdout in that case).
    """
    if not os.path.exists(filepath):
        output_error(f"File not found: {filepath}", "FILE_NOT_FOUND")
        return None

    try:
        # Fix: read as UTF-8 explicitly. JSON is UTF-8 by specification and
        # YAML graph files are conventionally UTF-8; relying on the platform
        # default encoding can corrupt non-ASCII content (e.g. on Windows).
        with open(filepath, 'r', encoding='utf-8') as f:
            if filepath.endswith('.json'):
                return json.load(f)
            else:  # any other extension is treated as YAML (.yaml / .yml)
                return yaml.safe_load(f)
    except (json.JSONDecodeError, yaml.YAMLError) as e:
        output_error(f"Failed to parse file: {str(e)}", "PARSE_ERROR")
        return None
    except Exception as e:
        output_error(f"Failed to read file: {str(e)}", "READ_ERROR")
        return None


def save_graph_file(filepath: str, data: Dict[str, Any]) -> bool:
    """Save a graph to a YAML or JSON file.

    Args:
        filepath: Path to save the graph file (.yaml, .yml, or .json)
        data: Graph data to save

    Returns:
        True if save succeeded, False otherwise (a JSON error payload is
        printed to stdout on failure).
    """
    try:
        # Fix: write as UTF-8 explicitly so output is portable regardless of
        # the platform's default encoding; matches allow_unicode on the YAML
        # path and the UTF-8 convention for JSON.
        with open(filepath, 'w', encoding='utf-8') as f:
            if filepath.endswith('.json'):
                json.dump(data, f, indent=2)
            else:  # any other extension is written as YAML (.yaml / .yml)
                yaml.dump(data, f, default_flow_style=False, sort_keys=False, allow_unicode=True)
        return True
    except Exception as e:
        output_error(f"Failed to save file: {str(e)}", "WRITE_ERROR")
        return False


# =============================================================================
# WORKSPACES
# =============================================================================

def cmd_workspaces_get(args):
    """List workspaces, optionally filtered by workspace/organization ID."""
    client = get_client()
    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_workspaces(
        workspaceId=args.workspaceid,
        organizationId=args.orgid,
        limit=args.limit,
        cursor=args.cursor,
        fields=fields
    ))


def cmd_workspaces_create(args):
    """Create a workspace in an organization and print its new ID."""
    client = get_client()
    org_id = require_arg(args, 'orgid', 'Organization ID')

    channel_ids = parse_list_arg(args.channelids) if args.channelids else []
    volume_ids = parse_list_arg(args.volumeids) if args.volumeids else []
    tags = parse_list_arg(args.tags) if args.tags else None
    workspace_id = client.create_workspace(
        name=args.name,
        description=args.description or '',
        organizationId=org_id,
        channelIds=channel_ids,
        volumeIds=volume_ids,
        tags=tags
    )
    output_json({"workspaceId": workspace_id})


def cmd_workspaces_edit(args):
    """Update a workspace's name, description, channels, volumes, or tags."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    channel_ids = parse_list_arg(args.channelids) if args.channelids else None
    volume_ids = parse_list_arg(args.volumeids) if args.volumeids else None
    tags = parse_list_arg(args.tags) if args.tags else None
    success = client.edit_workspace(
        workspaceId=workspace_id,
        name=args.name,
        description=args.description,
        channelIds=channel_ids,
        volumeIds=volume_ids,
        tags=tags
    )
    output_json({"success": success})


def cmd_workspaces_delete(args):
    """Delete a workspace by ID."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    # Disable interactive mode so no confirmation prompt blocks automation.
    client.interactive = False
    deleted = client.ana_api.deleteWorkspace(workspaceId=workspace_id)
    output_json({"success": deleted})


# =============================================================================
# ORGANIZATIONS
# =============================================================================

def cmd_organizations_get(args):
    """List organizations visible to the caller."""
    client = get_client()
    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_organizations(
        organizationId=args.orgid,
        limit=args.limit,
        cursor=args.cursor,
        fields=fields
    ))


# =============================================================================
# MEMBERS
# =============================================================================

def cmd_members_get(args):
    """List the members of an organization."""
    client = get_client()
    org_id = require_arg(args, 'orgid', 'Organization ID')

    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_organization_members(
        organizationId=org_id,
        limit=args.limit,
        cursor=args.cursor,
        fields=fields
    ))


# =============================================================================
# DATASETS
# =============================================================================

def cmd_datasets_get(args):
    """List datasets in a workspace, optionally narrowed to one dataset."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_datasets(
        workspaceId=workspace_id,
        datasetId=args.datasetid,
        limit=args.limit,
        cursor=args.cursor,
        fields=fields
    ))


def cmd_datasets_create(args):
    """Start a dataset-generation job from a graph and print the dataset ID."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    tags = parse_list_arg(args.tags) if args.tags else []
    dataset_id = client.create_dataset(
        workspaceId=workspace_id,
        name=args.name,
        graphId=args.graphid,
        description=args.description or '',
        runs=args.runs,
        seed=args.seed,
        priority=args.priority,
        tags=tags
    )
    output_json({"datasetId": dataset_id})


def cmd_datasets_edit(args):
    """Update a dataset's metadata, pause state, or priority."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    tags = parse_list_arg(args.tags) if args.tags else None
    success = client.edit_dataset(
        workspaceId=workspace_id,
        datasetId=dataset_id,
        name=args.name,
        description=args.description,
        tags=tags,
        pause=args.pause,
        priority=args.priority
    )
    output_json({"success": success})


def cmd_datasets_delete(args):
    """Delete a dataset from a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    success = client.delete_dataset(
        workspaceId=workspace_id,
        datasetId=dataset_id
    )
    output_json({"success": success})


def cmd_datasets_cancel(args):
    """Cancel a running dataset-generation job."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    success = client.cancel_dataset(
        workspaceId=workspace_id,
        datasetId=dataset_id
    )
    output_json({"success": success})


def cmd_datasets_download(args):
    """Download a dataset archive (or one file from it); optionally extract.

    With --filepath, fetches a single file; otherwise fetches the whole
    dataset. With --extract, a downloaded .zip is unpacked next to itself
    and the archive removed.
    """
    import zipfile

    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    if args.filepath:
        # Single-file download.
        result = client.download_dataset_file(
            workspaceId=workspace_id,
            datasetId=dataset_id,
            filepath=args.filepath,
            localDir=args.outputdir
        )
    else:
        # Whole-dataset download (zip archive).
        result = client.download_dataset(
            workspaceId=workspace_id,
            datasetId=dataset_id,
            localDir=args.outputdir
        )

    downloaded_zip = bool(result) and result.endswith('.zip') and os.path.isfile(result)
    if args.extract and downloaded_zip:
        extract_dir = os.path.splitext(result)[0]
        with zipfile.ZipFile(result, 'r') as archive:
            archive.extractall(extract_dir)
        os.remove(result)  # drop the archive once its contents are unpacked
        output_json({"downloadPath": extract_dir, "extracted": True})
    else:
        output_json({"downloadPath": result})


def cmd_datasets_upload(args):
    """Upload a local dataset file into a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    tags = parse_list_arg(args.tags) if args.tags else None
    dataset_id = client.upload_dataset(
        workspaceId=workspace_id,
        filename=args.file,
        description=args.description,
        tags=tags
    )
    output_json({"datasetId": dataset_id})


def cmd_datasets_runs(args):
    """List a dataset's runs, optionally filtered by state."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_dataset_runs(
        workspaceId=workspace_id,
        datasetId=dataset_id,
        state=args.state,
        fields=fields
    ))


def cmd_datasets_log(args):
    """Print the log of a specific dataset run."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')
    run_id = require_arg(args, 'runid', 'Run ID')

    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_dataset_log(
        workspaceId=workspace_id,
        datasetId=dataset_id,
        runId=run_id,
        saveLogFile=False,  # always emit to stdout, never write a file
        fields=fields
    ))


def cmd_datasets_files(args):
    """List files inside a dataset, optionally under a subpath."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    output_json(client.get_dataset_files(
        workspaceId=workspace_id,
        datasetId=dataset_id,
        path=args.path,
        limit=args.limit,
        cursor=args.cursor
    ))


def cmd_datasets_jobs(args):
    """List dataset jobs by workspace, organization, and/or dataset."""
    client = get_client()
    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_dataset_jobs(
        workspaceId=args.workspaceid,
        organizationId=args.orgid,
        datasetId=args.datasetid,
        limit=args.limit,
        cursor=args.cursor,
        fields=fields
    ))


def cmd_datasets_create_mixed(args):
    """Create a mixed dataset from multiple source datasets."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    # --parameters is a JSON document describing the mix of source datasets.
    parameters = parse_json_arg(args.parameters)
    tags = parse_list_arg(args.tags) if args.tags else None
    dataset_id = client.create_mixed_dataset(
        workspaceId=workspace_id,
        name=args.name,
        parameters=parameters,
        description=args.description or '',
        seed=args.seed,
        tags=tags
    )
    output_json({"datasetId": dataset_id})


# =============================================================================
# VOLUMES
# =============================================================================

def cmd_volumes_get(args):
    """List volumes by volume, workspace, and/or organization ID."""
    client = get_client()
    fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_volumes(
        volumeId=args.volumeid,
        workspaceId=args.workspaceid,
        organizationId=args.orgid,
        limit=args.limit,
        cursor=args.cursor,
        fields=fields
    ))


def cmd_volumes_create(args):
    """Create a volume in an organization and print its new ID."""
    client = get_client()
    org_id = require_arg(args, 'orgid', 'Organization ID')

    tags = parse_list_arg(args.tags) if args.tags else None
    volume_id = client.create_volume(
        name=args.name,
        description=args.description,
        organizationId=org_id,
        permission=args.permission,
        tags=tags
    )
    output_json({"volumeId": volume_id})


def cmd_volumes_edit(args):
    """Update a volume's name, description, permission, or tags."""
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')

    tags = parse_list_arg(args.tags) if args.tags else None
    success = client.edit_volume(
        volumeId=volume_id,
        name=args.name,
        description=args.description,
        permission=args.permission,
        tags=tags
    )
    output_json({"success": success})


def cmd_volumes_delete(args):
    """Delete a volume by ID."""
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')
    output_json({"success": client.delete_volume(volumeId=volume_id)})


# =============================================================================
# VOLUME-DATA
# =============================================================================

def cmd_volume_data_get(args):
    """List data in a volume, optionally scoped to a directory or files."""
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')

    files = parse_list_arg(args.files) if args.files else None
    output_json(client.get_volume_data(
        volumeId=volume_id,
        dir=args.dir,
        files=files,
        recursive=args.recursive,
        limit=args.limit,
        cursor=args.cursor
    ))


def cmd_volume_data_upload(args):
    """Upload local files or a directory into a volume."""
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')

    files = parse_list_arg(args.files) if args.files else None
    # The client call reports progress itself and returns nothing useful;
    # reaching this line without an exception means the upload completed.
    client.upload_volume_data(
        volumeId=volume_id,
        localDir=args.localdir,
        files=files,
        destinationDir=args.destdir,
        sync=args.sync
    )
    output_json({"success": True})


def cmd_volume_data_download(args):
    """Download files from a volume to a local directory."""
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')

    files = parse_list_arg(args.files) if args.files else []
    # No return value to report; success means no exception was raised.
    client.download_volume_data(
        volumeId=volume_id,
        localDir=args.outputdir,
        files=files,
        recursive=args.recursive,
        sync=args.sync
    )
    output_json({"success": True})


def cmd_volume_data_delete(args):
    """Delete files from a volume."""
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')

    files = parse_list_arg(args.files) if args.files else None
    success = client.delete_volume_data(
        volumeId=volume_id,
        files=files
    )
    output_json({"success": success})


def cmd_volume_data_search(args):
    """Search a volume's contents by keywords, file formats, and types."""
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')

    keywords = parse_list_arg(args.keywords) if args.keywords else None
    fileformats = parse_list_arg(args.formats) if args.formats else None
    filetypes = parse_list_arg(args.types) if args.types else None
    output_json(client.search_volume(
        volumeId=volume_id,
        directory=args.dir,
        recursive=args.recursive,
        keywords=keywords,
        fileformats=fileformats,
        filetypes=filetypes,
        limit=args.limit,
        cursor=args.cursor
    ))


def cmd_volumes_mount(args):
    """Mount a volume to local filesystem.

    Workflow: verify a supported S3 mount tool is installed (goofys, s3fs,
    or mount-s3), fetch temporary AWS credentials for the volume, write them
    into an ~/.aws/credentials profile, mount the bucket under
    ~/.renderedai/volumes/<volumeId>, symlink it into <path>/volumes/, and
    record the mount in ~/.renderedai/.mounts.json for later unmounting.
    """
    import subprocess
    import time

    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')
    path = args.path or os.getcwd()
    mountexec = args.mountexec or 'goofys'
    home = os.path.expanduser('~')

    # Verify the mount executable is available; if the requested tool is not
    # installed, fall back to the first working tool in the probe order.
    exec_available = False
    for exec_name in ['goofys', 's3fs', 'mount-s3']:
        try:
            subprocess.run([exec_name, '--version'], capture_output=True, check=True)
            if mountexec == exec_name:
                exec_available = True
                break
            elif not exec_available:
                mountexec = exec_name
                exec_available = True
        except:  # NOTE(review): bare except; (OSError, subprocess.CalledProcessError) would be narrower
            pass

    if not exec_available:
        output_error("No mount executable found. Install goofys, s3fs, or mount-s3.")
        return

    # Get volume info to confirm access and check the permission level.
    volume_data = client.get_volumes(volumeId=volume_id)
    if not volume_data:
        output_error("Volume not found or permission denied", "VOLUME_NOT_FOUND")
        return

    if volume_data[0].get('permission') not in ['read', 'write']:
        output_error("Insufficient permissions (view-only)", "PERMISSION_DENIED")
        return

    # Get temporary mount credentials from the platform.
    mount_data = client.mount_volumes(volumes=[volume_id])
    if not mount_data:
        output_error("Failed to get mount credentials", "MOUNT_FAILED")
        return

    # Write AWS credentials under a volume-specific profile name.
    aws_dir = os.path.join(home, '.aws')
    os.makedirs(aws_dir, exist_ok=True)
    profile_name = f'renderedai-volumes-{volume_id}'

    # Read existing credentials so other profiles survive the rewrite below.
    creds_file = os.path.join(aws_dir, 'credentials')
    profiles = {}
    if os.path.exists(creds_file):
        with open(creds_file, 'r') as f:
            current_profile = None
            for line in f:
                line = line.rstrip()
                if line.startswith('[') and line.endswith(']'):
                    current_profile = line[1:-1]
                    profiles[current_profile] = []
                elif current_profile:
                    profiles[current_profile].append(line)

    # Add (or replace) the profile for this volume.
    profiles[profile_name] = [
        f"aws_access_key_id={mount_data['credentials']['accesskeyid']}",
        f"aws_secret_access_key={mount_data['credentials']['accesskey']}",
        f"aws_session_token={mount_data['credentials']['sessiontoken']}"
    ]

    # Write all profiles back out.
    with open(creds_file, 'w') as f:
        for profile, lines in profiles.items():
            f.write(f'[{profile}]\n')
            for line in lines:
                if line:
                    f.write(f'{line}\n')

    # Create the mount point under the user's home directory.
    mountpoint = os.path.join(home, '.renderedai', 'volumes', volume_id)
    os.makedirs(mountpoint, exist_ok=True)

    # Build the mount command for the selected tool.
    # NOTE(review): the slicing below ([:-1] strips a trailing char, [1:]
    # drops a leading char) assumes a specific bucket-key format from the
    # mount_volumes response -- confirm against the API before changing.
    bucket_key = mount_data['keys'][0] if mount_data.get('keys') else None
    if not bucket_key:
        output_error("No bucket key returned", "MOUNT_FAILED")
        return

    # 'r' marks the mount read-only; anything else mounts read-write.
    rw_flag = '-o ro' if mount_data.get('rw', ['r'])[0] == 'r' else ''

    if mountexec == 'goofys':
        command = f'goofys {rw_flag} --profile {profile_name} {bucket_key[:-1]} {mountpoint}'
    elif mountexec == 's3fs':
        command = f's3fs {bucket_key[:-1]} {mountpoint} -o profile={profile_name} -o endpoint=us-west-2 -o url="https://s3-us-west-2.amazonaws.com" {rw_flag}'
    else:  # mount-s3
        readonly = '--read-only' if rw_flag else ''
        command = f'mount-s3 {readonly} --profile {profile_name} --prefix {bucket_key[1:]+"/"} {bucket_key[:-1]} {mountpoint}'

    # Execute the mount in the background.
    # NOTE(review): shell=True with interpolated values; inputs come from the
    # platform and local env, but shlex.quote()-ing them would be safer.
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    time.sleep(2)  # Wait for mount to complete (no explicit readiness check)

    # Create a symlink in the target path so the mount is visible where asked.
    symlink_path = os.path.join(path, 'volumes', volume_id)
    os.makedirs(os.path.join(path, 'volumes'), exist_ok=True)
    if os.path.exists(symlink_path):
        try:
            os.unlink(symlink_path)
        except:  # best-effort: a stale link we cannot remove is not fatal
            pass
    try:
        os.symlink(mountpoint, symlink_path)
    except:  # best-effort: e.g. symlinks unsupported on this filesystem
        pass

    # Save mount info so cmd_volumes_unmount can find the mount later.
    mountfile = os.path.join(home, '.renderedai', '.mounts.json')
    mounts = {"volumes": {}, "workspaces": {}}
    if os.path.exists(mountfile):
        with open(mountfile, 'r') as f:
            mounts = json.load(f)

    mounts['volumes'][volume_id] = {
        'status': 'mounted',
        'exec': mountexec,
        'name': volume_data[0].get('name', volume_id),
        'mountpath': mountpoint,
        'symlink': symlink_path,
        'profile': profile_name,
        'pid': proc.pid
    }

    with open(mountfile, 'w') as f:
        json.dump(mounts, indent=4, sort_keys=True, fp=f)

    output_json({
        "volumeId": volume_id,
        "name": volume_data[0].get('name'),
        "mountpath": mountpoint,
        "symlink": symlink_path,
        "readonly": mount_data.get('rw', ['r'])[0] == 'r'
    })

def cmd_volumes_unmount(args):
    """Unmount a volume from local filesystem.

    Looks the volume up in ~/.renderedai/.mounts.json, kills processes
    holding the mount, removes the symlink, unmounts via fusermount/umount,
    cleans up the empty mount directory, and deletes the mount record.
    """
    import subprocess

    volume_id = require_arg(args, 'volumeid', 'Volume ID')
    home = os.path.expanduser('~')
    mountfile = os.path.join(home, '.renderedai', '.mounts.json')

    if not os.path.exists(mountfile):
        output_error("Volume not mounted", "NOT_MOUNTED")
        return

    with open(mountfile, 'r') as f:
        mounts = json.load(f)

    mount_info = mounts.get('volumes', {}).get(volume_id)
    if not mount_info or mount_info.get('status') != 'mounted':
        output_error("Volume not mounted", "NOT_MOUNTED")
        return

    try:
        # Kill processes using the mount so the unmount is not blocked.
        subprocess.run(["fuser", "-km", mount_info['mountpath']], check=False,
                     stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        # Remove the symlink created at mount time.
        if os.path.exists(mount_info.get('symlink', '')):
            os.unlink(mount_info['symlink'])

        # Unmount: FUSE lazy unmount first, then a lazy/forced umount.
        # check=False because availability varies by platform; fall through.
        subprocess.run(["fusermount", "-uz", mount_info['mountpath']], check=False,
                     stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        subprocess.run(["umount", "-lf", mount_info['mountpath']], check=False,
                     stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        # Clean up the mount directory only when it is empty (or holds just
        # 'lost+found') -- never delete real data left behind by a bad unmount.
        if os.path.isdir(mount_info['mountpath']):
            contents = os.listdir(mount_info['mountpath'])
            if not contents or (len(contents) == 1 and contents[0] == 'lost+found'):
                subprocess.run(["rm", "-rf", mount_info['mountpath']], check=False,
                             stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        del mounts['volumes'][volume_id]

        # Save the updated mounts registry without this volume's entry.
        with open(mountfile, 'w') as f:
            json.dump(mounts, indent=4, sort_keys=True, fp=f)

        output_json({"volumeId": volume_id, "name": mount_info.get('name'), "success": True})

    except Exception as e:
        output_error(str(e), "UNMOUNT_FAILED")

def cmd_workspaces_mount(args):
    """Mount a workspace to local filesystem.

    Mirrors cmd_volumes_mount: verify a supported S3 mount tool is installed
    (goofys, s3fs, or mount-s3), fetch temporary AWS credentials for the
    workspace, write them into an ~/.aws/credentials profile, mount the
    bucket under ~/.renderedai/workspaces/<workspaceId>, symlink it into
    <path>/workspaces/, and record the mount in ~/.renderedai/.mounts.json.
    """
    import subprocess
    import time

    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    path = args.path or os.getcwd()
    mountexec = args.mountexec or 'goofys'
    home = os.path.expanduser('~')

    # Verify the mount executable is available; if the requested tool is not
    # installed, fall back to the first working tool in the probe order.
    exec_available = False
    for exec_name in ['goofys', 's3fs', 'mount-s3']:
        try:
            subprocess.run([exec_name, '--version'], capture_output=True, check=True)
            if mountexec == exec_name:
                exec_available = True
                break
            elif not exec_available:
                mountexec = exec_name
                exec_available = True
        except:  # NOTE(review): bare except; (OSError, subprocess.CalledProcessError) would be narrower
            pass

    if not exec_available:
        output_error("No mount executable found. Install goofys, s3fs, or mount-s3.")
        return

    # Get workspace info to confirm access.
    workspace_data = client.get_workspaces(workspaceId=workspace_id)
    if not workspace_data:
        output_error("Workspace not found or permission denied", "WORKSPACE_NOT_FOUND")
        return

    # Get temporary mount credentials from the platform.
    mount_data = client.mount_workspaces(workspaces=[workspace_id])
    if not mount_data:
        output_error("Failed to get mount credentials", "MOUNT_FAILED")
        return

    # Write AWS credentials under a workspace-specific profile name.
    aws_dir = os.path.join(home, '.aws')
    os.makedirs(aws_dir, exist_ok=True)
    profile_name = f'renderedai-workspaces-{workspace_id}'

    # Read existing credentials so other profiles survive the rewrite below.
    creds_file = os.path.join(aws_dir, 'credentials')
    profiles = {}
    if os.path.exists(creds_file):
        with open(creds_file, 'r') as f:
            current_profile = None
            for line in f:
                line = line.rstrip()
                if line.startswith('[') and line.endswith(']'):
                    current_profile = line[1:-1]
                    profiles[current_profile] = []
                elif current_profile:
                    profiles[current_profile].append(line)

    # Add (or replace) the profile for this workspace.
    profiles[profile_name] = [
        f"aws_access_key_id={mount_data['credentials']['accesskeyid']}",
        f"aws_secret_access_key={mount_data['credentials']['accesskey']}",
        f"aws_session_token={mount_data['credentials']['sessiontoken']}"
    ]

    # Write all profiles back out.
    with open(creds_file, 'w') as f:
        for profile, lines in profiles.items():
            f.write(f'[{profile}]\n')
            for line in lines:
                if line:
                    f.write(f'{line}\n')

    # Create the mount point under the user's home directory.
    mountpoint = os.path.join(home, '.renderedai', 'workspaces', workspace_id)
    os.makedirs(mountpoint, exist_ok=True)

    # Build the mount command for the selected tool.
    # NOTE(review): the slicing below ([:-1] strips a trailing char, [1:]
    # drops a leading char) assumes a specific bucket-key format from the
    # mount_workspaces response -- confirm against the API before changing.
    bucket_key = mount_data['keys'][0] if mount_data.get('keys') else None
    if not bucket_key:
        output_error("No bucket key returned", "MOUNT_FAILED")
        return

    # 'r' marks the mount read-only; anything else mounts read-write.
    rw_flag = '-o ro' if mount_data.get('rw', ['r'])[0] == 'r' else ''

    if mountexec == 'goofys':
        command = f'goofys {rw_flag} --profile {profile_name} {bucket_key[:-1]} {mountpoint}'
    elif mountexec == 's3fs':
        command = f's3fs {bucket_key[:-1]} {mountpoint} -o profile={profile_name} -o endpoint=us-west-2 -o url="https://s3-us-west-2.amazonaws.com" {rw_flag}'
    else:  # mount-s3
        readonly = '--read-only' if rw_flag else ''
        command = f'mount-s3 {readonly} --profile {profile_name} --prefix {bucket_key[1:]+"/"} {bucket_key[:-1]} {mountpoint}'

    # Execute the mount in the background.
    # NOTE(review): shell=True with interpolated values; inputs come from the
    # platform and local env, but shlex.quote()-ing them would be safer.
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    time.sleep(2)  # Wait for mount to complete (no explicit readiness check)

    # Create a symlink in the target path so the mount is visible where asked.
    symlink_path = os.path.join(path, 'workspaces', workspace_id)
    os.makedirs(os.path.join(path, 'workspaces'), exist_ok=True)
    if os.path.exists(symlink_path):
        try:
            os.unlink(symlink_path)
        except:  # best-effort: a stale link we cannot remove is not fatal
            pass
    try:
        os.symlink(mountpoint, symlink_path)
    except:  # best-effort: e.g. symlinks unsupported on this filesystem
        pass

    # Save mount info so cmd_workspaces_unmount can find the mount later.
    mountfile = os.path.join(home, '.renderedai', '.mounts.json')
    mounts = {"volumes": {}, "workspaces": {}}
    if os.path.exists(mountfile):
        with open(mountfile, 'r') as f:
            mounts = json.load(f)

    mounts['workspaces'][workspace_id] = {
        'status': 'mounted',
        'exec': mountexec,
        'name': workspace_data[0].get('name', workspace_id),
        'mountpath': mountpoint,
        'symlink': symlink_path,
        'profile': profile_name,
        'pid': proc.pid
    }

    with open(mountfile, 'w') as f:
        json.dump(mounts, indent=4, sort_keys=True, fp=f)

    output_json({
        "workspaceId": workspace_id,
        "name": workspace_data[0].get('name'),
        "mountpath": mountpoint,
        "symlink": symlink_path,
        "readonly": mount_data.get('rw', ['r'])[0] == 'r'
    })


def cmd_workspaces_unmount(args):
    """Unmount a workspace from local filesystem.

    Looks the workspace up in ~/.renderedai/.mounts.json, kills processes
    holding the mount, removes the symlink, unmounts via fusermount/umount,
    cleans up the empty mount directory, and deletes the mount record.
    """
    import subprocess

    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    home = os.path.expanduser('~')
    mountfile = os.path.join(home, '.renderedai', '.mounts.json')

    if not os.path.exists(mountfile):
        output_error("Workspace not mounted", "NOT_MOUNTED")
        return

    with open(mountfile, 'r') as f:
        mounts = json.load(f)

    mount_info = mounts.get('workspaces', {}).get(workspace_id)
    if not mount_info or mount_info.get('status') != 'mounted':
        output_error("Workspace not mounted", "NOT_MOUNTED")
        return

    try:
        # Kill processes using the mount so the unmount is not blocked.
        subprocess.run(["fuser", "-km", mount_info['mountpath']], check=False,
                     stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        # Remove the symlink created at mount time.
        if os.path.exists(mount_info.get('symlink', '')):
            os.unlink(mount_info['symlink'])

        # Unmount: FUSE lazy unmount first, then a lazy/forced umount.
        # check=False because availability varies by platform; fall through.
        subprocess.run(["fusermount", "-uz", mount_info['mountpath']], check=False,
                     stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        subprocess.run(["umount", "-lf", mount_info['mountpath']], check=False,
                     stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        # Clean up the mount directory only when it is empty (or holds just
        # 'lost+found') -- never delete real data left behind by a bad unmount.
        if os.path.isdir(mount_info['mountpath']):
            contents = os.listdir(mount_info['mountpath'])
            if not contents or (len(contents) == 1 and contents[0] == 'lost+found'):
                subprocess.run(["rm", "-rf", mount_info['mountpath']], check=False,
                             stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        del mounts['workspaces'][workspace_id]

        # Save the updated mounts registry without this workspace's entry.
        with open(mountfile, 'w') as f:
            json.dump(mounts, indent=4, sort_keys=True, fp=f)

        output_json({"workspaceId": workspace_id, "name": mount_info.get('name'), "success": True})

    except Exception as e:
        output_error(str(e), "UNMOUNT_FAILED")

# =============================================================================
# GRAPHS
# =============================================================================

def cmd_graphs_get(args):
    """List graphs in a workspace, optionally narrowed to one graph ID."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    # Only parse --fields when the flag was supplied.
    fields = parse_list_arg(args.fields) if args.fields else None

    graphs = client.get_graphs(
        workspaceId=workspace_id,
        graphId=args.graphid,
        staged=args.staged,
        limit=args.limit,
        cursor=args.cursor,
        fields=fields,
    )
    output_json(graphs)


def cmd_graphs_create(args):
    """Create a graph (editable)."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    # upload_graph returns the new graph's ID; staged=False keeps it editable.
    new_graph_id = client.upload_graph(
        workspaceId=workspace_id,
        graph=args.file,
        channelId=args.channelid,
        name=args.name,
        description=args.description,
        staged=False,
    )
    output_json({"graphId": new_graph_id})


def cmd_graphs_edit(args):
    """Edit a graph's metadata and/or contents."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    graph_id = require_arg(args, 'graphid', 'Graph ID')

    # Optional updates: unspecified flags are passed through as None.
    new_contents = args.file if args.file else None
    new_tags = parse_list_arg(args.tags) if args.tags else None

    success = client.edit_graph(
        workspaceId=workspace_id,
        graphId=graph_id,
        name=args.name,
        description=args.description,
        graph=new_contents,
        tags=new_tags,
    )
    output_json({"success": success})


def cmd_graphs_delete(args):
    """Delete a graph."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    graph_id = require_arg(args, 'graphid', 'Graph ID')

    success = client.delete_graph(workspaceId=workspace_id, graphId=graph_id)
    output_json({"success": success})


def cmd_graphs_download(args):
    """Download a graph to a file."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    graph_id = require_arg(args, 'graphid', 'Graph ID')

    # Default the destination to "<graphId>.yaml" in the current directory.
    destination = args.outputfile or f"{graph_id}.yaml"

    written_path = client.download_graph(
        workspaceId=workspace_id,
        graphId=graph_id,
        filepath=destination,
    )
    output_json({"filepath": written_path})


def cmd_graphs_stage(args):
    """Stage an existing graph (creates a read-only copy).

    Downloads the source graph to a temporary file, then re-uploads it
    with staged=True. The staged copy's name defaults to "<name>-staged"
    and its description defaults to the source graph's description.
    """
    import tempfile

    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    graph_id = require_arg(args, 'graphid', 'Graph ID')

    # Look up the source graph so we can reuse its channel, name and description.
    graphs = client.get_graphs(workspaceId=workspace_id, graphId=graph_id)
    if not graphs:
        output_error(f"Graph {graph_id} not found in workspace", "GRAPH_NOT_FOUND")
        return
    graph_info = graphs[0]

    # Round-trip through a temp directory: download the graph, then
    # re-upload the file as a staged (read-only) copy.
    with tempfile.TemporaryDirectory() as tmpdir:
        filepath = os.path.join(tmpdir, "graph.yaml")
        client.download_graph(workspaceId=workspace_id, graphId=graph_id, filepath=filepath)

        name = args.name if args.name else f"{graph_info['name']}-staged"
        result = client.upload_graph(
            workspaceId=workspace_id,
            graph=filepath,
            channelId=graph_info['channelId'],
            name=name,
            description=args.description if args.description else graph_info.get('description', ''),
            staged=True
        )
    output_json({"graphId": result})


# =============================================================================
# DATASET-VIEWER
# =============================================================================

# Files exchanged with the dataset-viewer extension under ~/.theia:
# the trigger file carries one-shot action requests written by this CLI,
# the status file is read back to report viewer state.
DATASET_VIEWER_TRIGGER_PATH = os.path.join(os.path.expanduser('~'), '.theia', 'dataset-viewer-open')
DATASET_VIEWER_STATUS_PATH = os.path.join(os.path.expanduser('~'), '.theia', 'dataset-viewer-status.json')


def _write_dataset_viewer_trigger(action: str, payload: dict):
    """Write a trigger file for the dataset viewer extension to pick up."""
    data = {"action": action, **payload}
    # Make sure ~/.theia exists before writing the trigger file.
    os.makedirs(os.path.dirname(DATASET_VIEWER_TRIGGER_PATH), exist_ok=True)
    with open(DATASET_VIEWER_TRIGGER_PATH, 'w') as handle:
        json.dump(data, handle, indent=2)
    return data


def cmd_dataset_viewer_open(args):
    """Open a dataset folder in the Annotation Viewer."""
    raw_path = getattr(args, 'path', None)
    if not raw_path:
        output_error("Dataset path is required. Use --path", "MISSING_PATH")
        sys.exit(1)

    dataset_path = os.path.abspath(raw_path)
    if not os.path.isdir(dataset_path):
        output_error(f"Directory not found: {dataset_path}", "PATH_NOT_FOUND")
        sys.exit(1)

    # The viewer expects an images/ subdirectory inside the dataset folder.
    if not os.path.isdir(os.path.join(dataset_path, 'images')):
        output_error(f"No images/ directory found in {dataset_path}", "NO_IMAGES_DIR")
        sys.exit(1)

    index = int(getattr(args, 'index', None) or 0)

    _write_dataset_viewer_trigger('open', {
        'datasetPath': dataset_path,
        'imageIndex': index,
    })

    output_json({
        "status": "ok",
        "action": "open",
        "datasetPath": dataset_path,
        "imageIndex": index,
        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
    })


def cmd_dataset_viewer_next(args):
    """Navigate to the next image in the dataset viewer."""
    # Fire-and-forget: the trigger file's contents are not needed here.
    _write_dataset_viewer_trigger('next', {})
    output_json({"status": "ok", "action": "next", "triggerPath": DATASET_VIEWER_TRIGGER_PATH})


def cmd_dataset_viewer_prev(args):
    """Navigate to the previous image in the dataset viewer."""
    # Fire-and-forget: the trigger file's contents are not needed here.
    _write_dataset_viewer_trigger('prev', {})
    output_json({"status": "ok", "action": "prev", "triggerPath": DATASET_VIEWER_TRIGGER_PATH})


def cmd_dataset_viewer_goto(args):
    """Navigate to a specific image by index or name.

    At least one of --index (numeric position) or --name (image filename)
    is required; both may be supplied together.
    """
    index = getattr(args, 'index', None)
    name = getattr(args, 'name', None)

    if index is None and not name:
        output_error("Specify --index or --name", "MISSING_TARGET")
        sys.exit(1)

    payload = {}
    if index is not None:
        payload['imageIndex'] = int(index)
    if name:
        payload['imageName'] = name

    _write_dataset_viewer_trigger('goto', payload)
    output_json({
        "status": "ok",
        "action": "goto",
        **payload,
        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
    })


def cmd_dataset_viewer_annotations(args):
    """Set which annotation types are enabled in the viewer.

    --types is a comma-separated subset of: bbox, bbox3d, segmentation,
    centroid, mask. Unknown types are rejected before any trigger is written.
    """
    types_str = getattr(args, 'types', None)
    if not types_str:
        output_error("Annotation types required. Use --types (comma-separated: bbox,bbox3d,segmentation,centroid,mask)", "MISSING_TYPES")
        sys.exit(1)

    valid_types = {'bbox', 'bbox3d', 'segmentation', 'centroid', 'mask'}
    types = [t.strip() for t in types_str.split(',')]
    invalid = [t for t in types if t not in valid_types]
    if invalid:
        output_error(f"Invalid annotation types: {', '.join(invalid)}. Valid: {', '.join(sorted(valid_types))}", "INVALID_TYPES")
        sys.exit(1)

    _write_dataset_viewer_trigger('setAnnotations', {'annotations': types})
    output_json({
        "status": "ok",
        "action": "setAnnotations",
        "annotations": types,
        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
    })


def cmd_dataset_viewer_filter(args):
    """Set object type filter in the viewer.

    Omitting --types sends an empty list, which clears the filter
    (show all object types).
    """
    types_str = getattr(args, 'types', None)
    types = [t.strip() for t in types_str.split(',')] if types_str else []

    _write_dataset_viewer_trigger('setFilter', {'objectTypes': types})
    output_json({
        "status": "ok",
        "action": "setFilter",
        "objectTypes": types,
        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
    })


def cmd_dataset_viewer_status(args):
    """Get the current status of the dataset annotation viewer."""
    # No status file means the viewer has never written state yet.
    if not os.path.exists(DATASET_VIEWER_STATUS_PATH):
        output_json({
            "status": "no_status_file",
            "message": "No dataset viewer status file found. The viewer may not have been opened yet.",
            "statusPath": DATASET_VIEWER_STATUS_PATH,
            "sessions": [],
        })
        return

    try:
        with open(DATASET_VIEWER_STATUS_PATH, 'r') as handle:
            status_data = json.load(handle)
    except json.JSONDecodeError as e:
        output_error(f"Failed to parse status file: {e}", "PARSE_ERROR")
        return
    except IOError as e:
        output_error(f"Failed to read status file: {e}", "READ_ERROR")
        return

    # When --path is given, keep only the session for that dataset.
    requested = getattr(args, 'path', None)
    if requested:
        wanted = os.path.abspath(requested)
        sessions = status_data.get('sessions', {})
        if isinstance(sessions, dict):
            status_data['sessions'] = {
                path: info for path, info in sessions.items()
                if os.path.abspath(path) == wanted
            }

    output_json({
        "status": "ok",
        "statusPath": DATASET_VIEWER_STATUS_PATH,
        **status_data,
    })


# =============================================================================
# GRAPH-EDITOR
# =============================================================================

def cmd_graph_editor_open(args):
    """Open a graph in the graph editor.

    Supports two modes:
    1. Download from platform: --workspaceid and --graphid (downloads graph and schema)
    2. Local files: --graphfile and --schemafile (uses existing local files)

    In both modes the function ends by writing a trigger file under
    ~/.theia that the graph editor extension picks up, then emits a JSON
    result with the resolved graph/schema paths.
    """
    # Get arguments (getattr with default so missing argparse attrs don't raise)
    workspace_id = getattr(args, 'workspaceid', None)
    graph_id = getattr(args, 'graphid', None)
    graph_file = getattr(args, 'graphfile', None)
    schema_file = getattr(args, 'schemafile', None)

    # Determine mode and validate arguments: the two argument groups are
    # mutually exclusive, and at least one group must be present.
    has_platform_args = workspace_id or graph_id
    has_local_args = graph_file or schema_file

    if has_platform_args and has_local_args:
        output_error(
            "Cannot use both platform arguments (--workspaceid/--graphid) and local file arguments (--graphfile/--schemafile)",
            "INVALID_ARGS"
        )
        return

    if not has_platform_args and not has_local_args:
        output_error(
            "Must provide either --workspaceid and --graphid (to download from platform) or --graphfile and --schemafile (to use local files)",
            "MISSING_ARGS"
        )
        return

    # Mode 1: Local files — both files must be given and must exist on disk.
    if has_local_args:
        if not graph_file:
            output_error("--graphfile is required when using local files", "MISSING_GRAPHFILE")
            return
        if not schema_file:
            output_error("--schemafile is required when using local files", "MISSING_SCHEMAFILE")
            return

        # Validate files exist
        if not os.path.exists(graph_file):
            output_error(f"Graph file not found: {graph_file}", "FILE_NOT_FOUND")
            return
        if not os.path.exists(schema_file):
            output_error(f"Schema file not found: {schema_file}", "FILE_NOT_FOUND")
            return

        graph_path = graph_file
        schema_path = schema_file
        # Clear platform identifiers so the result block below skips them.
        graph_id = None
        channel_id = None
        graph_name = os.path.basename(graph_file)

    # Mode 2: Download from platform
    else:
        if not workspace_id:
            output_error("--workspaceid is required when downloading from platform", "MISSING_WORKSPACEID")
            return
        if not graph_id:
            output_error("--graphid is required when downloading from platform", "MISSING_GRAPHID")
            return

        client = get_client()
        # NOTE(review): assumes the parser always defines --outputdir for
        # this subcommand; otherwise this raises AttributeError — confirm.
        directory = args.outputdir or os.getcwd()

        # Get graph metadata to find channelId (needed to fetch the schema)
        graphs = client.get_graphs(workspaceId=workspace_id, graphId=graph_id)
        if not graphs:
            output_error(f"Graph {graph_id} not found", "GRAPH_NOT_FOUND")
            return

        graph_info = graphs[0]
        channel_id = graph_info.get('channelId')
        if not channel_id:
            output_error("Graph has no associated channel", "NO_CHANNEL")
            return

        # Create output directory if needed
        os.makedirs(directory, exist_ok=True)

        # Download graph; spaces in the name are replaced to form a safe filename
        graph_name = graph_info.get('name', graph_id).replace(' ', '_')
        graph_path = os.path.join(directory, f"{graph_name}.yaml")
        client.download_graph(workspaceId=workspace_id, graphId=graph_id, filepath=graph_path)

        # Download channel schema (the node-class catalog for the editor)
        schema = client.get_channel_nodes(channelId=channel_id)
        if not schema:
            output_error("Failed to fetch channel schema", "SCHEMA_ERROR")
            return

        schema_path = os.path.join(directory, f"{channel_id}_schema.json")
        with open(schema_path, 'w') as f:
            json.dump(schema, f, indent=2)

    # Write trigger file to open in graph editor
    trigger_path = os.path.join(os.path.expanduser('~'), '.theia', 'graph-editor-open')
    os.makedirs(os.path.dirname(trigger_path), exist_ok=True)

    trigger_data = {
        "graphPath": os.path.abspath(graph_path),
        "schemaPath": os.path.abspath(schema_path),
        "autoLayout": True
    }

    with open(trigger_path, 'w') as f:
        json.dump(trigger_data, f)

    # Build output
    result = {
        "graphPath": os.path.abspath(graph_path),
        "schemaPath": os.path.abspath(schema_path),
        "triggerPath": trigger_path
    }

    # Add platform-specific fields if downloaded (graph_id/channel_id are
    # None in local mode, so these are skipped there)
    if graph_id:
        result["graphId"] = graph_id
    if channel_id:
        result["channelId"] = channel_id
    if has_local_args:
        result["mode"] = "local"
    else:
        result["mode"] = "platform"
        result["graphName"] = graph_info.get('name')

    output_json(result)


def cmd_graph_editor_edit_node(args):
    """Edit a node's values in a local graph file.

    Modifies the values of an existing node in the graph. Use --values to pass
    a JSON object with the key-value pairs to update. Existing values not
    specified in --values are preserved.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    node_name = require_arg(args, 'node', 'Node name')
    values_json = require_arg(args, 'values', 'Values JSON')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})
    if node_name not in nodes:
        output_error(f"Node '{node_name}' not found in graph", "NODE_NOT_FOUND")
        return

    # --values must decode to a JSON object.
    try:
        updates = json.loads(values_json)
    except json.JSONDecodeError as e:
        output_error(f"Invalid JSON for --values: {str(e)}", "INVALID_JSON")
        return
    if not isinstance(updates, dict):
        output_error("--values must be a JSON object", "INVALID_VALUES")
        return

    # Merge into the node's existing values, creating the dict if absent.
    node = nodes[node_name]
    node.setdefault('values', {}).update(updates)

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "node": node_name,
        "updatedValues": updates,
        "allValues": node['values']
    })


def cmd_graph_editor_add_node(args):
    """Add a new node to a local graph file.

    Adds a node with the specified nodeClass. Use --name for a custom node name
    (defaults to nodeClass_N where N is auto-incremented). Use --values to set
    initial parameter values, --location to set x,y position.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    node_class = require_arg(args, 'nodeclass', 'Node class')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})

    # Auto-name as "<class>_<N>" with the lowest free N when --name is absent.
    node_name = args.name
    if not node_name:
        n = 1
        while f"{node_class}_{n}" in nodes:
            n += 1
        node_name = f"{node_class}_{n}"

    if node_name in nodes:
        output_error(f"Node '{node_name}' already exists in graph", "NODE_EXISTS")
        return

    # Optional --values: must decode to a JSON object.
    values = {}
    if args.values:
        try:
            values = json.loads(args.values)
        except json.JSONDecodeError as e:
            output_error(f"Invalid JSON for --values: {str(e)}", "INVALID_JSON")
            return
        if not isinstance(values, dict):
            output_error("--values must be a JSON object", "INVALID_VALUES")
            return

    # Optional --location: accepts {"x": N, "y": N} or a [x, y] pair.
    location = {"x": 0, "y": 0}
    if args.location:
        try:
            loc = json.loads(args.location)
        except json.JSONDecodeError as e:
            output_error(f"Invalid JSON for --location: {str(e)}", "INVALID_JSON")
            return
        if isinstance(loc, dict) and 'x' in loc and 'y' in loc:
            location = {"x": loc['x'], "y": loc['y']}
        elif isinstance(loc, list) and len(loc) >= 2:
            location = {"x": loc[0], "y": loc[1]}
        else:
            output_error("--location must be {\"x\": N, \"y\": N} or [x, y]", "INVALID_LOCATION")
            return

    # Assemble the node record; ports start empty for a generic node.
    new_node = {
        "name": node_name,
        "nodeClass": node_class,
        "color": args.color or "#808080",
        "links": {},
        "location": location,
        "ports": {
            "inputs": [],
            "outputs": []
        },
        "values": values
    }
    if args.tooltip:
        new_node["tooltip"] = args.tooltip

    nodes[node_name] = new_node
    graph['nodes'] = nodes

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "node": node_name,
        "nodeClass": node_class,
        "location": location,
        "values": values
    })


def cmd_graph_editor_add_link(args):
    """Add a link between two nodes in a local graph file.

    Creates a connection from a source node's output port to a target node's
    input port. The link is stored on the target node under its links property.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    source_node = require_arg(args, 'source', 'Source node name')
    output_port = require_arg(args, 'output', 'Output port name')
    target_node = require_arg(args, 'target', 'Target node name')
    input_port = require_arg(args, 'input', 'Input port name')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})

    # Both endpoints must exist before wiring them together.
    if source_node not in nodes:
        output_error(f"Source node '{source_node}' not found in graph", "SOURCE_NOT_FOUND")
        return
    if target_node not in nodes:
        output_error(f"Target node '{target_node}' not found in graph", "TARGET_NOT_FOUND")
        return

    # Links live on the target node, keyed by input port.
    target = nodes[target_node]
    port_links = target.setdefault('links', {}).setdefault(input_port, [])

    # Refuse to add the same source/output pair twice.
    duplicate = any(
        existing.get('outputPort') == output_port and existing.get('sourceNode') == source_node
        for existing in port_links
    )
    if duplicate:
        output_error(
            f"Link already exists: {source_node}.{output_port} -> {target_node}.{input_port}",
            "LINK_EXISTS"
        )
        return

    port_links.append({
        "outputPort": output_port,
        "sourceNode": source_node
    })

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "link": {
            "source": source_node,
            "outputPort": output_port,
            "target": target_node,
            "inputPort": input_port
        }
    })


def cmd_graph_editor_remove_node(args):
    """Remove a node from a local graph file.

    Removes the specified node and all links connected to it (both incoming
    links on this node and outgoing links from other nodes that reference it).
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    node_name = require_arg(args, 'node', 'Node name')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})
    if node_name not in nodes:
        output_error(f"Node '{node_name}' not found in graph", "NODE_NOT_FOUND")
        return

    # Drop the node itself; its incoming links go with it.
    removed_node = nodes.pop(node_name)

    # Scrub every other node's links for references back to the removed node.
    removed_links = []
    for other_name, other_node in nodes.items():
        links = other_node.get('links', {})
        for port_name in list(links):
            kept = [
                link for link in links[port_name]
                if link.get('sourceNode') != node_name
            ]
            if len(kept) != len(links[port_name]):
                removed_links.append({
                    "target": other_name,
                    "inputPort": port_name,
                    "source": node_name
                })
            if kept:
                links[port_name][:] = kept
            else:
                # A port with no remaining links is removed entirely.
                del links[port_name]

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "removedNode": node_name,
        "nodeClass": removed_node.get('nodeClass'),
        "removedLinks": removed_links
    })


def cmd_graph_editor_remove_link(args):
    """Remove a link between two nodes in a local graph file.

    Removes the connection from the source node's output port to the target
    node's input port.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    source_node = require_arg(args, 'source', 'Source node name')
    output_port = require_arg(args, 'output', 'Output port name')
    target_node = require_arg(args, 'target', 'Target node name')
    input_port = require_arg(args, 'input', 'Input port name')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})
    if target_node not in nodes:
        output_error(f"Target node '{target_node}' not found in graph", "TARGET_NOT_FOUND")
        return

    links = nodes[target_node].get('links', {})
    if input_port not in links:
        output_error(
            f"No links found on input port '{input_port}' of node '{target_node}'",
            "LINK_NOT_FOUND"
        )
        return

    # Keep every link except the one matching this source node + output port.
    port_links = links[input_port]
    remaining = [
        link for link in port_links
        if link.get('outputPort') != output_port or link.get('sourceNode') != source_node
    ]

    if len(remaining) == len(port_links):
        output_error(
            f"Link not found: {source_node}.{output_port} -> {target_node}.{input_port}",
            "LINK_NOT_FOUND"
        )
        return

    if remaining:
        port_links[:] = remaining
    else:
        # A port with no remaining links is removed entirely.
        del links[input_port]

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "removedLink": {
            "source": source_node,
            "outputPort": output_port,
            "target": target_node,
            "inputPort": input_port
        }
    })


def cmd_graph_editor_add_volume_file(args):
    """Add a VolumeFile node to a local graph file.

    Creates a VolumeFile node that references a file in a Rendered.ai volume.
    The volume reference uses the format volumeId:/path/to/file.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    volume_id = require_arg(args, 'volumeid', 'Volume ID')
    volume_path = require_arg(args, 'path', 'File path in volume')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})

    # Auto-name as "VolumeFile_<N>" with the lowest free N when --name is absent.
    node_name = args.name
    if not node_name:
        n = 1
        while f"VolumeFile_{n}" in nodes:
            n += 1
        node_name = f"VolumeFile_{n}"

    if node_name in nodes:
        output_error(f"Node '{node_name}' already exists in graph", "NODE_EXISTS")
        return

    # Optional --location: accepts {"x": N, "y": N} or a [x, y] pair.
    location = {"x": 0, "y": 0}
    if args.location:
        try:
            loc = json.loads(args.location)
        except json.JSONDecodeError as e:
            output_error(f"Invalid JSON for --location: {str(e)}", "INVALID_JSON")
            return
        if isinstance(loc, dict) and 'x' in loc and 'y' in loc:
            location = {"x": loc['x'], "y": loc['y']}
        elif isinstance(loc, list) and len(loc) >= 2:
            location = {"x": loc[0], "y": loc[1]}
        else:
            output_error("--location must be {\"x\": N, \"y\": N} or [x, y]", "INVALID_LOCATION")
            return

    # References are always rooted at '/'; tooltip shows the volume name when given.
    if not volume_path.startswith('/'):
        volume_path = '/' + volume_path
    volume_ref = f"{volume_id}:{volume_path}"
    tooltip = f"{args.volumename or volume_id}:{volume_path}"

    # VolumeFile node shape: one hidden "File" input carrying the reference,
    # one "File" output, plus the reference mirrored into values.
    nodes[node_name] = {
        "name": node_name,
        "nodeClass": "VolumeFile",
        "color": "#246BB3",
        "hash": "8d56c9b8e4bae85fd61620e1d4d44a24",
        "links": {},
        "location": location,
        "ports": {
            "inputs": [
                {
                    "name": "File",
                    "description": "",
                    "default": volume_ref,
                    "hidden": True
                }
            ],
            "outputs": [
                {
                    "name": "File",
                    "description": ""
                }
            ]
        },
        "tooltip": tooltip,
        "values": {
            "File": volume_ref
        }
    }
    graph['nodes'] = nodes

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "node": node_name,
        "nodeClass": "VolumeFile",
        "volumeId": volume_id,
        "path": volume_path,
        "volumeRef": volume_ref,
        "location": location
    })


def cmd_graph_editor_add_volume_directory(args):
    """Add a VolumeDirectory node to a local graph file.

    Creates a VolumeDirectory node that references a directory in a Rendered.ai volume.
    The volume reference uses the format volumeId:/path/to/directory.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    volume_id = require_arg(args, 'volumeid', 'Volume ID')
    # --path is optional here; default to the volume root.
    volume_path = args.path or '/'

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})

    # Auto-name as "VolumeDirectory_<N>" with the lowest free N when --name is absent.
    node_name = args.name
    if not node_name:
        n = 1
        while f"VolumeDirectory_{n}" in nodes:
            n += 1
        node_name = f"VolumeDirectory_{n}"

    if node_name in nodes:
        output_error(f"Node '{node_name}' already exists in graph", "NODE_EXISTS")
        return

    # Optional --location: accepts {"x": N, "y": N} or a [x, y] pair.
    location = {"x": 0, "y": 0}
    if args.location:
        try:
            loc = json.loads(args.location)
        except json.JSONDecodeError as e:
            output_error(f"Invalid JSON for --location: {str(e)}", "INVALID_JSON")
            return
        if isinstance(loc, dict) and 'x' in loc and 'y' in loc:
            location = {"x": loc['x'], "y": loc['y']}
        elif isinstance(loc, list) and len(loc) >= 2:
            location = {"x": loc[0], "y": loc[1]}
        else:
            output_error("--location must be {\"x\": N, \"y\": N} or [x, y]", "INVALID_LOCATION")
            return

    # References are always rooted at '/'; tooltip shows the volume name when given.
    if not volume_path.startswith('/'):
        volume_path = '/' + volume_path
    volume_ref = f"{volume_id}:{volume_path}"
    tooltip = f"{args.volumename or volume_id}:{volume_path}"

    # VolumeDirectory node shape: one hidden "Directory" input carrying the
    # reference, one "Directory" output, plus the reference mirrored into values.
    nodes[node_name] = {
        "name": node_name,
        "nodeClass": "VolumeDirectory",
        "color": "#246BB3",
        "hash": "a7c19eb160150ee04d82af60c9332d104f0a7f89",
        "links": {},
        "location": location,
        "ports": {
            "inputs": [
                {
                    "name": "Directory",
                    "description": "",
                    "default": volume_ref,
                    "hidden": True
                }
            ],
            "outputs": [
                {
                    "name": "Directory",
                    "description": ""
                }
            ]
        },
        "tooltip": tooltip,
        "values": {
            "Directory": volume_ref
        }
    }
    graph['nodes'] = nodes

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "node": node_name,
        "nodeClass": "VolumeDirectory",
        "volumeId": volume_id,
        "path": volume_path,
        "volumeRef": volume_ref,
        "location": location
    })


def cmd_graph_editor_list_nodes(args):
    """List all nodes in a local graph file.

    Returns a summary of all nodes including their name, class, location,
    and connection counts. Useful for understanding graph structure.
    """
    filepath = require_arg(args, 'file', 'Graph file path')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})

    node_list = []
    for name, data in nodes.items():
        # Incoming links are stored on the node itself, keyed by input port.
        sources = []
        for port, port_links in data.get('links', {}).items():
            sources.extend(
                f"{link.get('sourceNode')}.{link.get('outputPort')}"
                for link in port_links
            )

        # Outgoing links must be discovered by scanning the other nodes'
        # links for entries whose sourceNode is this node.
        targets = []
        for other_name, other_data in nodes.items():
            if other_name == name:
                continue
            for port, port_links in other_data.get('links', {}).items():
                targets.extend(
                    f"{other_name}.{port}"
                    for link in port_links
                    if link.get('sourceNode') == name
                )

        summary = {
            "name": name,
            "nodeClass": data.get('nodeClass'),
            "location": data.get('location'),
            "incomingLinks": len(sources),
            "outgoingLinks": len(targets),
            "hasValues": bool(data.get('values'))
        }
        if args.verbose:
            summary["linkSources"] = sources
            summary["linkTargets"] = targets

        node_list.append(summary)

    # Sort by name for consistent output
    node_list.sort(key=lambda entry: entry['name'])

    output_json({
        "file": os.path.abspath(filepath),
        "nodeCount": len(node_list),
        "nodes": node_list
    })


def cmd_graph_editor_get_node(args):
    """Get detailed information about a specific node in a local graph file.

    Returns the node's full data: values, ports, incoming links, metadata,
    plus a computed list of outgoing links (edges other nodes draw from it).
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    node_name = require_arg(args, 'node', 'Node name')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})
    if node_name not in nodes:
        output_error(f"Node '{node_name}' not found in graph", "NODE_NOT_FOUND")
        return

    node_data = nodes[node_name]

    # Outgoing links are not stored on the node itself; scan every other
    # node's incoming links for references back to this node.
    outgoing_links = [
        {
            "targetNode": other_name,
            "targetPort": port_name,
            "outputPort": link.get('outputPort')
        }
        for other_name, other_data in nodes.items()
        if other_name != node_name
        for port_name, port_links in other_data.get('links', {}).items()
        for link in port_links
        if link.get('sourceNode') == node_name
    ]

    output_json({
        "file": os.path.abspath(filepath),
        "node": node_name,
        "nodeClass": node_data.get('nodeClass'),
        "color": node_data.get('color'),
        "location": node_data.get('location'),
        "tooltip": node_data.get('tooltip'),
        "hash": node_data.get('hash'),
        "values": node_data.get('values', {}),
        "ports": node_data.get('ports', {}),
        "incomingLinks": node_data.get('links', {}),
        "outgoingLinks": outgoing_links
    })


def cmd_graph_editor_move_node(args):
    """Move a node to a new location in a local graph file.

    Rewrites the node's x,y coordinates used for visual positioning in the
    graph editor and saves the file.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    node_name = require_arg(args, 'node', 'Node name')
    raw_location = require_arg(args, 'location', 'Location')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})
    if node_name not in nodes:
        output_error(f"Node '{node_name}' not found in graph", "NODE_NOT_FOUND")
        return

    # Accept either {"x": N, "y": N} or a two-element [x, y] list.
    try:
        parsed = json.loads(raw_location)
    except json.JSONDecodeError as e:
        output_error(f"Invalid JSON for --location: {str(e)}", "INVALID_JSON")
        return

    if isinstance(parsed, dict) and 'x' in parsed and 'y' in parsed:
        new_location = {"x": parsed['x'], "y": parsed['y']}
    elif isinstance(parsed, list) and len(parsed) >= 2:
        new_location = {"x": parsed[0], "y": parsed[1]}
    else:
        output_error("--location must be {\"x\": N, \"y\": N} or [x, y]", "INVALID_LOCATION")
        return

    previous_location = nodes[node_name].get('location', {})
    nodes[node_name]['location'] = new_location

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "node": node_name,
        "oldLocation": previous_location,
        "newLocation": new_location
    })


def cmd_graph_editor_clone_node(args):
    """Clone an existing node in a local graph file.

    Creates a copy of a node under a new name. The clone keeps the source's
    nodeClass, values, ports, and color but carries no links (links must be
    added separately). Its location defaults to a slight offset from the
    original unless --location is given.
    """
    filepath = require_arg(args, 'file', 'Graph file path')
    source_name = require_arg(args, 'source', 'Source node name')

    graph = load_graph_file(filepath)
    if graph is None:
        return

    nodes = graph.get('nodes', {})
    if source_name not in nodes:
        output_error(f"Source node '{source_name}' not found in graph", "NODE_NOT_FOUND")
        return

    source = nodes[source_name]
    node_class = source.get('nodeClass', 'Unknown')

    # Auto-name the clone "<class>_<n>" with the first unused counter.
    new_name = args.name
    if not new_name:
        suffix = 1
        while f"{node_class}_{suffix}" in nodes:
            suffix += 1
        new_name = f"{node_class}_{suffix}"

    if new_name in nodes:
        output_error(f"Node '{new_name}' already exists in graph", "NODE_EXISTS")
        return

    # Location: explicit --location (dict or [x, y]) wins; otherwise offset
    # the source's coordinates so the clone is visible next to it.
    if args.location:
        try:
            parsed = json.loads(args.location)
        except json.JSONDecodeError as e:
            output_error(f"Invalid JSON for --location: {str(e)}", "INVALID_JSON")
            return
        if isinstance(parsed, dict) and 'x' in parsed and 'y' in parsed:
            location = {"x": parsed['x'], "y": parsed['y']}
        elif isinstance(parsed, list) and len(parsed) >= 2:
            location = {"x": parsed[0], "y": parsed[1]}
        else:
            output_error("--location must be {\"x\": N, \"y\": N} or [x, y]", "INVALID_LOCATION")
            return
    else:
        base = source.get('location', {"x": 0, "y": 0})
        location = {"x": base.get('x', 0) + 50, "y": base.get('y', 0) + 50}

    # Deep-copy mutable fields so edits to the clone never bleed into the
    # source node; links are intentionally left empty.
    import copy
    clone = {
        "name": new_name,
        "nodeClass": node_class,
        "color": source.get('color', '#808080'),
        "hash": source.get('hash'),
        "links": {},
        "location": location,
        "ports": copy.deepcopy(source.get('ports', {"inputs": [], "outputs": []})),
        "values": copy.deepcopy(source.get('values', {}))
    }
    if source.get('tooltip'):
        clone['tooltip'] = source['tooltip']

    nodes[new_name] = clone
    graph['nodes'] = nodes

    if not save_graph_file(filepath, graph):
        return

    output_json({
        "success": True,
        "file": os.path.abspath(filepath),
        "sourceNode": source_name,
        "clonedNode": new_name,
        "nodeClass": node_class,
        "location": location
    })


def cmd_graph_editor_status(args):
    """Get the status and validation errors from the graph editor.

    Reads the graph editor status file written by open editor sessions and
    reports per-session validation errors plus summary counts.
    """
    status_path = os.path.join(os.path.expanduser('~'), '.theia', 'graph-editor-status.json')

    # No status file means the editor has never been opened (not an error).
    if not os.path.exists(status_path):
        output_json({
            "status": "no_status_file",
            "message": "No graph editor status file found. The graph editor may not have been opened yet.",
            "statusPath": status_path,
            "sessions": []
        })
        return

    try:
        with open(status_path, 'r') as f:
            status_data = json.load(f)
    except json.JSONDecodeError as e:
        output_error(f"Failed to parse status file: {str(e)}", "PARSE_ERROR")
        return
    except Exception as e:
        output_error(f"Failed to read status file: {str(e)}", "READ_ERROR")
        return

    sessions = status_data.get('sessions', [])

    # Narrow to the session editing the requested graph file, if given.
    if args.file:
        wanted = os.path.abspath(args.file)
        sessions = [s for s in sessions if s.get('graphPath') == wanted]

    # Per-session error counts drive both summary fields below.
    error_counts = [len(s.get('errors', [])) for s in sessions]

    output_json({
        "status": "ok",
        "statusPath": status_path,
        "lastUpdated": status_data.get('lastUpdated'),
        "sessionCount": len(sessions),
        "sessionsWithErrors": sum(1 for c in error_counts if c),
        "totalErrors": sum(error_counts),
        "sessions": sessions
    })


# =============================================================================
# CHANNELS
# =============================================================================

def cmd_channels_get(args):
    """List channels, optionally scoped by workspace/organization/channel ID."""
    client = get_client()

    query = {
        'workspaceId': args.workspaceid,
        'organizationId': args.orgid,
        'channelId': args.channelid,
        'limit': args.limit,
        'cursor': args.cursor,
        'fields': parse_list_arg(args.fields) if args.fields else None,
    }
    output_json(client.get_channels(**query))


def cmd_channels_schema(args):
    """Get a channel's node schema, with optional filtering and summaries.

    Supports category/subcategory filters, free-text search across node and
    port metadata, a categories-only listing, and a names-only listing.
    """
    client = get_client()
    channel_id = require_arg(args, 'channelid', 'Channel ID')

    result = client.get_channel_nodes(
        channelId=channel_id,
        fields=parse_list_arg(args.fields) if args.fields else None
    )

    if args.category or args.subcategory or args.search:
        needle = args.search.lower() if args.search else None

        def matches(node):
            # Category/subcategory filters are case-insensitive exact matches.
            if args.category and node.get('category', '').lower() != args.category.lower():
                return False
            if args.subcategory and node.get('subcategory', '').lower() != args.subcategory.lower():
                return False
            if needle:
                # Search spans node metadata plus every input/output
                # name and description.
                parts = [
                    node.get('name', ''),
                    node.get('category', ''),
                    node.get('subcategory', ''),
                    node.get('tooltip', ''),
                    node.get('description', ''),
                ]
                for io_key in ('inputs', 'outputs'):
                    for item in node.get(io_key, []):
                        parts.append(item.get('name', ''))
                        parts.append(item.get('description', ''))
                haystack = ' '.join(str(p) for p in parts).lower()
                if needle not in haystack:
                    return False
            return True

        result = [node for node in result if matches(node)]

    # Summary mode: just the distinct (filtered) categories/subcategories.
    if args.list_categories:
        output_json({
            "categories": sorted({n.get('category', '') for n in result if n.get('category')}),
            "subcategories": sorted({n.get('subcategory', '') for n in result if n.get('subcategory')})
        })
        return

    if args.names_only:
        output_json(sorted(n.get('name', '') for n in result))
    else:
        output_json(result)


def cmd_channels_nodes(args):
    """Fetch documentation for a single node class in a channel."""
    client = get_client()
    channel_id = require_arg(args, 'channelid', 'Channel ID')
    node_name = require_arg(args, 'node', 'Node name')

    docs = client.get_node_documentation(
        channelId=channel_id,
        node=node_name,
        fields=parse_list_arg(args.fields) if args.fields else None
    )
    output_json({"documentation": docs})


def cmd_channels_docs(args):
    """Fetch the top-level documentation for a channel."""
    client = get_client()
    channel_id = require_arg(args, 'channelid', 'Channel ID')
    output_json({"documentation": client.get_channel_documentation(channelId=channel_id)})


def cmd_channels_get_default_graph(args):
    """Download a channel's default graph, optionally to a given file path."""
    client = get_client()
    channel_id = require_arg(args, 'channelid', 'Channel ID')

    saved_path = client.get_default_graph(
        channelId=channel_id,
        filepath=args.outputfile
    )
    output_json({"filepath": saved_path})


def cmd_channels_set_default_graph(args):
    """Promote an existing graph to be its channel's default graph."""
    client = get_client()
    graph_id = require_arg(args, 'graphid', 'Graph ID')

    succeeded = client.set_default_graph(
        graphId=graph_id,
        workspaceId=args.workspaceid
    )
    output_json({"success": succeeded})


# =============================================================================
# SERVICES
# =============================================================================

def cmd_services_get(args):
    """List services, optionally scoped by workspace/organization/service ID."""
    client = get_client()

    query = {
        'workspaceId': args.workspaceid,
        'organizationId': args.orgid,
        'serviceId': args.serviceid,
        'limit': args.limit,
        'cursor': args.cursor,
        'fields': parse_list_arg(args.fields) if args.fields else None,
    }
    output_json(client.get_services(**query))


def cmd_services_create(args):
    """Create a service in an organization."""
    client = get_client()
    org_id = require_arg(args, 'orgid', 'Organization ID')

    spec = {
        'name': args.name,
        'description': args.description,
        'organizationId': org_id,
        'serviceTypeId': args.type,
        'volumes': parse_list_arg(args.volumes) if args.volumes else [],
        'instance': args.instance,
        'tags': parse_list_arg(args.tags) if args.tags else [],
    }
    output_json({"serviceId": client.create_service(**spec)})


def cmd_services_edit(args):
    """Edit a service's name, description, volumes, instance, or tags."""
    client = get_client()
    service_id = require_arg(args, 'serviceid', 'Service ID')

    changes = {
        'serviceId': service_id,
        'name': args.name,
        'description': args.description,
        'volumes': parse_list_arg(args.volumes) if args.volumes else None,
        'instance': args.instance,
        'tags': parse_list_arg(args.tags) if args.tags else None,
    }
    output_json({"success": client.edit_service(**changes)})


def cmd_services_delete(args):
    """Delete a service by ID."""
    client = get_client()
    service_id = require_arg(args, 'serviceid', 'Service ID')
    output_json({"success": client.delete_service(serviceId=service_id)})


def cmd_services_jobs(args):
    """List service jobs in a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    query = {
        'workspaceId': workspace_id,
        'jobId': args.jobid,
        'limit': args.limit,
        'cursor': args.cursor,
        'fields': parse_list_arg(args.fields) if args.fields else None,
    }
    output_json(client.get_service_jobs(**query))


def cmd_services_delete_job(args):
    """Delete a service job from a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    job_id = require_arg(args, 'jobid', 'Job ID')

    succeeded = client.delete_service_job(workspaceId=workspace_id, jobId=job_id)
    output_json({"success": succeeded})


# =============================================================================
# API KEYS
# =============================================================================

def cmd_api_keys_get(args):
    """List the caller's API keys."""
    client = get_client()
    output_json(client.get_api_keys())


def cmd_api_keys_create(args):
    """Create an API key scoped to the user, an organization, or a workspace."""
    client = get_client()

    params = {'name': args.name, 'scope': args.scope}

    # Organization- and workspace-scoped keys require the matching ID.
    if args.scope == 'organization':
        params['organizationId'] = require_arg(args, 'orgid', 'Organization ID')
    elif args.scope == 'workspace':
        params['workspaceId'] = require_arg(args, 'workspaceid', 'Workspace ID')

    if args.expires:
        params['expiresAt'] = args.expires

    output_json({"apiKey": client.create_api_key(**params)})


def cmd_api_keys_delete(args):
    """Delete an API key by ID."""
    client = get_client()
    api_key_id = require_arg(args, 'apikeyid', 'API Key ID')
    output_json({"success": client.delete_api_key(apiKeyId=api_key_id)})


# =============================================================================
# ANALYTICS
# =============================================================================

def cmd_analytics_get(args):
    """List analytics jobs in a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    query = {
        'workspaceId': workspace_id,
        'datasetId': args.datasetid,
        'analyticsId': args.analyticsid,
    }
    output_json(client.get_analytics(**query))


def cmd_analytics_create(args):
    """Start an analytics job on a dataset."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    analytics_id = client.create_analytics(
        workspaceId=workspace_id,
        datasetId=dataset_id,
        analyticsType=args.type
    )
    output_json({"analyticsId": analytics_id})


def cmd_analytics_delete(args):
    """Delete an analytics job from a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    analytics_id = require_arg(args, 'analyticsid', 'Analytics ID')

    succeeded = client.delete_analytics(workspaceId=workspace_id, analyticsId=analytics_id)
    output_json({"success": succeeded})


def cmd_analytics_types(args):
    """List the available analytics types."""
    client = get_client()
    output_json(client.get_analytics_types())


def cmd_analytics_download(args):
    """Download the results of an analytics job."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    analytics_id = require_arg(args, 'analyticsid', 'Analytics ID')

    results = client.download_analytics(workspaceId=workspace_id, analyticsId=analytics_id)
    output_json(results)


def cmd_analytics_edit(args):
    """Replace the tags on an analytics job."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    analytics_id = require_arg(args, 'analyticsid', 'Analytics ID')

    succeeded = client.edit_analytics(
        workspaceId=workspace_id,
        analyticsId=analytics_id,
        tags=parse_list_arg(args.tags) if args.tags else []
    )
    output_json({"success": succeeded})


# =============================================================================
# ANNOTATIONS
# =============================================================================

def cmd_annotations_get(args):
    """List annotations in a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    query = {
        'workspaceId': workspace_id,
        'datasetId': args.datasetid,
        'annotationId': args.annotationid,
    }
    output_json(client.get_annotations(**query))


def cmd_annotations_create(args):
    """Create an annotation job for a dataset."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    annotation_id = client.create_annotation(
        workspaceId=workspace_id,
        datasetId=dataset_id,
        format=args.format,
        mapId=args.mapid
    )
    output_json({"annotationId": annotation_id})


def cmd_annotations_delete(args):
    """Delete an annotation from a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    annotation_id = require_arg(args, 'annotationid', 'Annotation ID')

    succeeded = client.delete_annotation(workspaceId=workspace_id, annotationId=annotation_id)
    output_json({"success": succeeded})


def cmd_annotations_formats(args):
    """List the supported annotation formats."""
    client = get_client()
    output_json(client.get_annotation_formats())


def cmd_annotations_download(args):
    """Download an annotation archive, optionally extracting it in place."""
    import zipfile

    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    annotation_id = require_arg(args, 'annotationid', 'Annotation ID')

    archive = client.download_annotation(
        workspaceId=workspace_id,
        annotationId=annotation_id
    )

    # Only extract when --extract was given AND we actually received a
    # .zip file that exists on disk.
    if not (args.extract and archive and archive.endswith('.zip') and os.path.isfile(archive)):
        output_json({"downloadPath": archive})
        return

    destination = os.path.splitext(archive)[0]
    with zipfile.ZipFile(archive, 'r') as zf:
        zf.extractall(destination)
    os.remove(archive)  # the extracted directory replaces the archive
    output_json({"downloadPath": destination, "extracted": True})


def cmd_annotations_edit(args):
    """Replace the tags on an annotation."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    annotation_id = require_arg(args, 'annotationid', 'Annotation ID')

    succeeded = client.edit_annotation(
        workspaceId=workspace_id,
        annotationId=annotation_id,
        tags=parse_list_arg(args.tags) if args.tags else []
    )
    output_json({"success": succeeded})


def cmd_annotations_view(args):
    """Generate an image with annotations overlayed.

    This command draws annotations (bounding boxes, 3D boxes, or segmentation
    outlines) on an image from a dataset. The image must be part of a dataset
    directory structure that includes annotations/ and metadata/ folders with
    ANA-format annotation files.
    """
    from anatools.annotations import annotations

    image_path = require_arg(args, 'imagepath', 'Image path')
    out_dir = require_arg(args, 'outdir', 'Output directory')

    # Draw-type -> renderer method name on the annotations object. A single
    # dispatch table replaces three branches that previously duplicated the
    # same keyword-argument list, so a kwarg change now happens in one place.
    renderers = {
        'box_2d': 'bounding_box_2d',
        'box_3d': 'bounding_box_3d',
        'segmentation': 'segmentation',
    }

    # Parse draw types - can be comma-separated for multiple types.
    draw_types = parse_list_arg(args.drawtype) if args.drawtype else ['box_2d']
    for dt in draw_types:
        if dt not in renderers:
            output_error(f"Invalid draw type '{dt}'. Must be one of: {', '.join(renderers)}", "INVALID_DRAW_TYPE")
            sys.exit(1)

    # Optional filters restricting which objects get drawn.
    object_ids = None
    object_types = None
    if args.objectids:
        try:
            object_ids = [int(x) for x in parse_list_arg(args.objectids)]
        except ValueError:
            output_error("Object IDs must be integers", "INVALID_OBJECT_IDS")
            sys.exit(1)
    if args.objecttypes:
        object_types = parse_list_arg(args.objecttypes)

    # Colors arrive as JSON; the renderer expects tuples, not lists.
    colors = None
    if args.colors:
        colors = parse_json_arg(args.colors)
        for key in colors:
            if isinstance(colors[key], list):
                colors[key] = tuple(colors[key])

    line_thickness = args.thickness if args.thickness else 1

    ann = annotations()
    output_paths = []

    try:
        for draw_type in draw_types:
            render = getattr(ann, renderers[draw_type])
            output_path = render(
                image_path=image_path,
                out_dir=out_dir,
                object_ids=object_ids,
                object_types=object_types,
                line_thickness=line_thickness,
                colors=colors,
                quiet=True
            )

            if output_path is None:
                output_error(f"Failed to generate annotated image for draw type '{draw_type}'", "ANNOTATION_ERROR")
                sys.exit(1)
            output_paths.append(output_path)

        output_json({
            "success": True,
            "outputPaths": output_paths,
            "drawTypes": draw_types
        })
    except FileNotFoundError as e:
        output_error(f"File not found: {str(e)}", "FILE_NOT_FOUND")
        sys.exit(1)
    except Exception as e:
        output_error(f"Failed to generate annotated image: {str(e)}", "ANNOTATION_ERROR")
        sys.exit(1)


# =============================================================================
# ANNOTATION-MAPS
# =============================================================================

def cmd_annotation_maps_get(args):
    """List an organization's annotation maps."""
    client = get_client()
    org_id = require_arg(args, 'orgid', 'Organization ID')
    output_json(client.get_annotation_maps(organizationId=org_id))


def cmd_annotation_maps_upload(args):
    """Upload an annotation map file to an organization."""
    client = get_client()
    org_id = require_arg(args, 'orgid', 'Organization ID')
    map_file = require_arg(args, 'mapfile', 'Map file path')

    upload = {
        'organizationId': org_id,
        'mapfile': map_file,
        'name': args.name,
        'description': args.description,
        'tags': parse_list_arg(args.tags) if args.tags else None,
    }
    output_json({"mapId": client.upload_annotation_map(**upload)})


def cmd_annotation_maps_download(args):
    """Download an annotation map, optionally into a chosen directory."""
    client = get_client()
    map_id = require_arg(args, 'mapid', 'Map ID')

    download_path = client.download_annotation_map(mapId=map_id, localDir=args.outputdir)
    output_json({"downloadPath": download_path})


def cmd_annotation_maps_delete(args):
    """Delete an annotation map by ID."""
    client = get_client()
    map_id = require_arg(args, 'mapid', 'Map ID')
    output_json({"success": client.delete_annotation_map(mapId=map_id)})


def cmd_annotation_maps_edit(args):
    """Edit an annotation map's name, description, or tags."""
    client = get_client()
    map_id = require_arg(args, 'mapid', 'Map ID')

    changes = {
        'mapId': map_id,
        'name': args.name,
        'description': args.description,
        'tags': parse_list_arg(args.tags) if args.tags else None,
    }
    output_json({"success": client.edit_annotation_map(**changes)})


# =============================================================================
# GAN
# =============================================================================

def cmd_gan_datasets_get(args):
    """List GAN-generated datasets in a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    query = {
        'workspaceId': workspace_id,
        'datasetId': args.datasetid,
        'gandatasetId': args.gandatasetid,
        'limit': args.limit,
        'cursor': args.cursor,
        'fields': parse_list_arg(args.fields) if args.fields else None,
    }
    output_json(client.get_gan_datasets(**query))


def cmd_gan_datasets_create(args):
    """Create a GAN dataset by applying a GAN model to an existing dataset."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')
    model_id = require_arg(args, 'modelid', 'Model ID')

    spec = {
        'workspaceId': workspace_id,
        'datasetId': dataset_id,
        'modelId': model_id,
        'name': args.name,
        'description': args.description or '',
        'tags': parse_list_arg(args.tags) if args.tags else [],
    }
    output_json({"datasetId": client.create_gan_dataset(**spec)})


def cmd_gan_datasets_delete(args):
    """Delete a GAN dataset from a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    dataset_id = require_arg(args, 'datasetid', 'Dataset ID')

    succeeded = client.delete_gan_dataset(workspaceId=workspace_id, datasetId=dataset_id)
    output_json({"success": succeeded})


def cmd_gan_models_get(args):
    """List GAN models, optionally scoped by organization/workspace/model ID."""
    client = get_client()

    query = {
        'organizationId': args.orgid,
        'workspaceId': args.workspaceid,
        'modelId': args.modelid,
        'limit': args.limit,
        'cursor': args.cursor,
        'fields': parse_list_arg(args.fields) if args.fields else None,
    }
    output_json(client.get_gan_models(**query))


def cmd_gan_models_upload(args):
    """Upload a GAN model file to an organization."""
    client = get_client()
    org_id = require_arg(args, 'orgid', 'Organization ID')
    model_file = require_arg(args, 'modelfile', 'Model file path')

    upload = {
        'organizationId': org_id,
        'modelfile': model_file,
        'name': args.name,
        'description': args.description,
        'flags': args.flags,
        'tags': parse_list_arg(args.tags) if args.tags else None,
    }
    output_json({"modelId": client.upload_gan_model(**upload)})


def cmd_gan_models_download(args):
    """Download a GAN model, optionally into a chosen directory."""
    client = get_client()
    model_id = require_arg(args, 'modelid', 'Model ID')

    download_path = client.download_gan_model(modelId=model_id, localDir=args.outputdir)
    output_json({"downloadPath": download_path})


# =============================================================================
# UMAP
# =============================================================================

def cmd_umap_get(args):
    """List UMAP jobs in a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    query = {
        'workspaceId': workspace_id,
        'umapId': args.umapid,
        'datasetId': args.datasetid,
        'limit': args.limit,
        'cursor': args.cursor,
        'fields': parse_list_arg(args.fields) if args.fields else None,
    }
    output_json(client.get_umaps(**query))


def cmd_umap_create(args):
    """Create a UMAP job over one or more datasets with per-dataset samples."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')

    dataset_ids = parse_list_arg(require_arg(args, 'datasetids', 'Dataset IDs'))
    # Sample counts arrive as comma-separated strings; coerce to ints.
    sample_counts = [int(s) for s in parse_list_arg(require_arg(args, 'samples', 'Samples'))]

    job = {
        'workspaceId': workspace_id,
        'name': args.name,
        'datasetIds': dataset_ids,
        'samples': sample_counts,
        'description': args.description,
        'seed': args.seed,
        'tags': parse_list_arg(args.tags) if args.tags else None,
    }
    output_json({"umapId": client.create_umap(**job)})


def cmd_umap_delete(args):
    """Delete a UMAP job from a workspace."""
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    umap_id = require_arg(args, 'umapid', 'UMAP ID')

    succeeded = client.delete_umap(workspaceId=workspace_id, umapId=umap_id)
    output_json({"success": succeeded})


# =============================================================================
# SERVERS (Editor)
# =============================================================================

def cmd_servers_get(args):
    """List editor servers, optionally scoped by org/workspace/server ID."""
    client = get_client()

    query = {
        'organizationId': args.orgid,
        'workspaceId': args.workspaceid,
        'serverId': args.serverid,
        'limit': args.limit,
        'cursor': args.cursor,
        'fields': parse_list_arg(args.fields) if args.fields else None,
    }
    output_json(client.get_servers(**query))


def cmd_servers_create(args):
    """Create an editor server."""
    client = get_client()

    spec = {
        'organizationId': args.orgid,
        'workspaceId': args.workspaceid,
        'instance': args.instance,
        'name': args.name,
    }
    output_json({"serverId": client.create_server(**spec)})


def cmd_servers_delete(args):
    """Delete an editor server by ID."""
    client = get_client()
    server_id = require_arg(args, 'serverid', 'Server ID')
    output_json({"success": client.delete_server(serverId=server_id)})


def cmd_servers_start(args):
    """Start a stopped editor server."""
    client = get_client()
    server_id = require_arg(args, 'serverid', 'Server ID')
    output_json({"success": client.start_server(serverId=server_id)})


def cmd_servers_stop(args):
    """Stop an editor server by ID and print the success flag as JSON."""
    client = get_client()
    sid = require_arg(args, 'serverid', 'Server ID')

    output_json({"success": client.stop_server(serverId=sid)})


# =============================================================================
# ML
# =============================================================================

def cmd_ml_architectures(args):
    """List the available ML architectures as JSON."""
    client = get_client()

    requested_fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_ml_architectures(fields=requested_fields))


def cmd_ml_models_get(args):
    """List ML models in a workspace, optionally filtered by dataset or model ID."""
    client = get_client()
    ws_id = require_arg(args, 'workspaceid', 'Workspace ID')

    requested_fields = parse_list_arg(args.fields) if args.fields else None
    models = client.get_ml_models(
        workspaceId=ws_id,
        datasetId=args.datasetid,
        modelId=args.modelid,
        limit=args.limit,
        cursor=args.cursor,
        fields=requested_fields,
    )
    output_json(models)


def cmd_ml_models_create(args):
    """Submit an ML model training job and print the new model ID as JSON."""
    client = get_client()
    ws_id = require_arg(args, 'workspaceid', 'Workspace ID')
    ds_id = require_arg(args, 'datasetid', 'Dataset ID')
    arch_id = require_arg(args, 'architectureid', 'Architecture ID')
    # NOTE(review): the raw --parameters value is forwarded as-is; presumably
    # the client accepts a JSON string — confirm against anatools.create_ml_model.
    params = require_arg(args, 'parameters', 'Parameters JSON')

    model_id = client.create_ml_model(
        workspaceId=ws_id,
        datasetId=ds_id,
        architectureId=arch_id,
        name=args.name,
        parameters=params,
        description=args.description,
        tags=parse_list_arg(args.tags) if args.tags else None,
    )
    output_json({"modelId": model_id})


def cmd_ml_models_download(args):
    """Download an ML model (optionally a specific checkpoint) to a local directory."""
    client = get_client()
    ws_id = require_arg(args, 'workspaceid', 'Workspace ID')
    model_id = require_arg(args, 'modelid', 'Model ID')

    path = client.download_ml_model(
        workspaceId=ws_id,
        modelId=model_id,
        checkpoint=args.checkpoint,
        localDir=args.outputdir,
    )
    output_json({"downloadPath": path})


def cmd_ml_inferences_get(args):
    """List ML inference jobs in a workspace, with optional ID filters."""
    client = get_client()
    ws_id = require_arg(args, 'workspaceid', 'Workspace ID')

    requested_fields = parse_list_arg(args.fields) if args.fields else None
    inferences = client.get_ml_inferences(
        workspaceId=ws_id,
        inferenceId=args.inferenceid,
        datasetId=args.datasetid,
        modelId=args.modelid,
        limit=args.limit,
        cursor=args.cursor,
        fields=requested_fields,
    )
    output_json(inferences)


def cmd_ml_inferences_create(args):
    """Submit an ML inference job and print the new inference ID as JSON."""
    client = get_client()
    ws_id = require_arg(args, 'workspaceid', 'Workspace ID')
    ds_id = require_arg(args, 'datasetid', 'Dataset ID')
    model_id = require_arg(args, 'modelid', 'Model ID')

    inference_id = client.create_ml_inference(
        workspaceId=ws_id,
        datasetId=ds_id,
        modelId=model_id,
        mapId=args.mapid,
        tags=parse_list_arg(args.tags) if args.tags else None,
    )
    output_json({"inferenceId": inference_id})


# =============================================================================
# INPAINT
# =============================================================================

def cmd_inpaint_get(args):
    """List inpaint jobs for a volume, optionally filtered by inpaint ID."""
    client = get_client()
    vol_id = require_arg(args, 'volumeid', 'Volume ID')

    requested_fields = parse_list_arg(args.fields) if args.fields else None
    inpaints = client.get_inpaints(
        volumeId=vol_id,
        inpaintId=args.inpaintid,
        limit=args.limit,
        cursor=args.cursor,
        fields=requested_fields,
    )
    output_json(inpaints)


def cmd_inpaint_log(args):
    """Fetch the log of a specific inpaint job and print it as JSON."""
    client = get_client()
    vol_id = require_arg(args, 'volumeid', 'Volume ID')
    job_id = require_arg(args, 'inpaintid', 'Inpaint ID')

    requested_fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_inpaint_log(
        volumeId=vol_id,
        inpaintId=job_id,
        fields=requested_fields,
    ))


def cmd_inpaint_create(args):
    """Create an inpaint job on a volume and print the new job ID as JSON.

    Defaults: dilation=5, inputType='MASK', outputType='PNG' when the
    corresponding flags are not supplied.

    Fix: the previous `args.dilation or 5` coerced an explicit
    `--dilation 0` to 5 because 0 is falsy; likewise `or` on the type
    flags treated an empty string as unset. Explicit `is None` checks
    now apply the defaults only when the flag was truly omitted.
    """
    client = get_client()
    volume_id = require_arg(args, 'volumeid', 'Volume ID')
    location = require_arg(args, 'location', 'Location')

    result = client.create_inpaint(
        volumeId=volume_id,
        location=location,
        files=parse_list_arg(args.files) if args.files else [],
        destination=args.destination,
        dilation=args.dilation if args.dilation is not None else 5,
        inputType=args.inputtype if args.inputtype is not None else 'MASK',
        outputType=args.outputtype if args.outputtype is not None else 'PNG'
    )
    output_json({"inpaintId": result})


def cmd_inpaint_delete(args):
    """Delete an inpaint job from a volume and print the success flag as JSON."""
    client = get_client()
    vol_id = require_arg(args, 'volumeid', 'Volume ID')
    job_id = require_arg(args, 'inpaintid', 'Inpaint ID')

    deleted = client.delete_inpaint(volumeId=vol_id, inpaintId=job_id)
    output_json({"success": deleted})


# =============================================================================
# PREVIEW
# =============================================================================

def cmd_preview_get(args):
    """Fetch a preview job; with --download, save its thumbnail locally.

    Without --download the preview record is printed as JSON. With
    --download the thumbnail is saved (exits non-zero if the preview is
    not complete or has no thumbnail URL).
    """
    client = get_client()
    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
    preview_id = require_arg(args, 'previewid', 'Preview ID')

    requested_fields = parse_list_arg(args.fields) if args.fields else None

    if not args.download:
        output_json(client.get_preview(
            workspaceId=workspace_id,
            previewId=preview_id,
            fields=requested_fields,
        ))
        return

    # When the caller restricted the field list, make sure the fields the
    # download path needs are included; None already means "all fields".
    if requested_fields is not None:
        for needed in ['thumbnail', 'status']:
            if needed not in requested_fields:
                requested_fields.append(needed)

    preview = client.get_preview(
        workspaceId=workspace_id,
        previewId=preview_id,
        fields=requested_fields,
    )
    status = preview.get('status') if isinstance(preview, dict) else None
    thumbnail = preview.get('thumbnail') if isinstance(preview, dict) else None
    if status != 'success':
        output_error(f"Preview is not complete (status: {status}). Cannot download.", "PREVIEW_NOT_READY")
        sys.exit(1)
    if not thumbnail:
        output_error("Preview has no thumbnail URL available.", "NO_THUMBNAIL")
        sys.exit(1)

    from anatools.lib.download import download_file
    from urllib.parse import urlparse
    # Name the local file after the last URL path segment, with a fallback.
    base = os.path.basename(urlparse(thumbnail).path)
    saved = download_file(url=thumbnail, fname=base or 'preview.png', localDir=args.outputdir)
    output_json({"downloadPath": saved})


def cmd_preview_create(args):
    """Create a preview job for a graph and print the new preview ID as JSON."""
    client = get_client()
    ws_id = require_arg(args, 'workspaceid', 'Workspace ID')
    gr_id = require_arg(args, 'graphid', 'Graph ID')

    preview_id = client.create_preview(workspaceId=ws_id, graphId=gr_id)
    output_json({"previewId": preview_id})


def cmd_preview_log(args):
    """Fetch the log of a preview job and print it as JSON."""
    client = get_client()
    ws_id = require_arg(args, 'workspaceid', 'Workspace ID')
    preview_id = require_arg(args, 'previewid', 'Preview ID')

    requested_fields = parse_list_arg(args.fields) if args.fields else None
    output_json(client.get_preview_log(
        workspaceId=ws_id,
        previewId=preview_id,
        fields=requested_fields,
    ))


# =============================================================================
# AGENTS
# =============================================================================

def cmd_agents_types(args):
    """Print the platform's available data types as JSON."""
    client = get_client()
    output_json(client.get_data_types())


def cmd_agents_fields(args):
    """Print the queryable fields for a given data type as JSON."""
    client = get_client()
    dtype = require_arg(args, 'type', 'Data type')

    output_json(client.get_data_fields(type=dtype))


# =============================================================================
# RULES
# =============================================================================

def cmd_rules_organization(args):
    """Print the rules for an organization as JSON."""
    client = get_client()
    rules = client.get_organization_rules(organizationId=args.orgid)
    output_json({"rules": rules})


def cmd_rules_workspace(args):
    """Print the rules for a workspace as JSON."""
    client = get_client()
    rules = client.get_workspace_rules(workspaceId=args.workspaceid)
    output_json({"rules": rules})


def cmd_rules_service(args):
    """Print the rules for a service as JSON."""
    client = get_client()
    svc_id = require_arg(args, 'serviceid', 'Service ID')

    output_json({"rules": client.get_service_rules(serviceId=svc_id)})


def cmd_rules_user(args):
    """Print the current user's rules as JSON."""
    client = get_client()
    output_json({"rules": client.get_user_rules()})


def cmd_rules_edit_organization(args):
    """Replace an organization's rules and print the success flag as JSON."""
    client = get_client()
    new_rules = require_arg(args, 'rules', 'Rules')

    ok = client.edit_organization_rules(organizationId=args.orgid, rules=new_rules)
    output_json({"success": ok})


def cmd_rules_edit_workspace(args):
    """Replace a workspace's rules and print the success flag as JSON."""
    client = get_client()
    new_rules = require_arg(args, 'rules', 'Rules')

    ok = client.edit_workspace_rules(workspaceId=args.workspaceid, rules=new_rules)
    output_json({"success": ok})


def cmd_rules_edit_service(args):
    """Replace a service's rules and print the success flag as JSON."""
    client = get_client()
    svc_id = require_arg(args, 'serviceid', 'Service ID')
    new_rules = require_arg(args, 'rules', 'Rules')

    ok = client.edit_service_rules(serviceId=svc_id, rules=new_rules)
    output_json({"success": ok})


def cmd_rules_edit_user(args):
    """Replace the current user's rules and print the success flag as JSON."""
    client = get_client()
    new_rules = require_arg(args, 'rules', 'Rules')

    output_json({"success": client.edit_user_rules(rules=new_rules)})


# =============================================================================
# MAIN PARSER
# =============================================================================

def create_parser():
    """Create the argument parser with all subcommands."""
    parser = argparse.ArgumentParser(
        prog='renderedai',
        description='Rendered.ai Platform CLI - JSON output for automation and AI agents',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Environment Variables:
  RENDEREDAI_API_KEY        API key for authentication (required)
  RENDEREDAI_ENVIRONMENT    Environment: prod, test, dev (default: prod)
  RENDEREDAI_ENDPOINT       Custom API endpoint URL

Examples:
  renderedai workspaces get --orgid abc123
  renderedai datasets get --workspaceid xyz789 --limit 10
  renderedai volumes create --name "My Volume" --orgid abc123
  renderedai graphs get --workspaceid xyz789 --graphid graph123
"""
    )

    subparsers = parser.add_subparsers(dest='resource', help='Resource to manage')

    # -------------------------------------------------------------------------
    # WORKSPACES
    # -------------------------------------------------------------------------
    workspaces = subparsers.add_parser('workspaces', help='Manage workspaces')
    workspaces_sub = workspaces.add_subparsers(dest='action', help='Action')

    # workspaces get
    ws_get = workspaces_sub.add_parser('get', help='Get workspaces')
    ws_get.add_argument('--workspaceid', help='Filter by workspace ID')
    ws_get.add_argument('--orgid', help='Filter by organization ID')
    ws_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    ws_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    ws_get.add_argument('--fields', help='Comma-separated fields to return')
    ws_get.set_defaults(func=cmd_workspaces_get)

    # workspaces create
    ws_create = workspaces_sub.add_parser('create', help='Create a workspace')
    ws_create.add_argument('--name', required=True, help='Workspace name')
    ws_create.add_argument('--description', help='Description')
    ws_create.add_argument('--orgid', required=True, help='Organization ID')
    ws_create.add_argument('--channelids', help='Comma-separated channel IDs')
    ws_create.add_argument('--volumeids', help='Comma-separated volume IDs')
    ws_create.add_argument('--tags', help='Comma-separated tags')
    ws_create.set_defaults(func=cmd_workspaces_create)

    # workspaces edit
    ws_edit = workspaces_sub.add_parser('edit', help='Edit a workspace')
    ws_edit.add_argument('--workspaceid', required=True, help='Workspace ID')
    ws_edit.add_argument('--name', help='New name')
    ws_edit.add_argument('--description', help='New description')
    ws_edit.add_argument('--channelids', help='Comma-separated channel IDs')
    ws_edit.add_argument('--volumeids', help='Comma-separated volume IDs')
    ws_edit.add_argument('--tags', help='Comma-separated tags')
    ws_edit.set_defaults(func=cmd_workspaces_edit)

    # workspaces delete
    ws_delete = workspaces_sub.add_parser('delete', help='Delete a workspace')
    ws_delete.add_argument('--workspaceid', required=True, help='Workspace ID')
    ws_delete.set_defaults(func=cmd_workspaces_delete)

    # workspaces mount
    ws_mount = workspaces_sub.add_parser('mount', help='Mount a workspace to local filesystem')
    ws_mount.add_argument('--workspaceid', required=True, help='Workspace ID')
    ws_mount.add_argument('--path', help='Local path to mount to (default: current directory)')
    ws_mount.add_argument('--mountexec', choices=['goofys', 's3fs', 'mount-s3'], help='Mount executable')
    ws_mount.set_defaults(func=cmd_workspaces_mount)

    # workspaces unmount
    ws_unmount = workspaces_sub.add_parser('unmount', help='Unmount a workspace from local filesystem')
    ws_unmount.add_argument('--workspaceid', required=True, help='Workspace ID')
    ws_unmount.set_defaults(func=cmd_workspaces_unmount)

    # -------------------------------------------------------------------------
    # ORGANIZATIONS
    # -------------------------------------------------------------------------
    organizations = subparsers.add_parser('organizations', help='Manage organizations')
    organizations_sub = organizations.add_subparsers(dest='action', help='Action')

    # organizations get
    org_get = organizations_sub.add_parser('get', help='Get organizations')
    org_get.add_argument('--orgid', help='Filter by organization ID')
    org_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    org_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    org_get.add_argument('--fields', help='Comma-separated fields to return')
    org_get.set_defaults(func=cmd_organizations_get)

    # -------------------------------------------------------------------------
    # MEMBERS
    # -------------------------------------------------------------------------
    members = subparsers.add_parser('members', help='Manage organization members')
    members_sub = members.add_subparsers(dest='action', help='Action')

    # members get
    members_get = members_sub.add_parser('get', help='Get organization members')
    members_get.add_argument('--orgid', required=True, help='Organization ID')
    members_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    members_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    members_get.add_argument('--fields', help='Comma-separated fields to return')
    members_get.set_defaults(func=cmd_members_get)

    # -------------------------------------------------------------------------
    # DATASETS
    # -------------------------------------------------------------------------
    datasets = subparsers.add_parser('datasets', help='Manage datasets')
    datasets_sub = datasets.add_subparsers(dest='action', help='Action')

    # datasets get
    ds_get = datasets_sub.add_parser('get', help='Get datasets')
    ds_get.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_get.add_argument('--datasetid', help='Filter by dataset ID')
    ds_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    ds_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    ds_get.add_argument('--fields', help='Comma-separated fields to return')
    ds_get.set_defaults(func=cmd_datasets_get)

    # datasets create
    ds_create = datasets_sub.add_parser('create', help='Create a dataset')
    ds_create.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_create.add_argument('--name', required=True, help='Dataset name')
    ds_create.add_argument('--graphid', required=True, help='Graph ID')
    ds_create.add_argument('--description', help='Description')
    ds_create.add_argument('--runs', type=int, default=1, help='Number of runs')
    ds_create.add_argument('--seed', type=int, default=1, help='Seed')
    ds_create.add_argument('--priority', type=int, default=1, help='Priority (1-3)')
    ds_create.add_argument('--tags', help='Comma-separated tags')
    ds_create.set_defaults(func=cmd_datasets_create)

    # datasets edit
    ds_edit = datasets_sub.add_parser('edit', help='Edit a dataset')
    ds_edit.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_edit.add_argument('--datasetid', required=True, help='Dataset ID')
    ds_edit.add_argument('--name', help='New name')
    ds_edit.add_argument('--description', help='New description')
    ds_edit.add_argument('--tags', help='Comma-separated tags')
    ds_edit.add_argument('--pause', action='store_true', help='Pause the job')
    ds_edit.add_argument('--priority', type=int, help='Priority (1-3)')
    ds_edit.set_defaults(func=cmd_datasets_edit)

    # datasets delete
    ds_delete = datasets_sub.add_parser('delete', help='Delete a dataset')
    ds_delete.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_delete.add_argument('--datasetid', required=True, help='Dataset ID')
    ds_delete.set_defaults(func=cmd_datasets_delete)

    # datasets cancel
    ds_cancel = datasets_sub.add_parser('cancel', help='Cancel a running job')
    ds_cancel.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_cancel.add_argument('--datasetid', required=True, help='Dataset ID')
    ds_cancel.set_defaults(func=cmd_datasets_cancel)

    # datasets download
    ds_download = datasets_sub.add_parser('download', help='Download a dataset or a single file from a dataset')
    ds_download.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_download.add_argument('--datasetid', required=True, help='Dataset ID')
    ds_download.add_argument('--filepath', help='Relative path to a specific file within the dataset (e.g., "images/000000-1-image.png"). If not provided, downloads the entire dataset.')
    ds_download.add_argument('--outputdir', help='Output directory')
    ds_download.add_argument('--extract', action='store_true', help='Extract the downloaded zip file and remove the archive')
    ds_download.set_defaults(func=cmd_datasets_download)

    # datasets upload
    ds_upload = datasets_sub.add_parser('upload', help='Upload a dataset')
    ds_upload.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_upload.add_argument('--file', required=True, help='File to upload')
    ds_upload.add_argument('--description', help='Description')
    ds_upload.add_argument('--tags', help='Comma-separated tags')
    ds_upload.set_defaults(func=cmd_datasets_upload)

    # datasets runs
    ds_runs = datasets_sub.add_parser('runs', help='Get dataset runs')
    ds_runs.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_runs.add_argument('--datasetid', required=True, help='Dataset ID')
    ds_runs.add_argument('--state', help='Filter by state')
    ds_runs.add_argument('--fields', help='Comma-separated fields to return')
    ds_runs.set_defaults(func=cmd_datasets_runs)

    # datasets log
    ds_log = datasets_sub.add_parser('log', help='Get dataset run log')
    ds_log.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_log.add_argument('--datasetid', required=True, help='Dataset ID')
    ds_log.add_argument('--runid', required=True, help='Run ID')
    ds_log.add_argument('--fields', help='Comma-separated fields to return')
    ds_log.set_defaults(func=cmd_datasets_log)

    # datasets files
    ds_files = datasets_sub.add_parser('files', help='Get dataset files')
    ds_files.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_files.add_argument('--datasetid', required=True, help='Dataset ID')
    ds_files.add_argument('--path', help='Path within dataset')
    ds_files.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    ds_files.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    ds_files.set_defaults(func=cmd_datasets_files)

    # datasets jobs
    ds_jobs = datasets_sub.add_parser('jobs', help='Get dataset jobs')
    ds_jobs.add_argument('--workspaceid', help='Workspace ID')
    ds_jobs.add_argument('--orgid', help='Organization ID')
    ds_jobs.add_argument('--datasetid', help='Filter by dataset ID')
    ds_jobs.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    ds_jobs.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    ds_jobs.add_argument('--fields', help='Comma-separated fields to return')
    ds_jobs.set_defaults(func=cmd_datasets_jobs)

    # datasets create-mixed
    ds_create_mixed = datasets_sub.add_parser('create-mixed', help='Create a mixed dataset')
    ds_create_mixed.add_argument('--workspaceid', required=True, help='Workspace ID')
    ds_create_mixed.add_argument('--name', required=True, help='Dataset name')
    ds_create_mixed.add_argument('--parameters', required=True, help='JSON parameters: {"datasetId1": {"samples": N, "classes": [...]}, ...}')
    ds_create_mixed.add_argument('--description', help='Description')
    ds_create_mixed.add_argument('--seed', type=int, help='Seed')
    ds_create_mixed.add_argument('--tags', help='Comma-separated tags')
    ds_create_mixed.set_defaults(func=cmd_datasets_create_mixed)

    # -------------------------------------------------------------------------
    # VOLUMES
    # -------------------------------------------------------------------------
    volumes = subparsers.add_parser('volumes', help='Manage volumes')
    volumes_sub = volumes.add_subparsers(dest='action', help='Action')

    # volumes get
    vol_get = volumes_sub.add_parser('get', help='Get volumes')
    vol_get.add_argument('--volumeid', help='Filter by volume ID')
    vol_get.add_argument('--workspaceid', help='Filter by workspace ID')
    vol_get.add_argument('--orgid', help='Filter by organization ID')
    vol_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    vol_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    vol_get.add_argument('--fields', help='Comma-separated fields to return')
    vol_get.set_defaults(func=cmd_volumes_get)

    # volumes create
    vol_create = volumes_sub.add_parser('create', help='Create a volume')
    vol_create.add_argument('--name', required=True, help='Volume name')
    vol_create.add_argument('--description', help='Description')
    vol_create.add_argument('--orgid', required=True, help='Organization ID')
    vol_create.add_argument('--permission', choices=['read', 'write', 'view'], help='Permission')
    vol_create.add_argument('--tags', help='Comma-separated tags')
    vol_create.set_defaults(func=cmd_volumes_create)

    # volumes edit
    vol_edit = volumes_sub.add_parser('edit', help='Edit a volume')
    vol_edit.add_argument('--volumeid', required=True, help='Volume ID')
    vol_edit.add_argument('--name', help='New name')
    vol_edit.add_argument('--description', help='New description')
    vol_edit.add_argument('--permission', choices=['read', 'write', 'view'], help='Permission')
    vol_edit.add_argument('--tags', help='Comma-separated tags')
    vol_edit.set_defaults(func=cmd_volumes_edit)

    # volumes delete
    vol_delete = volumes_sub.add_parser('delete', help='Delete a volume')
    vol_delete.add_argument('--volumeid', required=True, help='Volume ID')
    vol_delete.set_defaults(func=cmd_volumes_delete)

    # volumes mount
    vol_mount = volumes_sub.add_parser('mount', help='Mount a volume to local filesystem')
    vol_mount.add_argument('--volumeid', required=True, help='Volume ID')
    vol_mount.add_argument('--path', help='Local path to mount to (default: current directory)')
    vol_mount.add_argument('--mountexec', choices=['goofys', 's3fs', 'mount-s3'], help='Mount executable')
    vol_mount.set_defaults(func=cmd_volumes_mount)

    # volumes unmount
    vol_unmount = volumes_sub.add_parser('unmount', help='Unmount a volume from local filesystem')
    vol_unmount.add_argument('--volumeid', required=True, help='Volume ID')
    vol_unmount.set_defaults(func=cmd_volumes_unmount)

    # -------------------------------------------------------------------------
    # VOLUME-DATA
    # -------------------------------------------------------------------------
    volume_data = subparsers.add_parser('volume-data', help='Manage volume data')
    volume_data_sub = volume_data.add_subparsers(dest='action', help='Action')

    # volume-data get
    vd_get = volume_data_sub.add_parser('get', help='Get volume data')
    vd_get.add_argument('--volumeid', required=True, help='Volume ID')
    vd_get.add_argument('--dir', help='Directory path')
    vd_get.add_argument('--files', help='Comma-separated file paths')
    vd_get.add_argument('--recursive', action='store_true', help='Recursive listing')
    vd_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    vd_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    vd_get.set_defaults(func=cmd_volume_data_get)

    # volume-data upload
    vd_upload = volume_data_sub.add_parser('upload', help='Upload data to a volume')
    vd_upload.add_argument('--volumeid', required=True, help='Volume ID')
    vd_upload.add_argument('--localdir', help='Local directory')
    vd_upload.add_argument('--files', help='Comma-separated files to upload')
    vd_upload.add_argument('--destdir', help='Destination directory in volume')
    vd_upload.add_argument('--sync', action='store_true', help='Sync mode')
    vd_upload.set_defaults(func=cmd_volume_data_upload)

    # volume-data download
    vd_download = volume_data_sub.add_parser('download', help='Download data from a volume')
    vd_download.add_argument('--volumeid', required=True, help='Volume ID')
    vd_download.add_argument('--outputdir', help='Output directory')
    vd_download.add_argument('--files', help='Comma-separated files to download')
    vd_download.add_argument('--recursive', action='store_true', default=True, help='Recursive download')
    vd_download.add_argument('--sync', action='store_true', help='Sync mode')
    vd_download.set_defaults(func=cmd_volume_data_download)

    # volume-data delete
    vd_delete = volume_data_sub.add_parser('delete', help='Delete data from a volume')
    vd_delete.add_argument('--volumeid', required=True, help='Volume ID')
    vd_delete.add_argument('--files', required=True, help='Comma-separated files to delete')
    vd_delete.set_defaults(func=cmd_volume_data_delete)

    # volume-data search
    vd_search = volume_data_sub.add_parser('search', help='Search a volume')
    vd_search.add_argument('--volumeid', required=True, help='Volume ID')
    vd_search.add_argument('--dir', help='Directory to search')
    vd_search.add_argument('--recursive', action='store_true', default=True, help='Recursive search')
    vd_search.add_argument('--keywords', help='Comma-separated keywords')
    vd_search.add_argument('--formats', help='Comma-separated file formats (e.g., png,jpg)')
    vd_search.add_argument('--types', help='Comma-separated file types (e.g., Image,3D)')
    vd_search.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    vd_search.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    vd_search.set_defaults(func=cmd_volume_data_search)

    # -------------------------------------------------------------------------
    # GRAPHS
    # -------------------------------------------------------------------------
    graphs = subparsers.add_parser('graphs', help='Manage graphs')
    graphs_sub = graphs.add_subparsers(dest='action', help='Action')

    # graphs get
    gr_get = graphs_sub.add_parser('get', help='Get graphs')
    gr_get.add_argument('--workspaceid', required=True, help='Workspace ID')
    gr_get.add_argument('--graphid', help='Filter by graph ID')
    gr_get.add_argument('--staged', action='store_true', help='Only staged graphs')
    gr_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    gr_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    gr_get.add_argument('--fields', help='Comma-separated fields to return')
    gr_get.set_defaults(func=cmd_graphs_get)

    # graphs create -- register the 'create' action on the graphs subparser.
    p = graphs_sub.add_parser('create', help='Create a graph (editable)')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--file', required=True, help='Graph file (JSON or YAML)')
    p.add_argument('--channelid', required=True, help='Channel ID')
    p.add_argument('--name', required=True, help='Graph name')
    p.add_argument('--description', help='Description')
    p.set_defaults(func=cmd_graphs_create)

    # graphs edit -- metadata fields and/or a replacement contents file.
    p = graphs_sub.add_parser('edit', help='Edit a graph (metadata and/or contents)')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--graphid', required=True, help='Graph ID')
    p.add_argument('--name', help='New name')
    p.add_argument('--description', help='New description')
    p.add_argument('--tags', help='Comma-separated tags')
    p.add_argument('--file', help='Graph file (JSON or YAML) to upload as new graph contents')
    p.set_defaults(func=cmd_graphs_edit)

    # graphs delete
    p = graphs_sub.add_parser('delete', help='Delete a graph')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--graphid', required=True, help='Graph ID')
    p.set_defaults(func=cmd_graphs_delete)

    # graphs download
    p = graphs_sub.add_parser('download', help='Download a graph to a file')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--graphid', required=True, help='Graph ID')
    p.add_argument('--outputfile', help='Output file path (default: <graphid>.yaml)')
    p.set_defaults(func=cmd_graphs_download)

    # graphs stage
    p = graphs_sub.add_parser('stage', help='Stage an existing graph (creates read-only copy)')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--graphid', required=True, help='Graph ID to stage')
    p.add_argument('--name', help='Name for staged graph (default: original-name-staged)')
    p.add_argument('--description', help='Description for staged graph')
    p.set_defaults(func=cmd_graphs_stage)

    # ======================== GRAPH-EDITOR ========================
    # Local graph-file manipulation plus editor integration.
    ge = subparsers.add_parser('graph-editor', help='Graph editor integration')
    ge_actions = ge.add_subparsers(dest='action', help='Action')
    # NOTE(review): the subaction is optional at parse time (no required=True);
    # presumably the caller guards a missing args.func -- confirm.

    # graph-editor open -- either platform download (--workspaceid/--graphid)
    # or local files (--graphfile/--schemafile); the handler picks the mode.
    p = ge_actions.add_parser('open', help='Open graph in editor (download from platform or use local files)')
    p.add_argument('--workspaceid', help='Workspace ID (use with --graphid to download from platform)')
    p.add_argument('--graphid', help='Graph ID (use with --workspaceid to download from platform)')
    p.add_argument('--outputdir', help='Output directory for downloaded files (default: current directory)')
    p.add_argument('--graphfile', help='Path to local graph file (use with --schemafile)')
    p.add_argument('--schemafile', help='Path to local schema file (use with --graphfile)')
    p.set_defaults(func=cmd_graph_editor_open)

    # graph-editor edit-node
    p = ge_actions.add_parser('edit-node', help='Edit a node\'s values in a local graph file')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--node', required=True, help='Name of the node to edit')
    p.add_argument('--values', required=True, help='JSON object with values to update (e.g., \'{"param": "value"}\')')
    p.set_defaults(func=cmd_graph_editor_edit_node)

    # graph-editor add-node
    p = ge_actions.add_parser('add-node', help='Add a new node to a local graph file')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--nodeclass', required=True, help='Node class to instantiate')
    p.add_argument('--name', help='Custom node name (default: nodeClass_N)')
    p.add_argument('--values', help='JSON object with initial values')
    p.add_argument('--location', help='Position as {"x": N, "y": N} or [x, y]')
    p.add_argument('--color', help='Node color as hex (default: #808080)')
    p.add_argument('--tooltip', help='Node tooltip/description')
    p.set_defaults(func=cmd_graph_editor_add_node)

    # graph-editor add-link
    p = ge_actions.add_parser('add-link', help='Add a link between two nodes')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--source', required=True, help='Source node name')
    p.add_argument('--output', required=True, help='Output port name on source node')
    p.add_argument('--target', required=True, help='Target node name')
    p.add_argument('--input', required=True, help='Input port name on target node')
    p.set_defaults(func=cmd_graph_editor_add_link)

    # graph-editor remove-node
    p = ge_actions.add_parser('remove-node', help='Remove a node and its links from a graph')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--node', required=True, help='Name of the node to remove')
    p.set_defaults(func=cmd_graph_editor_remove_node)

    # graph-editor remove-link
    p = ge_actions.add_parser('remove-link', help='Remove a link between two nodes')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--source', required=True, help='Source node name')
    p.add_argument('--output', required=True, help='Output port name on source node')
    p.add_argument('--target', required=True, help='Target node name')
    p.add_argument('--input', required=True, help='Input port name on target node')
    p.set_defaults(func=cmd_graph_editor_remove_link)

    # graph-editor add-volume-file
    p = ge_actions.add_parser('add-volume-file', help='Add a VolumeFile node referencing a file in a volume')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--volumeid', required=True, help='Volume UUID')
    p.add_argument('--path', required=True, help='File path within the volume (e.g., /models/model.blend)')
    p.add_argument('--name', help='Custom node name (default: VolumeFile_N)')
    p.add_argument('--volumename', help='Volume display name for tooltip (default: uses volumeid)')
    p.add_argument('--location', help='Position as {"x": N, "y": N} or [x, y]')
    p.set_defaults(func=cmd_graph_editor_add_volume_file)

    # graph-editor add-volume-directory -- --path is optional here, unlike add-volume-file.
    p = ge_actions.add_parser('add-volume-directory', help='Add a VolumeDirectory node referencing a directory in a volume')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--volumeid', required=True, help='Volume UUID')
    p.add_argument('--path', help='Directory path within the volume (default: /)')
    p.add_argument('--name', help='Custom node name (default: VolumeDirectory_N)')
    p.add_argument('--volumename', help='Volume display name for tooltip (default: uses volumeid)')
    p.add_argument('--location', help='Position as {"x": N, "y": N} or [x, y]')
    p.set_defaults(func=cmd_graph_editor_add_volume_directory)

    # graph-editor list-nodes
    p = ge_actions.add_parser('list-nodes', help='List all nodes in a local graph file')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--verbose', '-v', action='store_true', help='Include link details')
    p.set_defaults(func=cmd_graph_editor_list_nodes)

    # graph-editor get-node
    p = ge_actions.add_parser('get-node', help='Get detailed info about a specific node')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--node', required=True, help='Name of the node to inspect')
    p.set_defaults(func=cmd_graph_editor_get_node)

    # graph-editor move-node
    p = ge_actions.add_parser('move-node', help='Move a node to a new location')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--node', required=True, help='Name of the node to move')
    p.add_argument('--location', required=True, help='New position as {"x": N, "y": N} or [x, y]')
    p.set_defaults(func=cmd_graph_editor_move_node)

    # graph-editor clone-node
    p = ge_actions.add_parser('clone-node', help='Clone an existing node')
    p.add_argument('--file', required=True, help='Path to graph file (.yaml, .yml, or .json)')
    p.add_argument('--source', required=True, help='Name of the node to clone')
    p.add_argument('--name', help='Name for the cloned node (default: nodeClass_N)')
    p.add_argument('--location', help='Position as {"x": N, "y": N} or [x, y] (default: offset from source)')
    p.set_defaults(func=cmd_graph_editor_clone_node)

    # graph-editor status
    p = ge_actions.add_parser('status', help='Get graph editor status and validation errors')
    p.add_argument('--file', help='Filter to a specific graph file path')
    p.set_defaults(func=cmd_graph_editor_status)

    # ======================== DATASET-VIEWER ========================
    # Navigation and display control for the annotation viewer.
    dv = subparsers.add_parser('dataset-viewer', help='Dataset annotation viewer integration')
    dv_actions = dv.add_subparsers(dest='action', help='Action')

    # dataset-viewer open
    p = dv_actions.add_parser('open', help='Open a dataset folder in the Annotation Viewer')
    p.add_argument('--path', required=True, help='Path to dataset directory (must contain images/ subdirectory)')
    p.add_argument('--index', type=int, default=0, help='Initial image index (default: 0)')
    p.set_defaults(func=cmd_dataset_viewer_open)

    # dataset-viewer next -- no options.
    p = dv_actions.add_parser('next', help='Navigate to the next image')
    p.set_defaults(func=cmd_dataset_viewer_next)

    # dataset-viewer prev -- no options.
    p = dv_actions.add_parser('prev', help='Navigate to the previous image')
    p.set_defaults(func=cmd_dataset_viewer_prev)

    # dataset-viewer goto -- either --index or --name selects the image.
    p = dv_actions.add_parser('goto', help='Navigate to a specific image by index or name')
    p.add_argument('--index', type=int, help='Image index (0-based)')
    p.add_argument('--name', help='Image filename (or partial match)')
    p.set_defaults(func=cmd_dataset_viewer_goto)

    # dataset-viewer annotations
    p = dv_actions.add_parser('annotations', help='Set which annotation types are displayed')
    p.add_argument('--types', required=True, help='Comma-separated annotation types: bbox,bbox3d,segmentation,centroid,mask')
    p.set_defaults(func=cmd_dataset_viewer_annotations)

    # dataset-viewer filter
    p = dv_actions.add_parser('filter', help='Filter visible objects by type (omit --types to show all)')
    p.add_argument('--types', help='Comma-separated object type names to show (omit to clear filter)')
    p.set_defaults(func=cmd_dataset_viewer_filter)

    # dataset-viewer status
    p = dv_actions.add_parser('status', help='Get dataset viewer status')
    p.add_argument('--path', help='Filter to a specific dataset path')
    p.set_defaults(func=cmd_dataset_viewer_status)

    # ======================== CHANNELS ========================
    ch = subparsers.add_parser('channels', help='Manage channels')
    ch_actions = ch.add_subparsers(dest='action', help='Action')

    # channels get
    p = ch_actions.add_parser('get', help='Get channels')
    p.add_argument('--workspaceid', help='Filter by workspace ID')
    p.add_argument('--orgid', help='Filter by organization ID')
    p.add_argument('--channelid', help='Filter by channel ID')
    p.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    p.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    p.add_argument('--fields', help='Comma-separated fields to return')
    p.set_defaults(func=cmd_channels_get)

    # channels schema -- rich filtering over the channel's node schema.
    p = ch_actions.add_parser('schema', help='Get channel schema')
    p.add_argument('--channelid', required=True, help='Channel ID')
    p.add_argument('--fields', help='Comma-separated fields to return')
    p.add_argument('--category', help='Filter by category (e.g., Objects, Backgrounds, Sensors)')
    p.add_argument('--subcategory', help='Filter by subcategory (e.g., Aircraft, Vehicles, Ships)')
    p.add_argument('--search', help='Case-insensitive search across name, category, subcategory, tooltip, and descriptions')
    p.add_argument('--names-only', action='store_true', help='Output only node names (sorted)')
    p.add_argument('--list-categories', action='store_true', help='List all available categories and subcategories')
    p.set_defaults(func=cmd_channels_schema)

    # channels nodes
    p = ch_actions.add_parser('nodes', help='Get node documentation')
    p.add_argument('--channelid', required=True, help='Channel ID')
    p.add_argument('--node', required=True, help='Node name')
    p.add_argument('--fields', help='Comma-separated fields to return')
    p.set_defaults(func=cmd_channels_nodes)

    # channels docs
    p = ch_actions.add_parser('docs', help='Get channel documentation')
    p.add_argument('--channelid', required=True, help='Channel ID')
    p.set_defaults(func=cmd_channels_docs)

    # channels get-default-graph
    p = ch_actions.add_parser('get-default-graph', help='Get the default graph for a channel')
    p.add_argument('--channelid', required=True, help='Channel ID')
    p.add_argument('--outputfile', help='Output file path (default: default.yaml)')
    p.set_defaults(func=cmd_channels_get_default_graph)

    # channels set-default-graph
    p = ch_actions.add_parser('set-default-graph', help='Set the default graph for a channel')
    p.add_argument('--graphid', required=True, help='Graph ID')
    p.add_argument('--workspaceid', help='Workspace ID')
    p.set_defaults(func=cmd_channels_set_default_graph)

    # ======================== SERVICES ========================
    svc = subparsers.add_parser('services', help='Manage services')
    svc_actions = svc.add_subparsers(dest='action', help='Action')

    # services get
    p = svc_actions.add_parser('get', help='Get services')
    p.add_argument('--workspaceid', help='Filter by workspace ID')
    p.add_argument('--orgid', help='Filter by organization ID')
    p.add_argument('--serviceid', help='Filter by service ID')
    p.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    p.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    p.add_argument('--fields', help='Comma-separated fields to return')
    p.set_defaults(func=cmd_services_get)

    # services create -- --type falls back to the 'custom' service type.
    p = svc_actions.add_parser('create', help='Create a service')
    p.add_argument('--name', required=True, help='Service name')
    p.add_argument('--description', help='Description')
    p.add_argument('--orgid', required=True, help='Organization ID')
    p.add_argument('--type', default='custom', help='Service type ID')
    p.add_argument('--volumes', help='Comma-separated volume IDs')
    p.add_argument('--instance', help='AWS instance type')
    p.add_argument('--tags', help='Comma-separated tags')
    p.set_defaults(func=cmd_services_create)

    # services edit
    p = svc_actions.add_parser('edit', help='Edit a service')
    p.add_argument('--serviceid', required=True, help='Service ID')
    p.add_argument('--name', help='New name')
    p.add_argument('--description', help='New description')
    p.add_argument('--volumes', help='Comma-separated volume IDs')
    p.add_argument('--instance', help='AWS instance type')
    p.add_argument('--tags', help='Comma-separated tags')
    p.set_defaults(func=cmd_services_edit)

    # services delete
    p = svc_actions.add_parser('delete', help='Delete a service')
    p.add_argument('--serviceid', required=True, help='Service ID')
    p.set_defaults(func=cmd_services_delete)

    # ======================== SERVICE-JOBS ========================
    sj = subparsers.add_parser('service-jobs', help='Manage service jobs')
    sj_actions = sj.add_subparsers(dest='action', help='Action')

    # service-jobs get
    # (handlers named cmd_services_* rather than cmd_service_jobs_* -- defined elsewhere in the file)
    p = sj_actions.add_parser('get', help='Get service jobs')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--jobid', help='Filter by job ID')
    p.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    p.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    p.add_argument('--fields', help='Comma-separated fields to return')
    p.set_defaults(func=cmd_services_jobs)

    # service-jobs delete
    p = sj_actions.add_parser('delete', help='Delete a service job')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--jobid', required=True, help='Job ID')
    p.set_defaults(func=cmd_services_delete_job)

    # ======================== API-KEYS ========================
    ak = subparsers.add_parser('api-keys', help='Manage API keys')
    ak_actions = ak.add_subparsers(dest='action', help='Action')

    # api-keys get -- no options.
    p = ak_actions.add_parser('get', help='Get API keys')
    p.set_defaults(func=cmd_api_keys_get)

    # api-keys create -- scope is constrained; orgid/workspaceid only apply
    # to the matching scope (validated by the handler, not argparse).
    p = ak_actions.add_parser('create', help='Create an API key')
    p.add_argument('--name', required=True, help='API key name')
    p.add_argument('--scope', required=True, choices=['user', 'organization', 'workspace'], help='Scope')
    p.add_argument('--orgid', help='Organization ID (for org scope)')
    p.add_argument('--workspaceid', help='Workspace ID (for workspace scope)')
    p.add_argument('--expires', help='Expiration date (ISO format)')
    p.set_defaults(func=cmd_api_keys_create)

    # api-keys delete
    p = ak_actions.add_parser('delete', help='Delete an API key')
    p.add_argument('--apikeyid', required=True, help='API key ID')
    p.set_defaults(func=cmd_api_keys_delete)

    # ======================== ANALYTICS ========================
    an = subparsers.add_parser('analytics', help='Manage analytics')
    an_actions = an.add_subparsers(dest='action', help='Action')

    # analytics get
    p = an_actions.add_parser('get', help='Get analytics')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--datasetid', help='Dataset ID')
    p.add_argument('--analyticsid', help='Analytics ID')
    p.set_defaults(func=cmd_analytics_get)

    # analytics create
    p = an_actions.add_parser('create', help='Create analytics')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--datasetid', required=True, help='Dataset ID')
    p.add_argument('--type', required=True, help='Analytics type')
    p.set_defaults(func=cmd_analytics_create)

    # analytics delete
    p = an_actions.add_parser('delete', help='Delete analytics')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--analyticsid', required=True, help='Analytics ID')
    p.set_defaults(func=cmd_analytics_delete)

    # analytics types -- no options.
    p = an_actions.add_parser('types', help='Get analytics types')
    p.set_defaults(func=cmd_analytics_types)

    # analytics download
    p = an_actions.add_parser('download', help='Download analytics results')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--analyticsid', required=True, help='Analytics ID')
    p.set_defaults(func=cmd_analytics_download)

    # analytics edit
    p = an_actions.add_parser('edit', help='Edit analytics tags')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--analyticsid', required=True, help='Analytics ID')
    p.add_argument('--tags', required=True, help='Comma-separated tags')
    p.set_defaults(func=cmd_analytics_edit)

    # ======================== ANNOTATIONS ========================
    ann = subparsers.add_parser('annotations', help='Manage annotations')
    ann_actions = ann.add_subparsers(dest='action', help='Action')

    # annotations get
    p = ann_actions.add_parser('get', help='Get annotations')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--datasetid', help='Dataset ID')
    p.add_argument('--annotationid', help='Annotation ID')
    p.set_defaults(func=cmd_annotations_get)

    # annotations create
    p = ann_actions.add_parser('create', help='Create an annotation')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--datasetid', required=True, help='Dataset ID')
    p.add_argument('--format', required=True, help='Annotation format')
    p.add_argument('--mapid', help='Annotation map ID')
    p.set_defaults(func=cmd_annotations_create)

    # annotations download
    p = ann_actions.add_parser('download', help='Download an annotation')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--annotationid', required=True, help='Annotation ID')
    p.add_argument('--extract', action='store_true', help='Extract the downloaded zip file and remove the archive')
    p.set_defaults(func=cmd_annotations_download)

    # annotations formats -- no options.
    p = ann_actions.add_parser('formats', help='Get annotation formats')
    p.set_defaults(func=cmd_annotations_formats)

    # annotations delete
    p = ann_actions.add_parser('delete', help='Delete an annotation')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--annotationid', required=True, help='Annotation ID')
    p.set_defaults(func=cmd_annotations_delete)

    # annotations edit
    p = ann_actions.add_parser('edit', help='Edit annotation tags')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--annotationid', required=True, help='Annotation ID')
    p.add_argument('--tags', required=True, help='Comma-separated tags')
    p.set_defaults(func=cmd_annotations_edit)

    # annotations view -- local rendering of annotations onto an image.
    p = ann_actions.add_parser('view', help='Generate image with annotations overlayed')
    p.add_argument('--imagepath', required=True, help='Path to the image file in the dataset directory')
    p.add_argument('--outdir', required=True, help='Output directory for the annotated image')
    p.add_argument('--drawtype', default='box_2d', help='Annotation type(s) to draw: box_2d, box_3d, segmentation (comma-separated for multiple)')
    p.add_argument('--objectids', help='Comma-separated list of object IDs to annotate (filter)')
    p.add_argument('--objecttypes', help='Comma-separated list of object types to annotate (filter)')
    p.add_argument('--thickness', type=int, default=1, help='Line thickness for annotations (default: 1)')
    p.add_argument('--colors', help='JSON dict of object type to RGB color, e.g. \'{"Car": [255, 0, 0]}\'')
    p.set_defaults(func=cmd_annotations_view)

    # ======================== ANNOTATION MAPS ========================
    am = subparsers.add_parser('annotation-maps', help='Manage annotation maps')
    am_actions = am.add_subparsers(dest='action', help='Action')

    # annotation-maps get
    p = am_actions.add_parser('get', help='Get annotation maps')
    p.add_argument('--orgid', required=True, help='Organization ID')
    p.set_defaults(func=cmd_annotation_maps_get)

    # annotation-maps upload
    p = am_actions.add_parser('upload', help='Upload an annotation map')
    p.add_argument('--orgid', required=True, help='Organization ID')
    p.add_argument('--mapfile', required=True, help='Path to map file')
    p.add_argument('--name', required=True, help='Map name')
    p.add_argument('--description', help='Description')
    p.add_argument('--tags', help='Comma-separated tags')
    p.set_defaults(func=cmd_annotation_maps_upload)

    # annotation-maps download
    p = am_actions.add_parser('download', help='Download an annotation map')
    p.add_argument('--mapid', required=True, help='Map ID')
    p.add_argument('--outputdir', help='Output directory')
    p.set_defaults(func=cmd_annotation_maps_download)

    # annotation-maps delete
    p = am_actions.add_parser('delete', help='Delete an annotation map')
    p.add_argument('--mapid', required=True, help='Map ID')
    p.set_defaults(func=cmd_annotation_maps_delete)

    # annotation-maps edit
    p = am_actions.add_parser('edit', help='Edit an annotation map')
    p.add_argument('--mapid', required=True, help='Map ID')
    p.add_argument('--name', help='New name')
    p.add_argument('--description', help='New description')
    p.add_argument('--tags', help='Comma-separated tags')
    p.set_defaults(func=cmd_annotation_maps_edit)

    # ======================== GAN MODELS ========================
    gm = subparsers.add_parser('gan-models', help='Manage GAN models')
    gm_actions = gm.add_subparsers(dest='action', help='Action')

    # gan-models get
    p = gm_actions.add_parser('get', help='Get GAN models')
    p.add_argument('--orgid', help='Organization ID')
    p.add_argument('--workspaceid', help='Workspace ID')
    p.add_argument('--modelid', help='Model ID')
    p.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    p.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    p.add_argument('--fields', help='Comma-separated fields')
    p.set_defaults(func=cmd_gan_models_get)

    # gan-models upload
    p = gm_actions.add_parser('upload', help='Upload a GAN model')
    p.add_argument('--orgid', required=True, help='Organization ID')
    p.add_argument('--modelfile', required=True, help='Path to model file')
    p.add_argument('--name', required=True, help='Model name')
    p.add_argument('--description', help='Description')
    p.add_argument('--flags', help='Model flags')
    p.add_argument('--tags', help='Comma-separated tags')
    p.set_defaults(func=cmd_gan_models_upload)

    # gan-models download
    p = gm_actions.add_parser('download', help='Download a GAN model')
    p.add_argument('--modelid', required=True, help='Model ID')
    p.add_argument('--outputdir', help='Output directory')
    p.set_defaults(func=cmd_gan_models_download)

    # ======================== GAN DATASETS ========================
    gd = subparsers.add_parser('gan-datasets', help='Manage GAN datasets')
    gd_actions = gd.add_subparsers(dest='action', help='Action')

    # gan-datasets get
    p = gd_actions.add_parser('get', help='Get GAN datasets')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--datasetid', help='Dataset ID')
    p.add_argument('--gandatasetid', help='GAN dataset ID')
    p.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    p.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    p.add_argument('--fields', help='Comma-separated fields')
    p.set_defaults(func=cmd_gan_datasets_get)

    # gan-datasets create -- runs a GAN model over an input dataset.
    p = gd_actions.add_parser('create', help='Create a GAN dataset')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--datasetid', required=True, help='Input dataset ID')
    p.add_argument('--modelid', required=True, help='GAN model ID')
    p.add_argument('--name', required=True, help='Dataset name')
    p.add_argument('--description', help='Description')
    p.add_argument('--tags', help='Comma-separated tags')
    p.set_defaults(func=cmd_gan_datasets_create)

    # gan-datasets delete
    p = gd_actions.add_parser('delete', help='Delete a GAN dataset')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--datasetid', required=True, help='Dataset ID')
    p.set_defaults(func=cmd_gan_datasets_delete)

    # ======================== UMAP ========================
    um = subparsers.add_parser('umap', help='Manage UMAP visualizations')
    um_actions = um.add_subparsers(dest='action', help='Action')

    # umap get
    p = um_actions.add_parser('get', help='Get UMAPs')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--umapid', help='UMAP ID')
    p.add_argument('--datasetid', help='Dataset ID')
    p.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    p.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    p.add_argument('--fields', help='Comma-separated fields')
    p.set_defaults(func=cmd_umap_get)

    # umap create -- datasetids and samples are parallel comma-separated lists.
    p = um_actions.add_parser('create', help='Create a UMAP')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--name', required=True, help='UMAP name')
    p.add_argument('--datasetids', required=True, help='Comma-separated dataset IDs')
    p.add_argument('--samples', required=True, help='Comma-separated sample counts')
    p.add_argument('--description', help='Description')
    p.add_argument('--seed', type=int, help='Seed')
    p.add_argument('--tags', help='Comma-separated tags')
    p.set_defaults(func=cmd_umap_create)

    # umap delete
    p = um_actions.add_parser('delete', help='Delete a UMAP')
    p.add_argument('--workspaceid', required=True, help='Workspace ID')
    p.add_argument('--umapid', required=True, help='UMAP ID')
    p.set_defaults(func=cmd_umap_delete)

    # -------------------------------------------------------------------------
    # SERVERS
    # -------------------------------------------------------------------------
    servers = subparsers.add_parser('servers', help='Manage development servers')
    servers_sub = servers.add_subparsers(dest='action', help='Action')

    # servers get
    srv_get = servers_sub.add_parser('get', help='Get servers')
    srv_get.add_argument('--orgid', help='Organization ID')
    srv_get.add_argument('--workspaceid', help='Workspace ID')
    srv_get.add_argument('--serverid', help='Server ID')
    srv_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    srv_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    srv_get.add_argument('--fields', help='Comma-separated fields')
    srv_get.set_defaults(func=cmd_servers_get)

    # servers create
    srv_create = servers_sub.add_parser('create', help='Create a server')
    srv_create.add_argument('--orgid', help='Organization ID')
    srv_create.add_argument('--workspaceid', help='Workspace ID')
    srv_create.add_argument('--instance', help='Instance type')
    srv_create.add_argument('--name', help='Server name')
    srv_create.set_defaults(func=cmd_servers_create)

    # servers delete
    srv_delete = servers_sub.add_parser('delete', help='Delete a server')
    srv_delete.add_argument('--serverid', required=True, help='Server ID')
    srv_delete.set_defaults(func=cmd_servers_delete)

    # servers start
    srv_start = servers_sub.add_parser('start', help='Start a server')
    srv_start.add_argument('--serverid', required=True, help='Server ID')
    srv_start.set_defaults(func=cmd_servers_start)

    # servers stop
    srv_stop = servers_sub.add_parser('stop', help='Stop a server')
    srv_stop.add_argument('--serverid', required=True, help='Server ID')
    srv_stop.set_defaults(func=cmd_servers_stop)

    # -------------------------------------------------------------------------
    # ML MODELS
    # -------------------------------------------------------------------------
    ml_models = subparsers.add_parser('ml-models', help='Manage ML models')
    ml_models_sub = ml_models.add_subparsers(dest='action', help='Action')

    # ml-models architectures
    ml_arch = ml_models_sub.add_parser('architectures', help='Get ML architectures')
    ml_arch.add_argument('--fields', help='Comma-separated fields')
    ml_arch.set_defaults(func=cmd_ml_architectures)

    # ml-models get
    ml_models_get = ml_models_sub.add_parser('get', help='Get ML models')
    ml_models_get.add_argument('--workspaceid', required=True, help='Workspace ID')
    ml_models_get.add_argument('--datasetid', help='Dataset ID')
    ml_models_get.add_argument('--modelid', help='Model ID')
    ml_models_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    ml_models_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    ml_models_get.add_argument('--fields', help='Comma-separated fields')
    ml_models_get.set_defaults(func=cmd_ml_models_get)

    # ml-models create
    ml_models_create = ml_models_sub.add_parser('create', help='Create ML model training job')
    ml_models_create.add_argument('--workspaceid', required=True, help='Workspace ID')
    ml_models_create.add_argument('--datasetid', required=True, help='Dataset ID')
    ml_models_create.add_argument('--architectureid', required=True, help='Architecture ID')
    ml_models_create.add_argument('--name', required=True, help='Model name')
    ml_models_create.add_argument('--parameters', required=True, help='JSON parameters')
    ml_models_create.add_argument('--description', help='Description')
    ml_models_create.add_argument('--tags', help='Comma-separated tags')
    ml_models_create.set_defaults(func=cmd_ml_models_create)

    # ml-models download
    ml_models_download = ml_models_sub.add_parser('download', help='Download ML model')
    ml_models_download.add_argument('--workspaceid', required=True, help='Workspace ID')
    ml_models_download.add_argument('--modelid', required=True, help='Model ID')
    ml_models_download.add_argument('--checkpoint', help='Checkpoint to download')
    ml_models_download.add_argument('--outputdir', help='Output directory')
    ml_models_download.set_defaults(func=cmd_ml_models_download)

    # -------------------------------------------------------------------------
    # ML INFERENCES
    # -------------------------------------------------------------------------
    ml_inferences = subparsers.add_parser('ml-inferences', help='Manage ML inferences')
    ml_inferences_sub = ml_inferences.add_subparsers(dest='action', help='Action')

    # ml-inferences get
    ml_inf_get = ml_inferences_sub.add_parser('get', help='Get ML inferences')
    ml_inf_get.add_argument('--workspaceid', required=True, help='Workspace ID')
    ml_inf_get.add_argument('--inferenceid', help='Inference ID')
    ml_inf_get.add_argument('--datasetid', help='Dataset ID')
    ml_inf_get.add_argument('--modelid', help='Model ID')
    ml_inf_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    ml_inf_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    ml_inf_get.add_argument('--fields', help='Comma-separated fields')
    ml_inf_get.set_defaults(func=cmd_ml_inferences_get)

    # ml-inferences create
    ml_inf_create = ml_inferences_sub.add_parser('create', help='Create ML inference job')
    ml_inf_create.add_argument('--workspaceid', required=True, help='Workspace ID')
    ml_inf_create.add_argument('--datasetid', required=True, help='Dataset ID')
    ml_inf_create.add_argument('--modelid', required=True, help='Model ID')
    ml_inf_create.add_argument('--mapid', help='Map ID')
    ml_inf_create.add_argument('--tags', help='Comma-separated tags')
    ml_inf_create.set_defaults(func=cmd_ml_inferences_create)

    # -------------------------------------------------------------------------
    # INPAINT
    # -------------------------------------------------------------------------
    inpaint = subparsers.add_parser('inpaint', help='Manage inpaint jobs')
    inpaint_sub = inpaint.add_subparsers(dest='action', help='Action')

    # inpaint get
    inp_get = inpaint_sub.add_parser('get', help='Get inpaint jobs')
    inp_get.add_argument('--volumeid', required=True, help='Volume ID')
    inp_get.add_argument('--inpaintid', help='Inpaint ID')
    inp_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
    inp_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
    inp_get.add_argument('--fields', help='Comma-separated fields')
    inp_get.set_defaults(func=cmd_inpaint_get)

    # inpaint log
    inp_log = inpaint_sub.add_parser('log', help='Get inpaint job log')
    inp_log.add_argument('--volumeid', required=True, help='Volume ID')
    inp_log.add_argument('--inpaintid', required=True, help='Inpaint ID')
    inp_log.add_argument('--fields', help='Comma-separated fields')
    inp_log.set_defaults(func=cmd_inpaint_log)

    # inpaint create
    inp_create = inpaint_sub.add_parser('create', help='Create an inpaint job')
    inp_create.add_argument('--volumeid', required=True, help='Volume ID')
    inp_create.add_argument('--location', required=True, help='Input location')
    inp_create.add_argument('--files', help='Comma-separated files to inpaint')
    inp_create.add_argument('--destination', help='Output destination')
    inp_create.add_argument('--dilation', type=int, default=5, help='Dilation (default: 5)')
    inp_create.add_argument('--inputtype', default='MASK', choices=['MASK', 'GEOJSON', 'COCO', 'KITTI', 'PASCAL', 'YOLO'], help='Input type')
    inp_create.add_argument('--outputtype', default='PNG', choices=['SATRGB_BACKGROUND', 'PNG', 'JPG'], help='Output type')
    inp_create.set_defaults(func=cmd_inpaint_create)

    # inpaint delete
    inp_delete = inpaint_sub.add_parser('delete', help='Delete an inpaint job')
    inp_delete.add_argument('--volumeid', required=True, help='Volume ID')
    inp_delete.add_argument('--inpaintid', required=True, help='Inpaint ID')
    inp_delete.set_defaults(func=cmd_inpaint_delete)

    # -------------------------------------------------------------------------
    # PREVIEW
    # -------------------------------------------------------------------------
    preview = subparsers.add_parser('preview', help='Manage preview jobs')
    preview_sub = preview.add_subparsers(dest='action', help='Action')

    # preview get
    prv_get = preview_sub.add_parser('get', help='Get a preview job')
    prv_get.add_argument('--workspaceid', required=True, help='Workspace ID')
    prv_get.add_argument('--previewid', required=True, help='Preview ID')
    prv_get.add_argument('--fields', help='Comma-separated fields')
    prv_get.add_argument('--download', action='store_true', help='Download the preview thumbnail image')
    prv_get.add_argument('--outputdir', help='Output directory for downloaded preview (default: current directory)')
    prv_get.set_defaults(func=cmd_preview_get)

    # preview create
    prv_create = preview_sub.add_parser('create', help='Create a preview job')
    prv_create.add_argument('--workspaceid', required=True, help='Workspace ID')
    prv_create.add_argument('--graphid', required=True, help='Graph ID')
    prv_create.set_defaults(func=cmd_preview_create)

    # preview log
    prv_log = preview_sub.add_parser('log', help='Get preview job log')
    prv_log.add_argument('--workspaceid', required=True, help='Workspace ID')
    prv_log.add_argument('--previewid', required=True, help='Preview ID')
    prv_log.add_argument('--fields', help='Comma-separated fields')
    prv_log.set_defaults(func=cmd_preview_log)

    # -------------------------------------------------------------------------
    # AGENTS
    # -------------------------------------------------------------------------
    agents = subparsers.add_parser('agents', help='Agent helper functions')
    agents_sub = agents.add_subparsers(dest='action', help='Action')

    # agents types
    ag_types = agents_sub.add_parser('types', help='Get available data types')
    ag_types.set_defaults(func=cmd_agents_types)

    # agents fields
    ag_fields = agents_sub.add_parser('fields', help='Get fields for a data type')
    ag_fields.add_argument('--type', required=True, help='Data type')
    ag_fields.set_defaults(func=cmd_agents_fields)

    # -------------------------------------------------------------------------
    # RULES
    # -------------------------------------------------------------------------
    rules = subparsers.add_parser('rules', help='Manage platform rules')
    rules_sub = rules.add_subparsers(dest='action', help='Action')

    # rules get-organization
    rules_get_org = rules_sub.add_parser('get-organization', help='Get organization rules')
    rules_get_org.add_argument('--orgid', help='Organization ID')
    rules_get_org.set_defaults(func=cmd_rules_organization)

    # rules edit-organization
    rules_edit_org = rules_sub.add_parser('edit-organization', help='Edit organization rules')
    rules_edit_org.add_argument('--orgid', help='Organization ID')
    rules_edit_org.add_argument('--rules', required=True, help='Rules string')
    rules_edit_org.set_defaults(func=cmd_rules_edit_organization)

    # rules get-workspace
    rules_get_ws = rules_sub.add_parser('get-workspace', help='Get workspace rules')
    rules_get_ws.add_argument('--workspaceid', help='Workspace ID')
    rules_get_ws.set_defaults(func=cmd_rules_workspace)

    # rules edit-workspace
    rules_edit_ws = rules_sub.add_parser('edit-workspace', help='Edit workspace rules')
    rules_edit_ws.add_argument('--workspaceid', help='Workspace ID')
    rules_edit_ws.add_argument('--rules', required=True, help='Rules string')
    rules_edit_ws.set_defaults(func=cmd_rules_edit_workspace)

    # rules get-service
    rules_get_svc = rules_sub.add_parser('get-service', help='Get service rules')
    rules_get_svc.add_argument('--serviceid', required=True, help='Service ID')
    rules_get_svc.set_defaults(func=cmd_rules_service)

    # rules edit-service
    rules_edit_svc = rules_sub.add_parser('edit-service', help='Edit service rules')
    rules_edit_svc.add_argument('--serviceid', required=True, help='Service ID')
    rules_edit_svc.add_argument('--rules', required=True, help='Rules string')
    rules_edit_svc.set_defaults(func=cmd_rules_edit_service)

    # rules get-user
    rules_get_user = rules_sub.add_parser('get-user', help='Get user rules')
    rules_get_user.set_defaults(func=cmd_rules_user)

    # rules edit-user
    rules_edit_user = rules_sub.add_parser('edit-user', help='Edit user rules')
    rules_edit_user.add_argument('--rules', required=True, help='Rules string')
    rules_edit_user.set_defaults(func=cmd_rules_edit_user)

    return parser


def main():
    """CLI entry point: parse arguments and dispatch to the matched handler.

    Each subparser registers its handler via ``set_defaults(func=...)``;
    after parsing, the handler is invoked with the parsed namespace.

    Exit status:
        0 - command executed successfully
        1 - missing resource/action, unknown command, or handler failure
    """
    parser = create_parser()
    args = parser.parse_args()

    # No resource given at all: show top-level help and fail.
    if not args.resource:
        parser.print_help()
        sys.exit(1)

    # Resource given but no action: show that resource's own help.
    # Re-parsing with '--help' is the simplest way to reach the subparser's
    # help text, but argparse raises SystemExit(0) on '--help' — catch it so
    # we can exit with status 1 as intended (previously the sys.exit(1)
    # below was unreachable and the process exited 0).
    if not getattr(args, 'action', None):
        try:
            parser.parse_args([args.resource, '--help'])
        except SystemExit:
            pass
        sys.exit(1)

    # Dispatch to the registered handler, if the matched subparser set one.
    if hasattr(args, 'func'):
        try:
            args.func(args)
        except Exception as e:
            # Top-level CLI boundary: surface any failure as a JSON error
            # payload (consistent with the rest of the tool) and exit nonzero.
            output_error(str(e), "EXECUTION_ERROR")
            sys.exit(1)
    else:
        parser.print_help()
        sys.exit(1)


# Standard script guard: run the CLI only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
