Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions CLI-COMMANDS.md
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,50 @@ roboflow download my-workspace/my-project/3 -f coco # alias
roboflow infer photo.jpg -m my-project/3
```

### Train, monitor, cancel, stop

```bash
# Start training (any architecture). For NAS sweeps, use a NAS parent modelType:
roboflow train start -p my-project -v 3 --type rfdetr-base
roboflow train start -p my-project -v 3 --type rfdetr-nas-parent # NAS sweep
roboflow train start -p my-project -v 3 --type rfdetr-nas-base-parent # NAS Base sweep
roboflow train start -p my-project -v 3 --type rfdetr-nas-seg-parent # NAS instance-segmentation

# Cancel an in-flight training (any architecture; NAS-aware):
roboflow train cancel my-project/3
# Pass --continue-if-no-refund to cancel even past the refund window:
roboflow train cancel my-project/3 --continue-if-no-refund

# Graceful early-stop:
roboflow train stop my-project/3

# Run-level training results bundle (NAS leaderboard for NAS runs,
# minimal bundle for non-NAS):
roboflow train results my-project/3
```

NAS sweeps require the version's validation split to have at least 15 images;
the server returns `code: "insufficient_validation_images_for_nas"` otherwise.

### NAS models — list, star, deploy

```bash
# Get a NAS run's modelGroup from training results:
roboflow --json train results my-project/3 | jq -r .modelGroup
# → rfdetrNasGroup-3

# List every model from one NAS run, with hardware/latency/mAP columns:
roboflow model list -p my-project --group rfdetrNasGroup-3

# Star a NAS-trained model (triggers TRT compile for its recommended hardware):
# --json train results … gives you the modelId per row.
roboflow model star <modelId>
roboflow model star <modelId> --unstar
```

`model star` is NAS-only by server-side design; non-NAS modelTypes return
`code: "MODEL_NOT_NAS"`.

### Search and export

```bash
Expand Down
93 changes: 93 additions & 0 deletions roboflow/adapters/rfapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,99 @@ def start_version_training(
return True


def cancel_version_training(
    api_key: str,
    workspace_url: str,
    project_url: str,
    version: str,
    *,
    continue_if_no_refund: bool = False,
):
    """Cancel an in-flight training run.

    The backend cancel handler is canonical for both vanilla and NAS
    trainings — it accepts the ``mining`` status, so NAS sweeps are
    covered as well.

    :param continue_if_no_refund: when True, cancel even past the refund window.
    :raises RoboflowError: if the server responds with a non-2xx status.
    """
    endpoint = f"{API_URL}/{workspace_url}/{project_url}/{version}/train/cancel?api_key={api_key}"
    # Only send the flag when it is set; the server treats absence as False.
    payload: Dict[str, Union[str, int, bool]] = (
        {"continueIfNoRefund": True} if continue_if_no_refund else {}
    )
    resp = requests.post(endpoint, json=payload)
    if not resp.ok:
        raise RoboflowError(resp.text)
    # Some deployments answer 204/empty on success — normalize to a dict.
    if resp.content:
        return resp.json()
    return {"success": True}


def stop_version_training(api_key: str, workspace_url: str, project_url: str, version: str):
    """Request an early stop on an in-flight training run.

    The backend flips ``train.requestedStop``; the run finishes the current
    phase gracefully (mining or training).

    :raises RoboflowError: if the server responds with a non-2xx status.
    """
    endpoint = f"{API_URL}/{workspace_url}/{project_url}/{version}/train/stop?api_key={api_key}"
    resp = requests.post(endpoint, json={})
    if not resp.ok:
        raise RoboflowError(resp.text)
    # Normalize an empty success response to a dict for callers.
    return resp.json() if resp.content else {"success": True}


def get_training_results(api_key: str, workspace_url: str, project_url: str, version: str):
    """Fetch the run-level training results bundle.

    For NAS runs the payload looks like ``{ trainingId, status, modelGroup,
    modelCount, recommendedByHardware, mining?, models: [...] }``; for
    non-NAS runs it is a minimal bundle with the produced model(s).

    :raises RoboflowError: if the server responds with a non-2xx status.
    """
    endpoint = f"{API_URL}/{workspace_url}/{project_url}/{version}/training/results?api_key={api_key}"
    resp = requests.get(endpoint)
    if resp.ok:
        return resp.json()
    raise RoboflowError(resp.text)


def list_project_models(
    api_key: str,
    workspace_url: str,
    project_url: str,
    *,
    group: Optional[str] = None,
):
    """List models for a project; pass ``group`` to scope to one NAS run.

    :param group: optional NAS ``modelGroup`` value used as a server-side filter.
    :raises RoboflowError: if the server responds with a non-2xx status.
    """
    endpoint = f"{API_URL}/{workspace_url}/{project_url}/models?api_key={api_key}"
    if group:
        # Fully percent-encode the group (safe='') so slashes don't split the path.
        endpoint = f"{endpoint}&group={urllib.parse.quote(group, safe='')}"
    resp = requests.get(endpoint)
    if resp.ok:
        return resp.json()
    raise RoboflowError(resp.text)


def get_model_by_url(api_key: str, workspace_url: str, model_url: str):
    """Fetch a single model by its URL slug.

    NOTE(review): flagged as dead code — no CLI command calls this yet.
    Keep it only as scaffolding for a follow-on PR, or delete it.

    :param model_url: model URL slug; slashes are preserved (safe="/") since
        they are path separators in the endpoint.
    :raises RoboflowError: if the server responds with a non-2xx status.
    """
    encoded = urllib.parse.quote(model_url, safe="/")
    url = f"{API_URL}/models/{workspace_url}/{encoded}?api_key={api_key}"
    response = requests.get(url)
    if not response.ok:
        raise RoboflowError(response.text)
    return response.json()


def favorite_nas_model(api_key: str, workspace_url: str, model_id: str, *, starred: bool = True):
    """Star or unstar a NAS-trained model.

    ``model_id`` is the opaque public model id (e.g. ``my-project-3-nas-gpu-b``),
    the same value the public API returns as ``models[].modelId`` on
    ``GET /:workspace/:project/:version/training/results``. NAS-only on the
    server side.

    :param starred: True to star, False to unstar.
    :raises RoboflowError: if the server responds with a non-2xx status.
    """
    # Fully encode the id (safe="") so it cannot inject path segments.
    encoded = urllib.parse.quote(model_id, safe="")
    endpoint = f"{API_URL}/{workspace_url}/models/{encoded}/favorite?api_key={api_key}"
    resp = requests.post(endpoint, json={"starred": bool(starred)})
    if resp.ok:
        return resp.json()
    raise RoboflowError(resp.text)


def get_version(api_key: str, workspace_url: str, project_url: str, version: str, nocache: bool = False):
"""
Fetch detailed information about a specific dataset version.
Expand Down
152 changes: 150 additions & 2 deletions roboflow/cli/handlers/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,23 @@
def list_models(
    ctx: typer.Context,
    project: Annotated[str, typer.Option("-p", "--project", help="Project ID or shorthand (e.g. my-ws/my-project)")],
    group: Annotated[
        Optional[str],
        typer.Option(
            "-g",
            "--group",
            help=(
                "NAS modelGroup to scope the list to a single NAS run. "
                "Get the value from 'roboflow train results <project>/<version>'."
            ),
        ),
    ] = None,
) -> None:
    """List trained models for a project.

    Pass --group <modelGroup> to filter to a single NAS run.
    """
    # Forward both options; group=None keeps the non-NAS listing path.
    args = ctx_to_args(ctx, project=project, group=group)
    _list_models(args)


Expand All @@ -31,6 +45,30 @@ def get_model(
_get_model(args)


@model_app.command("star")
def star_model(
    ctx: typer.Context,
    model_id: Annotated[
        str,
        typer.Argument(
            help=(
                "Model id (e.g. workspace/model-id, or just the bare id if -w is set). "
                "Get it from 'roboflow train results <project>/<version>' (models[].modelId)."
            ),
        ),
    ],
    unstar: Annotated[bool, typer.Option("--unstar", help="Unstar instead of starring")] = False,
) -> None:
    """Star or unstar a NAS-trained model.

    NAS-only by design — the server rejects non-NAS modelTypes with a
    MODEL_NOT_NAS error. Starring triggers TRT compilation for the model's
    recommended hardware so the model becomes deployable as an edge target.
    """
    # --unstar inverts the flag; the handler receives the final starred state.
    args = ctx_to_args(ctx, model_id=model_id, starred=not unstar)
    _star_model(args)


@model_app.command("infer")
def model_infer(
ctx: typer.Context,
Expand Down Expand Up @@ -86,16 +124,64 @@ def upload_model(

def _list_models(args): # noqa: ANN001
import roboflow
from roboflow.adapters import rfapi
from roboflow.cli._output import output, output_error, suppress_sdk_output
from roboflow.cli._resolver import resolve_resource
from roboflow.cli._table import format_table
from roboflow.config import load_roboflow_api_key

try:
workspace_url, project_slug, _version = resolve_resource(args.project, workspace_override=args.workspace)
except ValueError as exc:
output_error(args, str(exc))
return

group = getattr(args, "group", None)

if group:
# NAS path — hit the public /models endpoint with ?group= filter.
# Surfaces full per-row NAS metadata (nasFamily, group,
# train.results.{hardware,latency,map5095,paretoOptimalFor},
# favorites, recommended).
api_key = args.api_key or load_roboflow_api_key(workspace_url)
if not api_key:
output_error(
args,
"No API key found.",
hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.",
exit_code=2,
)
return
try:
rows = rfapi.list_project_models(api_key, workspace_url, project_slug, group=group)
except rfapi.RoboflowError as exc:
output_error(args, str(exc), exit_code=3)
return
if not isinstance(rows, list):
rows = []
# Project a leaderboard view for the text table; full row stays in JSON.
table_rows = []
for r in rows:
metrics = r.get("metrics") or {}
table_rows.append(
{
"url": r.get("url", ""),
"type": r.get("modelType", ""),
"hardware": metrics.get("hardware", ""),
"latency": metrics.get("latency", ""),
"map50": metrics.get("map50", ""),
"map5095": metrics.get("map5095", ""),
"recommended": "★" if r.get("recommended") else "",
}
)
table = format_table(
table_rows,
columns=["url", "type", "hardware", "latency", "map50", "map5095", "recommended"],
headers=["URL", "TYPE", "HARDWARE", "LATENCY", "MAP50", "MAP5095", "REC"],
)
output(args, rows, text=table)
return

api_key = args.api_key or None

try:
Expand Down Expand Up @@ -130,6 +216,68 @@ def _list_models(args): # noqa: ANN001
output(args, models, text=table)


def _star_model(args):  # noqa: ANN001
    from roboflow.adapters import rfapi
    from roboflow.cli._output import output, output_error
    from roboflow.config import load_roboflow_api_key

    # The id may arrive as "workspace/model-id" or as a bare "model-id"
    # (when -w supplies the workspace). Same parsing as `roboflow model get`.
    trimmed = args.model_id.strip("/")
    ws_from_arg = None
    public_model_id = trimmed
    if "/" in trimmed:
        ws_from_arg, _, public_model_id = trimmed.partition("/")

    workspace_url = args.workspace or ws_from_arg
    if not workspace_url:
        from roboflow.cli._resolver import resolve_default_workspace

        workspace_url = resolve_default_workspace(args.api_key)
    if not workspace_url:
        output_error(
            args,
            "Could not determine workspace.",
            hint=(
                "Pass -w/--workspace, prefix the model id (workspace/id), or run 'roboflow auth set-workspace <ws>'."
            ),
            exit_code=2,
        )
        return

    api_key = args.api_key or load_roboflow_api_key(workspace_url)
    if not api_key:
        output_error(
            args,
            "No API key found.",
            hint="Set ROBOFLOW_API_KEY or run 'roboflow auth login'.",
            exit_code=2,
        )
        return

    try:
        result = rfapi.favorite_nas_model(api_key, workspace_url, public_model_id, starred=args.starred)
    except rfapi.RoboflowError as exc:
        message = str(exc)
        # Map the two known server error codes onto actionable hints.
        if "MODEL_NOT_NAS" in message or "non-NAS" in message:
            hint = "Star is NAS-only. Use 'roboflow train results' to find NAS model ids (models[].modelId)."
        elif "MODEL_NOT_IN_WORKSPACE" in message:
            hint = (
                "Verify the model id and workspace. The id is the same value "
                "'roboflow train results' returns as models[].modelId."
            )
        else:
            hint = None
        output_error(args, message, hint=hint, exit_code=3)
        return

    verb = "starred" if args.starred else "unstarred"
    output(args, result, text=f"Model {workspace_url}/{public_model_id} {verb}.")


def _get_model(args): # noqa: ANN001
import json

Expand Down
Loading
Loading