Coverage for src / sql_tool / cli / main.py: 92%
260 statements
« prev ^ index » next coverage.py v7.13.4, created at 2026-02-14 15:28 -0500
1"""SQL Tool main entry point and command registration."""
3from __future__ import annotations
5import atexit
6from pathlib import Path # noqa: TC003
7from typing import TYPE_CHECKING, Annotated, Any
9import sentry_sdk
10import typer
12from sql_tool.__about__ import __version__
13from sql_tool.cli.commands._shared import (
14 apply_local_format_options,
15 get_client,
16 output_result,
17 parse_table_arg,
18 preprocess_optional_int_flags,
19 size_formatter,
20)
21from sql_tool.cli.commands.config import config_app
22from sql_tool.cli.commands.query import query_command
23from sql_tool.cli.commands.service import service_app
24from sql_tool.cli.commands.ts import ts_app
25from sql_tool.cli.helpers import format_duration_human, format_relative_time
26from sql_tool.cli.output import OutputFormat # noqa: TC001
27from sql_tool.core.exceptions import SqlToolError
28from sql_tool.core.logging import setup_logging
29from sql_tool.core.models import ColumnMeta, QueryResult
30from sql_tool.core.monitoring import setup_sentry
31from sql_tool.core.postgres import (
32 connections_summary,
33 describe_table,
34 get_time_column,
35 get_timestamp_range,
36 list_all_database_names,
37 list_connections,
38 list_databases,
39 list_schemas,
40 list_schemas_all_databases,
41 list_tables,
42 preview_table,
43)
45if TYPE_CHECKING:
46 from sql_tool.core.client import PgClient
class SentryTestError(RuntimeError):
    """Deliberately raised error used to verify the Sentry reporting pipeline."""
# Root Typer application; with no arguments it prints help instead of erroring.
app = typer.Typer(
    help="SQL Tool - PostgreSQL query and administration tool",
    no_args_is_help=True,
)
# Mount the command groups and the standalone `query` command on the root app.
# Registration order here determines the order shown in `--help`.
app.add_typer(config_app, name="config")
app.add_typer(service_app, name="service")
app.add_typer(ts_app, name="ts")
app.command("query")(query_command)
def version_callback(value: bool) -> None:
    """Eager ``--version`` callback: print the version string and exit."""
    if not value:
        return
    typer.echo(f"sql-tool {__version__}")
    raise typer.Exit()
@app.callback()
def main(
    ctx: typer.Context,
    version: Annotated[
        bool,
        typer.Option(
            "--version",
            "-V",
            help="Show version and exit",
            callback=version_callback,
            is_eager=True,
        ),
    ] = False,
    verbose: Annotated[
        bool,
        typer.Option("--verbose", help="Enable verbose logging"),
    ] = False,
    profile: Annotated[
        str | None,
        typer.Option("--profile", "-P", help="Named connection profile"),
    ] = None,
    host: Annotated[
        str | None,
        typer.Option("--host", "-H", help="PostgreSQL host"),
    ] = None,
    port: Annotated[
        int | None,
        typer.Option("--port", "-p", help="PostgreSQL port"),
    ] = None,
    database: Annotated[
        str | None,
        typer.Option("--database", "-d", help="Database name"),
    ] = None,
    user: Annotated[
        str | None,
        typer.Option("--user", "-U", help="User name"),
    ] = None,
    password: Annotated[
        str | None,
        typer.Option("--password", "-W", help="Password"),
    ] = None,
    dsn: Annotated[
        str | None,
        typer.Option("--dsn", help="Connection DSN"),
    ] = None,
    config_file: Annotated[
        Path | None,
        typer.Option("--config", help="Path to config file"),
    ] = None,
    schema: Annotated[
        str | None,
        typer.Option("--schema", "-s", help="Default schema for queries"),
    ] = None,
    format: Annotated[
        OutputFormat | None,
        typer.Option("--format", "-f", help="Output format: table|json|csv"),
    ] = None,
    table: Annotated[
        bool,
        typer.Option("--table", help="Shorthand for --format table"),
    ] = False,
    compact: Annotated[
        bool,
        typer.Option("--compact", help="Compact JSON output (no indentation)"),
    ] = False,
    width: Annotated[
        int,
        typer.Option("--width", help="Column width for table format"),
    ] = 40,
    no_header: Annotated[
        bool,
        typer.Option("--no-header", help="Suppress header row in CSV output"),
    ] = False,
) -> None:
    """SQL Tool - PostgreSQL query and administration tool."""
    setup_logging(verbose)
    setup_sentry()

    # One Sentry transaction spans the whole CLI invocation. It is entered
    # manually and closed via atexit (rather than a `with` block) because
    # the subcommand body runs after this callback returns.
    transaction = sentry_sdk.start_transaction(
        op="cli", name=ctx.invoked_subcommand or "sql-tool"
    )
    transaction.__enter__()

    def cleanup() -> None:
        # Close the transaction and give pending events a short flush window.
        transaction.__exit__(None, None, None)
        sentry_sdk.flush(timeout=2)

    atexit.register(cleanup)

    # Stash the global connection options on the context for subcommands.
    ctx.ensure_object(dict)
    ctx.obj.update(
        {
            "verbose": verbose,
            "profile": profile,
            "host": host,
            "port": port,
            "database": database,
            "user": user,
            "password": password,
            "dsn": dsn,
            "config_file": config_file,
            "schema": schema,
        }
    )

    # Global format options: --table shorthand takes precedence over --format.
    if table:
        fmt = "table"
    elif format:
        fmt = format.value
    else:
        fmt = None
    ctx.obj.update(
        {
            "format": fmt,
            "compact": compact,
            "width": width,
            "no_header": no_header,
        }
    )
def run() -> None:
    """Entry point with global error handling.

    Maps known tool errors to their exit codes, Ctrl-C to exit code 130,
    and any unexpected exception to exit code 1; tool and unexpected
    errors are also reported to Sentry.
    """
    preprocess_optional_int_flags()
    try:
        app()
    except SqlToolError as exc:
        # Known tool errors carry their own message and exit code.
        sentry_sdk.capture_exception(exc)
        typer.echo(f"Error: {exc.message}", err=True)
        raise SystemExit(exc.exit_code) from None
    except SystemExit:
        # Typer/Click exit normally via SystemExit; pass it straight through.
        raise
    except KeyboardInterrupt:
        # Conventional 128 + SIGINT(2) exit status; not reported to Sentry.
        raise SystemExit(130) from None
    except Exception as exc:
        sentry_sdk.capture_exception(exc)
        typer.echo(f"Error: {exc}", err=True)
        raise SystemExit(1) from None
@app.command("test-sentry")
def test_sentry() -> None:
    """Send test events to Sentry for validation."""
    # 1) Error event: raise and capture a marker exception.
    typer.echo("Sending test error to Sentry...")
    try:
        raise SentryTestError("sql-tool Phase 1 Sentry test error")
    except Exception as exc:
        sentry_sdk.capture_exception(exc)

    # 2) Performance event: a transaction with a parent/child span pair.
    typer.echo("Sending test performance span to Sentry...")
    with sentry_sdk.start_transaction(op="test", name="test_sentry") as txn:
        with sentry_sdk.start_span(op="test.parent", description="Parent span"):
            with sentry_sdk.start_span(op="test.child", description="Child span"):
                pass
        txn.set_status("ok")

    # Block briefly so both events leave the process before we return.
    sentry_sdk.flush(timeout=5)
    typer.echo("Test events sent. Check Sentry console:")
    typer.echo(" - Issues: Verify 'sql-tool Phase 1 Sentry test error' appears")
    typer.echo(" - Performance: Verify 'test_sentry' transaction appears")
222# ---------------------------------------------------------------------------
223# Commands: databases, schema, table, connections
224# ---------------------------------------------------------------------------
@app.command("databases")
def databases_command(
    ctx: typer.Context,
    format: Annotated[
        OutputFormat | None,
        typer.Option("--format", "-f", help="Output format: table|json|csv"),
    ] = None,
    table: Annotated[
        bool,
        typer.Option("--table", help="Shorthand for --format table"),
    ] = False,
    compact: Annotated[
        bool,
        typer.Option("--compact", help="Compact JSON output (no indentation)"),
    ] = False,
    width: Annotated[
        int | None,
        typer.Option("--width", help="Column width for table format"),
    ] = None,
    no_header: Annotated[
        bool,
        typer.Option("--no-header", help="Suppress header row in CSV output"),
    ] = False,
) -> None:
    """
    List all databases with size and owner information.

    Queries pg_catalog.pg_database to show all databases accessible
    to the current user, sorted by size descending. Includes a TOTAL row.
    """
    apply_local_format_options(
        ctx,
        format=format,
        table=table,
        compact=compact,
        width=width,
        no_header=no_header,
    )

    with get_client(ctx) as client:
        result = list_databases(client)

    size_fmt, _ = size_formatter(ctx)

    # Format each database row and accumulate the grand total in one pass;
    # `or 0` guards databases reporting a NULL size.
    grand_total = 0
    rows: list[tuple[Any, ...]] = []
    for name, owner, enc, size_bytes in result.rows:
        grand_total += size_bytes or 0
        rows.append((name, owner, enc, size_fmt(size_bytes)))
    rows.append(("TOTAL", "", "", size_fmt(grand_total)))

    result = QueryResult(
        columns=[
            ColumnMeta(name="name", type_oid=25, type_name="text"),
            ColumnMeta(name="owner", type_oid=25, type_name="text"),
            ColumnMeta(name="encoding", type_oid=25, type_name="text"),
            ColumnMeta(name="size", type_oid=25, type_name="text"),
        ],
        rows=rows,
        row_count=len(rows),
        status_message=result.status_message,
    )
    output_result(ctx, result)
def _schemas_all_databases(ctx: typer.Context) -> None:
    """Format and output schemas across all databases.

    Enumerates every database name over one connection, then collects
    per-schema sizes from each database through a fresh client created by
    ``make_client``. When any database reports TimescaleDB chunk data,
    compression columns (before/after/hypertable size) are included.
    A TOTAL row is always appended.
    """
    with get_client(ctx) as client:
        db_names = list_all_database_names(client)

    def make_client(db_name: str) -> PgClient:
        # Retarget the shared context at db_name; get_client reads it back.
        ctx.ensure_object(dict)["database"] = db_name
        return get_client(ctx)

    raw_data, has_chunks = list_schemas_all_databases(db_names, make_client)

    fmt, _ = size_formatter(ctx)

    # Grand totals for the TOTAL row. `or 0` guards None/NULL values so one
    # database with an unreadable size cannot break the summation (mirrors
    # the guards used in schema_command's single-database path).
    grand_tables = sum(r[2] or 0 for r in raw_data)
    grand_total = sum(r[3] or 0 for r in raw_data)
    grand_before = sum(r[4] or 0 for r in raw_data)
    grand_after = sum(r[5] or 0 for r in raw_data)
    grand_ht_total = sum(r[6] or 0 for r in raw_data)

    all_rows: list[tuple[Any, ...]]
    if has_chunks:
        # Wide layout with chunk compression columns; "-" marks empty values.
        all_rows = [
            (
                db,
                schema,
                tables,
                fmt(total),
                fmt(before) if before else "-",
                fmt(after) if after else "-",
                fmt(ht) if ht else "-",
            )
            for db, schema, tables, total, before, after, ht in raw_data
        ]
        all_rows.append(
            (
                "TOTAL",
                "",
                grand_tables,
                fmt(grand_total),
                fmt(grand_before) if grand_before else "-",
                fmt(grand_after) if grand_after else "-",
                fmt(grand_ht_total) if grand_ht_total else "-",
            )
        )
        columns = [
            ColumnMeta(name="database", type_oid=25, type_name="text"),
            ColumnMeta(name="schema", type_oid=25, type_name="text"),
            ColumnMeta(name="tables", type_oid=20, type_name="int8"),
            ColumnMeta(name="total_size", type_oid=25, type_name="text"),
            ColumnMeta(name="before_compr", type_oid=25, type_name="text"),
            ColumnMeta(name="after_compr", type_oid=25, type_name="text"),
            ColumnMeta(name="ht_size", type_oid=25, type_name="text"),
        ]
    else:
        # Narrow layout: no TimescaleDB chunk data anywhere.
        all_rows = [
            (db, schema, tables, fmt(total))
            for db, schema, tables, total, _, _, _ in raw_data
        ]
        all_rows.append(("TOTAL", "", grand_tables, fmt(grand_total)))
        columns = [
            ColumnMeta(name="database", type_oid=25, type_name="text"),
            ColumnMeta(name="schema", type_oid=25, type_name="text"),
            ColumnMeta(name="tables", type_oid=20, type_name="int8"),
            ColumnMeta(name="total_size", type_oid=25, type_name="text"),
        ]

    result = QueryResult(
        columns=columns,
        rows=all_rows,
        row_count=len(all_rows),
        status_message=f"SELECT {len(all_rows)}",
    )
    output_result(ctx, result)
@app.command("schema")
def schema_command(
    ctx: typer.Context,
    database: Annotated[
        str | None,
        typer.Option("--database", "-d", help="Database name"),
    ] = None,
    all_databases: Annotated[
        bool,
        typer.Option("--all-databases", help="Show schemas across all databases"),
    ] = False,
    format: Annotated[
        OutputFormat | None,
        typer.Option("--format", "-f", help="Output format: table|json|csv"),
    ] = None,
    table: Annotated[
        bool,
        typer.Option("--table", help="Shorthand for --format table"),
    ] = False,
    compact: Annotated[
        bool,
        typer.Option("--compact", help="Compact JSON output (no indentation)"),
    ] = False,
    width: Annotated[
        int | None,
        typer.Option("--width", help="Column width for table format"),
    ] = None,
    no_header: Annotated[
        bool,
        typer.Option("--no-header", help="Suppress header row in CSV output"),
    ] = False,
) -> None:
    """
    List schemas with space usage including TimescaleDB chunk breakdown.

    Shows each schema with table size and chunk usage from hypertables.
    Sorted by total footprint (tables + chunks) descending. Includes TOTAL row.
    """
    # Command-local --database overrides the globally configured target.
    if database is not None:
        ctx.ensure_object(dict)["database"] = database
    apply_local_format_options(
        ctx,
        format=format,
        table=table,
        compact=compact,
        width=width,
        no_header=no_header,
    )

    # --all-databases delegates to the multi-database variant and returns.
    if all_databases:
        _schemas_all_databases(ctx)
        return

    with get_client(ctx) as client:
        schema_result, chunk_map = list_schemas(client)
        # Resolved database name, echoed as a header in table mode below.
        db_name = client.execute_query("SELECT current_database()").rows[0][0]

    fmt, is_table = size_formatter(ctx)

    rows: list[tuple[Any, ...]]
    if chunk_map:
        # TimescaleDB present: decorate each schema row with its chunk
        # compression stats and sort by total footprint (tables + chunks).
        decorated = []
        grand_total = 0
        grand_tables = 0
        grand_before = 0
        grand_after = 0
        grand_ht_total = 0
        for schema, tables, total_bytes in schema_result.rows:
            # Schemas without hypertable chunks default to zero chunk sizes.
            before_b, after_b, ht_total = chunk_map.get(schema, (0, 0, 0))
            footprint = (total_bytes or 0) + ht_total
            grand_total += total_bytes or 0
            grand_tables += tables or 0
            grand_before += before_b
            grand_after += after_b
            grand_ht_total += ht_total
            decorated.append(
                (footprint, schema, tables, total_bytes, before_b, after_b, ht_total)
            )

        # Largest footprint first; the leading tuple element is only a sort key.
        decorated.sort(key=lambda r: r[0], reverse=True)
        rows = [
            (
                schema,
                tables,
                fmt(total_bytes),
                fmt(before_b) if before_b else "-",
                fmt(after_b) if after_b else "-",
                fmt(ht_total) if ht_total else "-",
            )
            for _, schema, tables, total_bytes, before_b, after_b, ht_total in decorated
        ]
        rows.append(
            (
                "TOTAL",
                grand_tables,
                fmt(grand_total),
                fmt(grand_before) if grand_before else "-",
                fmt(grand_after) if grand_after else "-",
                fmt(grand_ht_total) if grand_ht_total else "-",
            )
        )

        result = QueryResult(
            columns=[
                ColumnMeta(name="schema", type_oid=25, type_name="text"),
                ColumnMeta(name="tables", type_oid=20, type_name="int8"),
                ColumnMeta(name="total_size", type_oid=25, type_name="text"),
                ColumnMeta(name="before_compr", type_oid=25, type_name="text"),
                ColumnMeta(name="after_compr", type_oid=25, type_name="text"),
                ColumnMeta(name="ht_size", type_oid=25, type_name="text"),
            ],
            rows=rows,
            row_count=len(rows),
            status_message=schema_result.status_message,
        )
    else:
        # No TimescaleDB data: simple layout sorted by table size descending.
        sorted_data = sorted(schema_result.rows, key=lambda r: r[2] or 0, reverse=True)
        grand_total = sum(r[2] or 0 for r in sorted_data)
        grand_tables = sum(r[1] or 0 for r in sorted_data)
        rows = [
            (schema, tables, fmt(total_bytes))
            for schema, tables, total_bytes in sorted_data
        ]
        rows.append(("TOTAL", grand_tables, fmt(grand_total)))
        result = QueryResult(
            columns=[
                ColumnMeta(name="schema", type_oid=25, type_name="text"),
                ColumnMeta(name="tables", type_oid=20, type_name="int8"),
                ColumnMeta(name="total_size", type_oid=25, type_name="text"),
            ],
            rows=rows,
            row_count=len(rows),
            status_message=schema_result.status_message,
        )

    # Table mode gets a human header on stderr so stdout stays parseable.
    if is_table:
        typer.echo(f"Schemas of: {db_name}", err=True)
    output_result(ctx, result)
def _table_list(
    ctx: typer.Context,
    *,
    schema_filter: str | None = None,
    include_internal_tables: bool = False,
) -> None:
    """Format and output table list with size breakdown.

    Lists tables (optionally restricted to one schema) with table/index/total
    sizes. When hypertable data is present, chunk counts and compression
    sizes are appended per row. A TOTAL row is always added.
    """
    with get_client(ctx) as client:
        result, ht_map = list_tables(
            client,
            schema_filter=schema_filter,
            include_internal_tables=include_internal_tables,
        )

    fmt, _ = size_formatter(ctx)
    # Unfiltered listings include a leading schema column in each row.
    has_schema = not schema_filter

    def _ratio(before: int | None, after: int | None) -> str:
        # Compression ratio string, or "-" when either side is zero/unknown.
        if not before or not after:
            return "-"
        return f"{before / after:.1f}x"

    new_rows: list[tuple[Any, ...]] = []
    totals_table = 0
    totals_index = 0
    totals_total = 0
    totals_before = 0
    totals_after = 0

    for row in result.rows:
        if has_schema:
            # 5-tuple rows: (schema, name, table_size, index_size, total).
            schema_val, name_val, table_b, index_b, total_b = row
            base: tuple[Any, ...] = (
                schema_val,
                name_val,
                fmt(table_b),
                fmt(index_b),
                fmt(total_b),
            )
        else:
            # Filtered rows omit the schema column; recover the schema name
            # for the hypertable lookup below.
            name_val, table_b, index_b, total_b = row
            schema_val = schema_filter or "public"
            base = (name_val, fmt(table_b), fmt(index_b), fmt(total_b))

        # `or 0` guards NULL sizes in the running totals.
        totals_table += table_b or 0
        totals_index += index_b or 0
        totals_total += total_b or 0

        if ht_map:
            ht_info = ht_map.get((schema_val, name_val))
            if ht_info:
                uncompr_c, compr_c, before_b, after_b = ht_info
                totals_before += before_b or 0
                totals_after += after_b or 0
                new_rows.append(
                    (
                        *base,
                        f"{uncompr_c}/{compr_c}",
                        fmt(before_b) if before_b else "-",
                        fmt(after_b) if after_b else "-",
                        _ratio(before_b, after_b),
                    )
                )
            else:
                # Plain table in a hypertable-bearing listing: pad columns.
                new_rows.append((*base, "-", "-", "-", "-"))
        else:
            new_rows.append(base)

    if has_schema:
        columns = [
            ColumnMeta(name="schema", type_oid=25, type_name="text"),
            ColumnMeta(name="name", type_oid=25, type_name="text"),
            ColumnMeta(name="table_size", type_oid=25, type_name="text"),
            ColumnMeta(name="index_size", type_oid=25, type_name="text"),
            ColumnMeta(name="total", type_oid=25, type_name="text"),
        ]
    else:
        columns = [
            ColumnMeta(name="name", type_oid=25, type_name="text"),
            ColumnMeta(name="table_size", type_oid=25, type_name="text"),
            ColumnMeta(name="index_size", type_oid=25, type_name="text"),
            ColumnMeta(name="total", type_oid=25, type_name="text"),
        ]

    # Hypertable columns are appended only when any hypertable data exists.
    if ht_map:
        columns.extend(
            [
                ColumnMeta(name="chunks_u/c", type_oid=25, type_name="text"),
                ColumnMeta(name="before_size", type_oid=25, type_name="text"),
                ColumnMeta(name="after_size", type_oid=25, type_name="text"),
                ColumnMeta(name="ratio", type_oid=25, type_name="text"),
            ]
        )

    # TOTAL row mirrors the column layout chosen above.
    if has_schema:
        totals_row: tuple[Any, ...] = (
            "TOTAL",
            "",
            fmt(totals_table),
            fmt(totals_index),
            fmt(totals_total),
        )
    else:
        totals_row = ("TOTAL", fmt(totals_table), fmt(totals_index), fmt(totals_total))
    if ht_map:
        totals_row = (
            *totals_row,
            "",
            fmt(totals_before) if totals_before else "-",
            fmt(totals_after) if totals_after else "-",
            _ratio(totals_before, totals_after),
        )
    new_rows.append(totals_row)

    result = QueryResult(
        columns=columns,
        rows=new_rows,
        row_count=len(new_rows),
        status_message=result.status_message,
    )
    output_result(ctx, result)
@app.command("table")
def table_command(
    ctx: typer.Context,
    table_arg: Annotated[
        str | None,
        typer.Argument(
            help="Table name (schema.table or table). Omit to list all tables."
        ),
    ] = None,
    database: Annotated[
        str | None,
        typer.Option("--database", "-d", help="Database name"),
    ] = None,
    schema_filter: Annotated[
        str | None,
        typer.Option("--schema", "-s", help="Filter tables by schema"),
    ] = None,
    include_internal_tables: Annotated[
        bool,
        typer.Option(
            "--include-internal-tables", help="Include TimescaleDB internal tables"
        ),
    ] = False,
    show_range: Annotated[
        bool,
        typer.Option("--range", help="Show timestamp range (min/max) for hypertables"),
    ] = False,
    head: Annotated[
        int | None,
        typer.Option(
            "--head", help="Show first N rows ordered by time ASC (default: 10)"
        ),
    ] = None,
    tail: Annotated[
        int | None,
        typer.Option(
            "--tail", help="Show last N rows ordered by time DESC (default: 10)"
        ),
    ] = None,
    sample: Annotated[
        int | None,
        typer.Option("--sample", help="Show N random rows (default: 10)"),
    ] = None,
    format: Annotated[
        OutputFormat | None,
        typer.Option("--format", "-f", help="Output format: table|json|csv"),
    ] = None,
    table: Annotated[
        bool,
        typer.Option("--table", help="Shorthand for --format table"),
    ] = False,
    compact: Annotated[
        bool,
        typer.Option("--compact", help="Compact JSON output (no indentation)"),
    ] = False,
    width: Annotated[
        int | None,
        typer.Option("--width", help="Column width for table format"),
    ] = None,
    no_header: Annotated[
        bool,
        typer.Option("--no-header", help="Suppress header row in CSV output"),
    ] = False,
) -> None:
    """
    List tables or show table details.

    Without a table name, lists all tables with size breakdown.
    With a table name, shows column definitions with optional data preview.

    List mode: Use --schema to filter, --include-internal-tables for TimescaleDB internals.
    Detail mode: Use --range for timestamp range, --head/--tail/--sample for data preview.
    """
    # Command-local --database overrides the globally configured target.
    if database is not None:
        ctx.ensure_object(dict)["database"] = database
    apply_local_format_options(
        ctx,
        format=format,
        table=table,
        compact=compact,
        width=width,
        no_header=no_header,
    )

    # No table argument: list mode, then return.
    if table_arg is None:
        _table_list(
            ctx,
            schema_filter=schema_filter,
            include_internal_tables=include_internal_tables,
        )
        return

    schema_name, table_name = parse_table_arg(table_arg)

    # Any of --head/--tail/--sample switches to data-preview mode.
    has_preview = head is not None or tail is not None or sample is not None

    time_column = None
    # --range and head/tail ordering both need the table's time column.
    needs_time_col = show_range or head is not None or tail is not None
    if needs_time_col:
        with get_client(ctx) as client:
            time_column = get_time_column(client, schema_name, table_name)

    if not has_preview:
        # Detail mode: column definitions, then (optionally) the timestamp
        # range when --range was given and a time column was found.
        with get_client(ctx) as client:
            result = describe_table(client, schema_name, table_name)
        output_result(ctx, result)

        if show_range and time_column:
            with get_client(ctx) as client:
                range_result = get_timestamp_range(
                    client, schema_name, table_name, time_column
                )
            if range_result.rows:
                # Section header goes to stderr so stdout stays parseable.
                typer.echo("\nTimestamp Range:", err=True)
                output_result(ctx, range_result)
    else:
        # Preview mode: head/tail use time_column for ordering when present.
        with get_client(ctx) as client:
            preview_result = preview_table(
                client,
                schema_name,
                table_name,
                head=head,
                tail=tail,
                sample=sample,
                time_column=time_column,
            )
        if preview_result:
            output_result(ctx, preview_result)
@app.command("connections")
def connections_command(
    ctx: typer.Context,
    database: Annotated[
        str | None,
        typer.Option("--database", "-d", help="Database name"),
    ] = None,
    summary: Annotated[
        bool,
        typer.Option(
            "--summary", help="Show connection counts and memory configuration"
        ),
    ] = False,
    include_all: Annotated[
        bool,
        typer.Option("--all", help="Include idle connections"),
    ] = False,
    min_duration: Annotated[
        float | None,
        typer.Option(
            "--min-duration", help="Filter queries running longer than N seconds"
        ),
    ] = None,
    filter_user: Annotated[
        str | None,
        typer.Option("--filter-user", help="Filter by username"),
    ] = None,
    filter_db: Annotated[
        str | None,
        typer.Option("--filter-db", help="Filter by database name"),
    ] = None,
    filter_state: Annotated[
        str | None,
        typer.Option(
            "--filter-state", help="Filter by connection state (active, idle, etc.)"
        ),
    ] = None,
    format: Annotated[
        OutputFormat | None,
        typer.Option("--format", "-f", help="Output format: table|json|csv"),
    ] = None,
    table: Annotated[
        bool,
        typer.Option("--table", help="Shorthand for --format table"),
    ] = False,
    compact: Annotated[
        bool,
        typer.Option("--compact", help="Compact JSON output (no indentation)"),
    ] = False,
    width: Annotated[
        int | None,
        typer.Option("--width", help="Column width for table format"),
    ] = None,
    no_header: Annotated[
        bool,
        typer.Option("--no-header", help="Suppress header row in CSV output"),
    ] = False,
) -> None:
    """
    Show database connections with details and filtering.

    By default shows non-idle connections with full details including
    application name, client address, wait events, and query duration.
    Use --all to include idle connections.
    Use --summary for connection counts and memory configuration.
    Use --filter-user, --filter-db, --filter-state to narrow results.
    """
    # Command-local --database overrides the globally configured target.
    if database is not None:
        ctx.ensure_object(dict)["database"] = database
    apply_local_format_options(
        ctx,
        format=format,
        table=table,
        compact=compact,
        width=width,
        no_header=no_header,
    )

    # --summary short-circuits: counts and memory configuration only.
    if summary:
        with get_client(ctx) as client:
            result = connections_summary(client)
        output_result(ctx, result)
        return

    with get_client(ctx) as client:
        raw_result = list_connections(
            client,
            include_all=include_all,
            min_duration=min_duration,
            filter_user=filter_user,
            filter_db=filter_db,
            filter_state=filter_state,
        )

    # Only the table-mode flag is needed here, not the size formatter itself.
    _, is_table = size_formatter(ctx)

    # Raw rows carry both timestamp and elapsed-seconds variants of the
    # connection/query start times. Table mode renders human-friendly
    # relative times plus a duration; other formats keep the raw timestamps.
    rows: list[tuple[Any, ...]]
    if is_table:
        rows = [
            (
                pid,
                user,
                db,
                app,
                addr,
                state,
                wait_event,
                format_relative_time(conn_secs),
                format_relative_time(q_secs),
                format_duration_human(q_secs),
                query,
            )
            for pid, user, db, app, addr, state, wait_event, _, conn_secs, _, q_secs, query in raw_result.rows
        ]
    else:
        rows = [
            (
                pid,
                user,
                db,
                app,
                addr,
                state,
                wait_event,
                connected_since,
                query_start,
                query,
            )
            for pid, user, db, app, addr, state, wait_event, connected_since, _, query_start, _, query in raw_result.rows
        ]

    # Column sets mirror the row shapes built above (table mode adds duration).
    if is_table:
        columns = [
            ColumnMeta(name="pid", type_oid=23, type_name="int4"),
            ColumnMeta(name="user", type_oid=25, type_name="text"),
            ColumnMeta(name="database", type_oid=25, type_name="text"),
            ColumnMeta(name="app", type_oid=25, type_name="text"),
            ColumnMeta(name="client", type_oid=25, type_name="text"),
            ColumnMeta(name="state", type_oid=25, type_name="text"),
            ColumnMeta(name="wait_event", type_oid=25, type_name="text"),
            ColumnMeta(name="connected", type_oid=25, type_name="text"),
            ColumnMeta(name="query_start", type_oid=25, type_name="text"),
            ColumnMeta(name="duration", type_oid=25, type_name="text"),
            ColumnMeta(name="query", type_oid=25, type_name="text"),
        ]
    else:
        columns = [
            ColumnMeta(name="pid", type_oid=23, type_name="int4"),
            ColumnMeta(name="user", type_oid=25, type_name="text"),
            ColumnMeta(name="database", type_oid=25, type_name="text"),
            ColumnMeta(name="application_name", type_oid=25, type_name="text"),
            ColumnMeta(name="client_address", type_oid=25, type_name="text"),
            ColumnMeta(name="state", type_oid=25, type_name="text"),
            ColumnMeta(name="wait_event", type_oid=25, type_name="text"),
            ColumnMeta(name="connected_since", type_oid=25, type_name="text"),
            ColumnMeta(name="query_start", type_oid=25, type_name="text"),
            ColumnMeta(name="query", type_oid=25, type_name="text"),
        ]

    result = QueryResult(
        columns=columns,
        rows=rows,
        row_count=len(rows),
        status_message=raw_result.status_message,
    )
    output_result(ctx, result)