Skip to content

Commit c925324

Browse files
committed
Fix up docs and add manpage entries
Signed-off-by: Joshua Stone <[email protected]>
1 parent 7c81174 commit c925324

5 files changed: +20 −4 lines changed

docs/ramalama-bench.1.md

+3
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,9 @@ URL support means if a model is on a web site or even on your local system, you
2828
#### **--help**, **-h**
2929
show this help message and exit
3030

31+
#### **--network-mode**=*none*
32+
set the network mode for the container
33+
3134
## DESCRIPTION
3235
Benchmark specified AI Model.
3336

docs/ramalama-convert.1.md

+3
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@ type of OCI Model Image to convert.
2525
| car | Includes base image with the model stored in a /models subdir |
2626
| raw | Only the model and a link file model.file to it stored at / |
2727

28+
#### **--network-mode**=*none*
29+
sets the configuration for network namespaces when handling RUN instructions
30+
2831
## EXAMPLE
2932

3033
Generate an oci model out of an Ollama model.

docs/ramalama-run.1.md

+3
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,9 @@ llama.cpp explains this as:
5353
#### **--tls-verify**=*true*
5454
require HTTPS and verify certificates when contacting OCI registries
5555

56+
#### **--network-mode**=*none*
57+
set the network mode for the container
58+
5659
## DESCRIPTION
5760
Run specified AI Model as a chat bot. RamaLama pulls specified AI Model from
5861
registry if it does not exist in local storage. By default a prompt for a chat

docs/ramalama-serve.1.md

+3
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,9 @@ IP address for llama.cpp to listen on.
6464
#### **--name**, **-n**
6565
Name of the container to run the Model in.
6666

67+
#### **--network-mode**=*bridge*
68+
set the network mode for the container
69+
6770
#### **--port**, **-p**
6871
port for AI Model server to listen on
6972

ramalama/cli.py

+8-4
Original file line numberDiff line numberDiff line change
@@ -383,7 +383,7 @@ def bench_parser(subparsers):
383383
"--network-mode",
384384
type=str,
385385
default="none",
386-
help="Set the network mode for the container.",
386+
help="set the network mode for the container",
387387
)
388388
parser.add_argument("MODEL") # positional argument
389389
parser.set_defaults(func=bench_cli)
@@ -611,7 +611,7 @@ def convert_parser(subparsers):
611611
"--network-mode",
612612
type=str,
613613
default="none",
614-
help="Sets the configuration for network namespaces when handling RUN instructions.",
614+
help="sets the configuration for network namespaces when handling RUN instructions",
615615
)
616616
parser.add_argument("SOURCE") # positional argument
617617
parser.add_argument("TARGET") # positional argument
@@ -737,7 +737,7 @@ def run_parser(subparsers):
737737
"--network-mode",
738738
type=str,
739739
default="none",
740-
help="Set the network mode for the container.",
740+
help="set the network mode for the container",
741741
)
742742
parser.add_argument("MODEL") # positional argument
743743
parser.add_argument(
@@ -764,11 +764,15 @@ def serve_parser(subparsers):
764764
parser.add_argument(
765765
"-p", "--port", default=config.get('port', "8080"), help="port for AI Model server to listen on"
766766
)
767+
# --network-mode=bridge lets the container listen on localhost, and is an option that's compatible
768+
# with podman and docker:
769+
# https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
770+
# https://docs.docker.com/engine/network/#drivers
767771
parser.add_argument(
768772
"--network-mode",
769773
type=str,
770774
default="bridge",
771-
help="Set the network mode for the container.",
775+
help="set the network mode for the container",
772776
)
773777
parser.add_argument("MODEL") # positional argument
774778
parser.set_defaults(func=serve_cli)

0 commit comments

Comments (0)