# llmserve 0.0.3
#
# TUI for serving local LLM models. Pick a model, pick a backend, serve it.
# Documentation: see the project README.
# Declare command targets as phony so a same-named file on disk never
# shadows them. Fix: `test-local` was missing from this list, so a file
# named `test-local` would make that target appear permanently up to date.
.PHONY: build release run test test-local check fmt clippy clean install

build: ## Compile the crate in debug mode (fast builds, no optimizations)
	cargo build

release: ## Compile an optimized release build (target/release)
	cargo build --release

run: ## Build (debug) and launch the TUI binary
	cargo run

test: ## Run the test suite (skips tests marked #[ignore])
	cargo test

test-local: ## Run ALL tests, including #[ignore]-marked ones
	# --include-ignored also runs tests annotated #[ignore] — presumably
	# ones needing local resources (models/backends); verify against the crate.
	cargo test -- --include-ignored

check: ## Type-check every target and feature combination without producing binaries
	cargo check --all-targets --all-features

fmt: ## Reformat all workspace crates in place with rustfmt
	cargo fmt --all

clippy: ## Lint every target and feature combination with clippy
	cargo clippy --all-targets --all-features

clean: ## Remove the target/ directory and all build artifacts
	cargo clean

install: ## Build in release mode and install the binary into ~/.cargo/bin
	cargo install --path .