ask_llm 2.2.2

Make a request to whichever LLM is currently best, without hardcoding the model or provider.
Documentation
use ask_llm::{Client, Model};

#[tokio::main]
async fn main() {
	v_utils::clientside!();

	// Example 1: ask for a translation wrapped in a bare (extensionless)
	// codeblock, then pull that codeblock out of the response.
	let translation = Client::default()
		.model(Model::Fast)
		.ask("Translate ```How do you do``` to German. Return translation inside a codeblock.")
		.await
		.unwrap();
	let extracted = translation.extract_codeblock(None);
	println!("{extracted:#?}");

	// Example 2: ask for Python code and extract only the codeblocks
	// tagged with a matching language hint (`python` or `py`).
	let python_answer = Client::default()
		.model(Model::Fast)
		.ask("How to print hello world in python")
		.await
		.unwrap();
	let blocks = python_answer.extract_codeblocks(Some(vec!["python", "py"]));
	println!("{blocks:#?}");
}

// Smoke test: runs the example `main` end-to-end.
// NOTE(review): this performs real network requests to the configured LLM
// provider, so it needs valid credentials/connectivity to pass — it is an
// integration smoke test, not a unit test.
// NOTE(review): `#[cfg(test)]` is redundant here — `#[test]` already limits
// compilation to test builds.
#[cfg(test)]
#[test]
fn test_main() {
	main();
}