[package]
name = "chinese_segmenter"
version = "1.0.1"
authors = ["Preston Wang-Stosur-Bassett <p.wanstobas@gmail.com>"]
categories = ["text-processing", "localization", "internationalization", "value-formatting"]
edition = "2021"
keywords = ["chinese", "hanzi", "segment", "tokenize"]
license = "MIT"
readme = "README.md"
repository = "https://github.com/sotch-pr35mac/chinese_segmenter"
# NOTE(review): edition 2021 already implies resolver "2"; kept for explicitness.
resolver = "2"
description = "Tokenize Chinese sentences using a dictionary-driven largest first matching approach."

[dependencies]
character_converter = "2.1.2"