block_graph/burn/
layer.rs

fn derror<D:Display,E:Derror>(msg:D)->E{E::custom(msg)}
fn deserialize_batch_norm<'a,B:Backend,D:Deserializer<'a>>(deserializer:D)->Result<BatchNorm<B,1>,D::Error>{
	let record:BatchNormRecord<B>=BatchNormRecord::deserialize(deserializer)?;

	let (beta,epsilon,gamma,mean,momentum,variance)=(record.beta,record.epsilon,record.gamma,record.mean,record.momentum,record.variance);
	let (beta,gamma)=if let (Ok(b),Ok(g))=(beta.try_into(),gamma.try_into()){(Param::from_tensor(b),Param::from_tensor(g))}else{return Err(derror("batch norm beta and gamma parameters must be rank 1 floats"))};
	let (mean,variance)=if let (Ok(m),Ok(v))=(mean.try_into(),variance.try_into()){(RunningState::new(m),RunningState::new(v))}else{return Err(derror("batch norm mean and variance states must be rank 1 floats"))};

	Ok(BatchNorm{beta,epsilon,gamma,momentum,running_mean:mean,running_var:variance})
}
fn deserialize_conv2d<'a,B:Backend,D:Deserializer<'a>>(deserializer:D)->Result<Conv2d<B>,D::Error>{
	let record=Conv2dRecord::deserialize(deserializer)?;

	let (dilation,groups,kernelsize,stride)=(record.dilation,record.groups,record.kernelsize,record.stride);
	let bias=if let Some(b)=record.bias{
		if let Ok(b)=b.try_into(){Some(Param::from_tensor(b))}else{return Err(derror("conv2d bias parameter must be a rank 1 float"))}
	}else{
		None
	};
	let padding=record.padding.clone();
	let weight=Param::from_tensor(if let Ok(w)=record.weight.try_into(){w}else{return Err(derror("conv2d weight parameter must be a rank 4 float"))});

	Ok(Conv2d{bias,dilation,groups,kernel_size:kernelsize,padding,stride,weight})
}
fn deserialize_cross_entropy<'a,B:Backend,D:Deserializer<'a>>(deserializer:D)->Result<CrossEntropyLoss<B>,D::Error>{
	let record=CrossEntropyRecord::deserialize(deserializer)?;

	let (logits,pad,smoothing)=(record.logits,record.pad,record.smoothing);
	let weights=if let Some(s)=record.weights{
		if let Ok(s)=s.try_into(){Some(s)}else{return Err(derror("cross entropy weights parameter must be a rank 1 float"))}
	}else{
		None
	};

	Ok(CrossEntropyLoss{logits,pad_tokens:pad,smoothing,weights})
}
fn deserialize_dropout<'a,D:Deserializer<'a>>(deserializer:D)->Result<Dropout,D::Error>{
	Ok(Dropout{prob:f64::deserialize(deserializer)?})
}
fn deserialize_embedding<'a,B:Backend,D:Deserializer<'a>>(deserializer:D)->Result<Embedding<B>,D::Error>{
	let weight=deserialize_param(deserializer)?;
	Ok(Embedding{weight})
}
fn deserialize_ignored<'a,D:Deserializer<'a>,T:Deserialize<'a>>(deserializer:D)->Result<Ignored<T>,D::Error>{
	let data:T=T::deserialize(deserializer)?;
	Ok(Ignored(data))
}
fn deserialize_layer_norm<'a,B:Backend,D:Deserializer<'a>>(deserializer:D)->Result<LayerNorm<B>,D::Error>{
	let mut layer=LayerNormConfig::new(1).init(&Default::default());
	let record=LayerNormRecord::deserialize(deserializer)?;

	if let Ok(b)=record.beta.try_into(){layer.beta=Param::from_tensor(b)}else{return Err(derror("layer norm beta parameter must be a rank 1 float"))}
	if let Ok(g)=record.gamma.try_into(){layer.gamma=Param::from_tensor(g)}else{return Err(derror("layer norm gamma parameter must be a rank 1 float"))}

	Ok(layer)
}
fn deserialize_linear<'a,B:Backend,D:Deserializer<'a>>(deserializer:D)->Result<Linear<B>,D::Error>{
	let record=LinearRecord::deserialize(deserializer)?;

	let bias=if let Some(b)=record.bias{
		if let Ok(b)=b.try_into(){Some(Param::from_tensor(b))}else{return Err(derror("linear bias parameter must be a rank 1 float"))}
	}else{
		None
	};
	let weight=Param::from_tensor(if let Ok(w)=record.weight.try_into(){w}else{return Err(derror("linear weight parameter must be a rank 2 float"))});

	Ok(Linear{bias,weight})
}
fn deserialize_max_pool_2d<'a,D:Deserializer<'a>>(deserializer:D)->Result<MaxPool2d,D::Error>{
	let config=MaxPool2dConfig::deserialize(deserializer)?;
	Ok(config.init())
}
fn deserialize_nothing<'a,D:Deserializer<'a>,T:Default>(_deserializer:D)->Result<T,D::Error>{Ok(T::default())}
fn deserialize_param<'a,B:Backend,D:Deserializer<'a>,const N:usize>(deserializer:D)->Result<Param<Tensor<B,N>>,D::Error>{
	let data:Value<B>=Value::deserialize(deserializer)?;
	if let Ok(t)=data.try_into(){Ok(Param::from_tensor(t))}else{Err(derror(format!("expected parameter to be a rank {N} float")))}
}
fn deserialize_rotary<'a,B:Backend,D:Deserializer<'a>>(deserializer:D)->Result<RotaryEncoding<B>,D::Error>{Ok(RotaryEncodingConfig::deserialize(deserializer)?.init(&Default::default()))}
fn serialize_batch_norm<B:Backend,S:Serializer>(layer:&BatchNorm<B,1>,serializer:S)->Result<S::Ok,S::Error>{
	let (beta,gamma)=(Value::from(layer.beta.val()),Value::from(layer.gamma.val()));
	let (epsilon,momentum)=(layer.epsilon,layer.momentum);
	let (mean,variance)=(Value::from(layer.running_mean.value()),Value::from(layer.running_var.value()));

	BatchNormRecord{beta,epsilon,gamma,mean,momentum,variance}.serialize(serializer)
}
fn serialize_conv2d<B:Backend,S:Serializer>(layer:&Conv2d<B>,serializer:S)->Result<S::Ok,S::Error>{
	let (dilation,groups,kernelsize,stride)=(layer.dilation,layer.groups,layer.kernel_size,layer.stride);
	let bias=layer.bias.as_ref().map(|b|b.val().into());
	let padding=layer.padding.clone();
	let weight=layer.weight.val().into();

	Conv2dRecord{bias,dilation,groups,kernelsize,padding,stride,weight}.serialize(serializer)
}
fn serialize_cross_entropy<B:Backend,S:Serializer>(layer:&CrossEntropyLoss<B>,serializer:S)->Result<S::Ok,S::Error>{
	let (logits,pad,smoothing)=(layer.logits.clone(),layer.pad_tokens.clone(),layer.smoothing.clone());
	let weights=layer.weights.clone().map(Into::into);

	CrossEntropyRecord{logits,pad,smoothing,weights}.serialize(serializer)
}
fn serialize_dropout<S:Serializer>(data:&Dropout,serializer:S)->Result<S::Ok,S::Error>{data.prob.serialize(serializer)}
fn serialize_embedding<B:Backend,S:Serializer>(layer:&Embedding<B>,serializer:S)->Result<S::Ok,S::Error>{serialize_param(&layer.weight,serializer)}
fn serror<D:Display,E:Serror>(msg:D)->E{E::custom(msg)}
fn serialize_ignored<S:Serializer,T:Serialize>(data:&Ignored<T>,serializer:S)->Result<S::Ok,S::Error>{
	let data:&T=data;
	data.serialize(serializer)
}
fn serialize_layer_norm<B:Backend,S:Serializer>(layer:&LayerNorm<B>,serializer:S)->Result<S::Ok,S::Error>{
	LayerNormRecord{beta:layer.beta.val().into(),gamma:layer.gamma.val().into()}.serialize(serializer)
}
fn serialize_linear<B:Backend,S:Serializer>(layer:&Linear<B>,serializer:S)->Result<S::Ok,S::Error>{
	let bias=layer.bias.as_ref().map(|b|b.val().into());
	let weight=layer.weight.val().into();

	LinearRecord{bias,weight}.serialize(serializer)
}
fn serialize_max_pool_2d<S:Serializer>(layer:&MaxPool2d,serializer:S)->Result<S::Ok,S::Error>{
	MaxPool2dConfig{kernel_size:layer.kernel_size,strides:layer.stride,padding:layer.padding.0.clone(),dilation:layer.dilation}.serialize(serializer)
}
fn serialize_nothing<S:Serializer,T:Default>(_data:&T,serializer:S)->Result<S::Ok,S::Error>{().serialize(serializer)}
fn serialize_param<B:Backend,S:Serializer,const N:usize>(data:&Param<Tensor<B,N>>,serializer:S)->Result<S::Ok,S::Error>{
	if N>8{return Err(serror("tensor rank greater than 8 is not currently supported"))}
	let data:Value<B>=data.val().into();
	data.serialize(serializer)
}
fn serialize_rotary<B:Backend,S:Serializer>(data:&RotaryEncoding<B>,serializer:S)->Result<S::Ok,S::Error>{
	let [distance,head,_2]=data.freq_complex.dims();
	let theta:f32=data.theta.clone().into_scalar().elem();

	RotaryEncodingConfig::new(distance,head).with_theta(theta).serialize(serializer)
}
impl AttentionConfig{
	pub fn init<B:Backend>(&self,_device:&B::Device)->Attention<B>{
		let (dropout,heads,mask)=(self.dropout,self.heads,self.mask);
		let mask=Ignored(mask);
		let phantom=PhantomData;

		Attention{dropout,heads,mask,phantom}
	}
}
impl BiasConfig{
	pub fn init<B:Backend>(&self,device:&B::Device)->Bias<B>{
		let dim=self.dim;
		let shape=[dim];

		Bias{bias:self.initializer.init_with(shape,None,Some(dim),device)}
	}
}
impl Config{
	/// creates an attention config
	pub fn attention(heads:usize,mask:AttentionMask)->Self{Self::Attention(AttentionConfig::new(heads,mask))}
	/// creates a batch norm config
	pub fn batch_norm(countfeatures:usize,epsilon:f32,momentum:f32)->Self{Self::BatchNorm(BatchNormConfig::new(countfeatures).with_epsilon(epsilon as f64).with_momentum(momentum as f64))}
	/// creates a bias config
	pub fn bias(dim:usize)->Self{Self::Bias(BiasConfig::new(dim))}
	/// creates an embedding config
	pub fn embedding(input:usize,output:usize)->Self{Self::Embedding(EmbeddingConfig::new(input,output))}
	/// creates a flatten config
	pub fn flatten<R:RangeBounds<isize>>(dims:R)->Self{
		let a=match dims.start_bound(){Excluded(&n)=>n+1,Included(&n)=>n,Unbounded=>0};
		let b=match dims.end_bound(){Excluded(&n)=>n,Included(&n)=>n+1,Unbounded=>0};
		Self::Flatten(FlattenLayer::new(a..b))
	}
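	// Example sketch (hypothetical call, not from the crate's tests): `Config::flatten(1..=2)` maps the
	// inclusive end bound to an exclusive one and stores `FlattenLayer::new(1..3)`; a fully unbounded
	// range collapses to `0..0`.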
	/// initializes the layer
	pub fn init<B:Backend>(&self,device:&B::Device)->Layer<B>{
		match self{
			Config::Attention(c)=>Layer::Attention(c.init(device)),
			Config::BatchNorm(c)=>Layer::BatchNorm(c.init(device)),
			Config::Bias(c)=>Layer::Bias(c.init(device)),
			Config::CacheKV=>Layer::CacheKV(CacheKV::default()),
			Config::Cat(c)=>Layer::Cat(Ignored(*c)),
			Config::Conv2d(c)=>Layer::Conv2d(c.init(device)),
			Config::Dropout(c)=>Layer::Dropout(c.init()),
			Config::Embedding(c)=>Layer::Embedding(c.init(device)),
			Config::Flatten(c)=>Layer::Flatten(Ignored(c.clone())),
			Config::LayerNorm(c)=>Layer::LayerNorm(c.init(device)),
			Config::Linear(c)=>Layer::Linear(c.init(device)),
			Config::KQV(c)=>Layer::KQV(c.init(device)),
			Config::CrossEntropy(c)=>Layer::CrossEntropy(c.init(device)),
			Config::MaxPool2d(c)=>Layer::MaxPool2d(c.init()),
			Config::Mse=>Layer::Mse(MseLoss),
			Config::Relu=>Layer::Relu(Relu::new()),
			Config::Reshape(c)=>Layer::Reshape(Ignored(c.clone())),
			Config::Rotary(c)=>Layer::Rotary(c.init(device)),
			Config::ScaleShift(c)=>Layer::ScaleShift(c.init(device)),
			Config::Stack(d)=>Layer::Stack(Ignored(*d)),
			Config::Squeeze(c)=>Layer::Squeeze(Ignored(*c)),
			Config::Sum(c)=>Layer::Sum(Ignored(*c)),
			Config::Tanh=>Layer::Tanh(Tanh::new()),
			Config::Unsqueeze(c)=>Layer::Unsqueeze(Ignored(*c))
		}
	}
	/// creates a layer norm config
	pub fn layer_norm(dim:usize)->Self{Self::LayerNorm(LayerNormConfig::new(dim))}
	/// creates a linear config
	pub fn linear(bias:bool,input:usize,output:usize)->Self{Self::Linear(LinearConfig::new(input,output).with_bias(bias))}
	/// creates a max pool 2d config
	pub fn max_pool_2d(kernel:[usize;2],strides:[usize;2])->Self{MaxPool2dConfig::new(kernel).with_strides(strides).into()}
	/// creates a relu config
	pub fn relu()->Self{Self::Relu}
	/// creates a reshape config
	pub fn reshape<R:Into<Reshape>>(args:R)->Self{Self::Reshape(ReshapeLayer::new(args.into()))}
	/// creates a rotary config
	pub fn rotary(distance:usize,head:usize)->Self{Self::Rotary(RotaryEncodingConfig::new(distance,head))}
	/// creates a scale shift config
	pub fn scale_shift()->Self{Self::ScaleShift(ScaleShiftConfig::new())}
	/// creates a tanh config
	pub fn tanh()->Self{Self::Tanh}
	/// scales the initializer
	pub fn w_scale(mut self,r:f32)->Self{
		match &mut self{
			Config::Attention(_c)=>(),
			Config::BatchNorm(_c)=>(),
			Config::Bias(c)=>w_scale_mut(&mut c.initializer,r),
			Config::CacheKV=>(),
			Config::Cat(_c)=>(),
			Config::Conv2d(c)=>w_scale_mut(&mut c.initializer,r),
			Config::CrossEntropy(_c)=>(),
			Config::Dropout(_c)=>(),
			Config::Embedding(c)=>w_scale_mut(&mut c.initializer,r),
			Config::Flatten(_c)=>(),
			Config::KQV(c)=>w_scale_mut(&mut c.initializer,r),
			Config::LayerNorm(_c)=>(),
			Config::Linear(c)=>w_scale_mut(&mut c.initializer,r),
			Config::MaxPool2d(_c)=>(),
			Config::Mse=>(),
			Config::Relu=>(),
			Config::Reshape(_c)=>(),
			Config::Rotary(_c)=>(),
			Config::ScaleShift(c)=>c.initializer.as_mut().into_iter().for_each(|i|w_scale_mut(i,r)),
			Config::Squeeze(_d)=>(),
			Config::Stack(_d)=>(),
			Config::Sum(_c)=>(),
			Config::Tanh=>(),
			Config::Unsqueeze(_c)=>()
		}
		self
	}
}
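// Usage sketch, assuming some backend alias `B` (e.g. burn's NdArray) and illustrative sizes:
// let config=Config::linear(true,784,256).w_scale(0.5);
// let layer:Layer<B>=config.init(&Default::default());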
impl Decompose for Config{
	fn compose(decomposition:Self::Decomposition)->Self{decomposition}
	fn decompose(self)->Self::Decomposition{self}
	fn decompose_cloned(&self)->Self::Decomposition{self.clone()}
	type Decomposition=Self;
}
impl From<AttentionConfig> for Config{
	fn from(value:AttentionConfig)->Self{Self::Attention(value)}
}
impl From<BatchNormConfig> for Config{
	fn from(value:BatchNormConfig)->Self{Self::BatchNorm(value)}
}
impl From<BiasConfig> for Config{
	fn from(value:BiasConfig)->Self{Self::Bias(value)}
}
impl From<CatLayer> for Config{
	fn from(value:CatLayer)->Self{Config::Cat(value)}
}
impl From<CrossEntropyLossConfig> for Config{
	fn from(value:CrossEntropyLossConfig)->Self{Config::CrossEntropy(value)}
}
impl From<DropoutConfig> for Config{
	fn from(value:DropoutConfig)->Self{Config::Dropout(value)}
}
impl From<EmbeddingConfig> for Config{
	fn from(value:EmbeddingConfig)->Self{Config::Embedding(value)}
}
impl From<FlattenLayer<Range<isize>>> for Config{
	fn from(value:FlattenLayer<Range<isize>>)->Self{Config::Flatten(value)}
}
impl From<LayerNormConfig> for Config{
	fn from(value:LayerNormConfig)->Self{Config::LayerNorm(value)}
}
impl From<LinearConfig> for Config{
	fn from(value:LinearConfig)->Self{Config::Linear(value)}
}
impl From<MaxPool2dConfig> for Config{
	fn from(value:MaxPool2dConfig)->Self{Config::MaxPool2d(value)}
}
impl From<MseLoss> for Config{
	fn from(_value:MseLoss)->Self{Config::Mse}
}
impl From<Relu> for Config{
	fn from(_value:Relu)->Self{Config::Relu}
}
impl From<ReshapeLayer<Reshape>> for Config{
	fn from(value:ReshapeLayer<Reshape>)->Self{Config::Reshape(value)}
}
impl From<RotaryEncodingConfig> for Config{
	fn from(value:RotaryEncodingConfig)->Self{Config::Rotary(value)}
}
impl From<SqueezeLayer> for Config{
	fn from(value:SqueezeLayer)->Self{Config::Squeeze(value)}
}
impl From<StackLayer> for Config{
	fn from(value:StackLayer)->Self{Config::Stack(value)}
}
impl From<SumLayer> for Config{
	fn from(value:SumLayer)->Self{Config::Sum(value)}
}
impl From<Tanh> for Config{
	fn from(_value:Tanh)->Self{Config::Tanh}
}
impl From<UnsqueezeLayer> for Config{
	fn from(value:UnsqueezeLayer)->Self{Config::Unsqueeze(value)}
}
impl KQVConfig{
	pub fn init<B:Backend>(&self,device:&B::Device)->KQV<B>{
		let (embed,initializer,kdim,vdim)=(self.embed.clone(),self.initializer.clone(),self.kdim.clone(),self.vdim.clone());
		let (key,value)=(LinearConfig::new(embed,kdim).with_initializer(initializer.clone()).init(device),LinearConfig::new(embed,vdim).with_initializer(initializer.clone()).init(device));
		let query=LinearConfig::new(embed,kdim).with_initializer(initializer).init(device);

		KQV{key,query,value}
	}
}
impl ScaleShiftConfig{
	pub fn init<B:Backend>(&self,device:&B::Device)->ScaleShift<B>{
		let initializer=&self.initializer;

		let a=if let Some(i)=initializer{i.init_with([1],None,None,device)}else{Initializer::Constant{value:1.0}.init_with([1],None,None,device)};
		let b=if let Some(i)=initializer{i.init_with([1],None,None,device)}else{Initializer::Constant{value:0.0}.init_with([1],None,None,device)};
		ScaleShift{a,b}
	}
}
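// Note: with the default `ScaleShiftConfig::new()` (no initializer), `init` produces a=1.0 and b=0.0,
// so the layer starts as the identity map f(x)=x until its parameters are trained.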
impl<B:Backend,M:AI<M::Output,M::Output>+Op> IntoSequence<M> for Layer<B> where Layer<B>:Into<M>{
	fn into_sequence(self)->Sequential<Vec<M>>{vec![self.into()].sequential()}
}
impl<B:Backend,const N:usize> From<BatchNorm<B,N>> for Layer<B>{
	fn from(value:BatchNorm<B,N>)->Self{
		Self::BatchNorm(BatchNorm{beta:value.beta,epsilon:value.epsilon,gamma:value.gamma,momentum:value.momentum,running_mean:value.running_mean,running_var:value.running_var})
	}
}
impl<B:Backend> AI<(Value<B>,Value<B>),(Value<B>,Value<B>)> for CacheKV<B>{
	fn forward(&self,(k,v):(Value<B>,Value<B>))->(Value<B>,Value<B>){
		let (keys,values)=(self.keys.clone(),self.values.clone());
		(if keys.is_empty(){k}else{Value::from(vec![keys,k]).cat(1)},if values.is_empty(){v}else{Value::from(vec![values,v]).cat(1)})
	}
	fn forward_mut(&mut self,(k,v):(Value<B>,Value<B>))->(Value<B>,Value<B>){
		let (keys,values)=(mem::take(&mut self.keys),mem::take(&mut self.values));

		let (keys,values)=(if keys.is_empty(){k}else{Value::from(vec![keys,k]).cat(1)},if values.is_empty(){v}else{Value::from(vec![values,v]).cat(1)});
		(self.keys,self.values)=if keys.is_incompatible()||values.is_incompatible(){Default::default()}else{(keys.clone(),values.clone())};

		(keys,values)
	}
}
impl<B:Backend> AI<(Value<B>,Value<B>,Value<B>),Value<B>> for Attention<B>{
	fn forward(&self,(k,q,v):(Value<B>,Value<B>,Value<B>))->Value<B>{// TODO support for other numbers of dimensions
		fn apply_mask<B:Backend,const D:usize>(a:Tensor<B,D>,mask:AttentionMask,value:f32)->Tensor<B,D>{
			match mask{AttentionMask::Causal=>mask_causal(a,value as f64),AttentionMask::None=>a,AttentionMask::Window(n)=>mask_window(a,n,value as f64)}
		}
		fn f_3d<B:Backend>(dropout:f32,heads:usize,mask:AttentionMask,k:Tensor<B,3>,q:Tensor<B,3>,v:Tensor<B,3>)->Result<Tensor<B,3>,String>{
			let (kdims,qdims,vdims)=(k.dims(),q.dims(),v.dims());

			if kdims!=qdims{return Err("mismatched dims".into())}
			if kdims!=vdims{return Err("mismatched dims".into())}
			let [batch,sequence,embed]=kdims;
			let dropout=Dropout{prob:dropout as f64};
			let head=if embed%heads==0{embed/heads}else{return Err("embed must be a multiple of heads".into())};

			let (k,q,v)=(k.reshape([batch,sequence,heads,head]).swap_dims(1,2),q.reshape([batch,sequence,heads,head]).swap_dims(1,2),v.reshape([batch,sequence,heads,head]).swap_dims(1,2));
			let a=activation::softmax(apply_mask(q.matmul(k.transpose())/(head as f32).sqrt(),mask,-9999.0),3);
			let a=dropout.forward(a);
			let s=a.matmul(v).swap_dims(1,2).reshape([0,0,-1]);

			Ok(s)
		}
		fn mask_causal<B:Backend,const D:usize>(a:Tensor<B,D>,value:f64)->Tensor<B,D>{
			if D<2{return mask_causal::<B,2>(a.unsqueeze(),value).squeeze(0)}	// shouldn't actually happen but if the dimension is less than 2 we can just treat it like it has a second dimension of size 1

			let (device,dims)=(a.device(),a.dims());
			let (key,query)=(dims[D-1],dims[D-2]);
			let extrakeys=key.saturating_sub(query);	// due to caching, there might be more keys than queries

			let causal:Tensor<B,2,Bool>=Tensor::tril_mask([query,key],extrakeys as i64,&device);
			let a=a.mask_fill(causal.unsqueeze(),value);
			a
		}
		/// fills the attention tensor with the value where the query position is less than the key position minus length, or greater than the key position. Assumes attention dimensions are [.., query, key]
		fn mask_window<B:Backend,const D:usize>(a:Tensor<B,D>,length:usize,value:f64)->Tensor<B,D>{
			if D<2{return mask_window::<B,2>(a.unsqueeze(),length,value).squeeze(0)}	// shouldn't actually happen but if the dimension is less than 2 we can just treat it like it has a second dimension of size 1

			let (device,dims)=(a.device(),a.dims());
			let (key,query)=(dims[D-1],dims[D-2]);
			let extrakeys=key.saturating_sub(query);	// due to caching, there might be more keys than queries

			let causal:Tensor<B,2,Bool>=Tensor::tril_mask([query,key],extrakeys as i64,&device);
			let window:Tensor<B,2,Bool>=Tensor::triu_mask([query,key],extrakeys as i64-length as i64,&device);
			let a=a.mask_fill(causal.unsqueeze(),value).mask_fill(window.unsqueeze(),value);
			a
		}
		let (dropout,heads,mask)=(self.dropout,self.heads,self.mask.0);

		match match (k.float(),q.float(),v.float()){
			(Value::F3(k),Value::F3(q),Value::F3(v))=>f_3d(dropout,heads,mask,k,q,v).map(Into::into),
			(Value::Multi(k),Value::Multi(q),Value::Multi(v))=>if k.len()==q.len()&&q.len()==v.len(){Ok(k.into_iter().zip(q).zip(v).map(|((k,q),v)|self.forward((k,q,v))).collect())}else{Err("incompatible lengths".into())}
			_=>Err("attention is currently only supported for 3d float inputs [batch, seq, embed]".into())
		}{
			Err(e)=>e.into(),
			Ok(x)=>x
		}
	}
}
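// Shape sketch for the 3d path above (illustrative sizes, not from a test): k, q and v of shape
// [batch, seq, embed] with `heads` dividing `embed` are reshaped to [batch, heads, seq, embed/heads],
// scores are softmax(q.matmul(k.transpose())/sqrt(embed/heads)) with the configured mask applied,
// and the result is reshaped back to [batch, seq, embed].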
impl<B:Backend> AI<Value<B>,(Value<B>,Value<B>,Value<B>)> for KQV<B>{
	fn forward(&self,input:Value<B>)->(Value<B>,Value<B>,Value<B>){
		let (k,q)=(input.clone(),input.clone());
		let v=input;

		(AI::forward(&self.key,k),AI::forward(&self.query,q),AI::forward(&self.value,v))
	}
}
impl<B:Backend> AI<Value<B>,Value<B>> for Attention<B>{
	fn forward(&self,input:Value<B>)->Value<B>{
		match input{
			Value::Incompatible(e)=>e.into(),
			Value::Multi(v) if v.len()>=3=>if v.len()==3{
				let [k,q,v]=v.try_into().unwrap();
				self.forward((k,q,v))
			}else{
				v.into_iter().map(|x|self.forward(x)).collect()
			},
			_=>"attention inputs must be in triples".into()
		}
	}
}
impl<B:Backend> AI<Value<B>,Value<B>> for Bias<B>{
	fn forward(&self,input:Value<B>)->Value<B>{input+Value::from(self.bias.val())}
}
impl<B:Backend> AI<Value<B>,Value<B>> for CacheKV<B>{
	fn forward(&self,input:Value<B>)->Value<B>{
		match input{
			Value::Incompatible(e)=>e.into(),
			Value::Multi(v) if v.len()>=2=>match v.len(){
				2=>{
					let [k,v]=v.try_into().unwrap();

					let (k,v)=self.forward((k,v));
					vec![k,v].into()
				},
				3=>{
					let [k,q,v]=v.try_into().unwrap();

					let (k,v)=self.forward((k,v));
					vec![k,q,v].into()
				},
				_=>{
					v.into_iter().map(|x|self.forward(x)).collect()
				}
			},
			_=>"cache kv inputs must be in pairs or triples".into()
		}
	}
	fn forward_mut(&mut self,input:Value<B>)->Value<B>{
		match input{
			Value::Incompatible(e)=>e.into(),
			Value::Multi(v) if v.len()>=2=>match v.len(){
				2=>{
					let [k,v]=v.try_into().unwrap();

					let (k,v)=self.forward_mut((k,v));
					vec![k,v].into()
				},
				3=>{
					let [k,q,v]=v.try_into().unwrap();

					let (k,v)=self.forward_mut((k,v));
					vec![k,q,v].into()
				},
				_=>{
					v.into_iter().map(|x|self.forward_mut(x)).collect()
				}
			},
			_=>"cache kv inputs must be in pairs or triples".into()
		}
	}
}
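// Usage sketch (hypothetical shapes): feeding `forward_mut` a pair `[k, v]` with sequence length 1 on
// each decoding step returns the keys and values concatenated along dimension 1 with everything cached
// so far; the immutable `forward` reads the cache without growing it, and if the new data cannot be
// concatenated the cache is reset during that `forward_mut` call.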
impl<B:Backend> AI<Value<B>,Value<B>> for KQV<B>{
	fn forward(&self,input:Value<B>)->Value<B>{
		let (k,q,v)=self.forward(input);
		vec![k,q,v].into()
	}
}
impl<B:Backend> AI<Value<B>,Value<B>> for Layer<B>{
	fn forward(&self,input:Value<B>)->Value<B>{
		match self{
			Layer::Attention(f)=>f.forward(input),
			Layer::BatchNorm(f)=>AI::forward(f,input),
			Layer::Bias(f)=>f.forward(input),
			Layer::CacheKV(f)=>f.forward(input),
			Layer::Cat(f)=>f.forward(input),
			Layer::Conv2d(f)=>AI::forward(f,input),
			Layer::CrossEntropy(f)=>AI::forward(f,input),
			Layer::Dropout(f)=>AI::forward(f,input),
			Layer::Embedding(f)=>AI::forward(f,input),
			Layer::Flatten(f)=>f.0.forward(input),
			Layer::KQV(f)=>f.forward(input),
			Layer::LayerNorm(f)=>AI::forward(f,input),
			Layer::Linear(f)=>AI::forward(f,input),
			Layer::MaxPool2d(f)=>AI::forward(f,input),
			Layer::Mse(f)=>AI::forward(f,input),
			Layer::Relu(f)=>AI::forward(f,input),
			Layer::Reshape(f)=>f.0.forward(input),
			Layer::Rotary(f)=>AI::forward(f,input),
			Layer::ScaleShift(f)=>f.forward(input),
			Layer::Squeeze(f)=>f.forward(input),
			Layer::Stack(f)=>f.forward(input),
			Layer::Sum(f)=>f.forward(input),
			Layer::Tanh(f)=>AI::forward(f,input),
			Layer::Unsqueeze(f)=>f.forward(input),
		}
	}
	fn forward_mut(&mut self,input:Value<B>)->Value<B>{
		match self{
			Layer::Attention(f)=>f.forward_mut(input),
			Layer::BatchNorm(f)=>AI::forward_mut(f,input),
			Layer::Bias(f)=>f.forward_mut(input),
			Layer::CacheKV(f)=>f.forward_mut(input),
			Layer::Cat(f)=>f.0.forward_mut(input),
			Layer::Conv2d(f)=>f.forward_mut(input),
			Layer::CrossEntropy(f)=>AI::forward_mut(f,input),
			Layer::Dropout(f)=>AI::forward_mut(f,input),
			Layer::Embedding(f)=>AI::forward_mut(f,input),
			Layer::Flatten(f)=>f.0.forward_mut(input),
			Layer::KQV(f)=>f.forward_mut(input),
			Layer::LayerNorm(f)=>AI::forward_mut(f,input),
			Layer::Linear(f)=>AI::forward_mut(f,input),
			Layer::MaxPool2d(f)=>AI::forward_mut(f,input),
			Layer::Mse(f)=>AI::forward_mut(f,input),
			Layer::Relu(f)=>AI::forward_mut(f,input),
			Layer::Reshape(f)=>f.0.forward_mut(input),
			Layer::Rotary(f)=>AI::forward_mut(f,input),
			Layer::ScaleShift(f)=>f.forward_mut(input),
			Layer::Squeeze(f)=>f.0.forward_mut(input),
			Layer::Stack(f)=>f.0.forward_mut(input),
			Layer::Sum(f)=>f.0.forward_mut(input),
			Layer::Tanh(f)=>AI::forward_mut(f,input),
			Layer::Unsqueeze(f)=>f.0.forward_mut(input),
		}
	}
}
impl<B:Backend> AI<Value<B>,Value<B>> for ScaleShift<B>{
	fn forward(&self,input:Value<B>)->Value<B>{
		let (a,b)=(Value::from(self.a.val()),Value::from(self.b.val()));
		input*a+b
	}
}
impl<B:Backend> Decompose for Layer<B>{
	fn compose(decomposition:Self::Decomposition)->Self{decomposition}
	fn decompose(self)->Self::Decomposition{self}
	fn decompose_cloned(&self)->Self::Decomposition{self.clone()}
	type Decomposition=Self;
}
impl<B:Backend> From<CatLayer> for Layer<B>{
	fn from(value:CatLayer)->Self{Layer::Cat(Ignored(value))}
}
impl<B:Backend> From<CrossEntropyLoss<B>> for Layer<B>{
	fn from(value:CrossEntropyLoss<B>)->Self{Layer::CrossEntropy(value)}
}
impl<B:Backend> From<Dropout> for Layer<B>{
	fn from(value:Dropout)->Self{Layer::Dropout(value)}
}
impl<B:Backend> From<Embedding<B>> for Layer<B>{
	fn from(value:Embedding<B>)->Self{Layer::Embedding(value)}
}
impl<B:Backend> From<FlattenLayer<Range<isize>>> for Layer<B>{
	fn from(value:FlattenLayer<Range<isize>>)->Self{Layer::Flatten(Ignored(value))}
}
impl<B:Backend> From<LayerNorm<B>> for Layer<B>{
	fn from(value:LayerNorm<B>)->Self{Layer::LayerNorm(value)}
}
impl<B:Backend> From<Linear<B>> for Layer<B>{
	fn from(value:Linear<B>)->Self{Layer::Linear(value)}
}
impl<B:Backend> From<MaxPool2d> for Layer<B>{
	fn from(value:MaxPool2d)->Self{Layer::MaxPool2d(value)}
}
impl<B:Backend> From<MseLoss> for Layer<B>{
	fn from(value:MseLoss)->Self{Layer::Mse(value)}
}
impl<B:Backend> From<Relu> for Layer<B>{
	fn from(value:Relu)->Self{Layer::Relu(value)}
}
impl<B:Backend> From<ReshapeLayer<Reshape>> for Layer<B>{
	fn from(value:ReshapeLayer<Reshape>)->Self{Layer::Reshape(Ignored(value))}
}
impl<B:Backend> From<RotaryEncoding<B>> for Layer<B>{
	fn from(value:RotaryEncoding<B>)->Self{Layer::Rotary(value)}
}
impl<B:Backend> From<SqueezeLayer> for Layer<B>{
	fn from(value:SqueezeLayer)->Self{Layer::Squeeze(Ignored(value))}
}
impl<B:Backend> From<StackLayer> for Layer<B>{
	fn from(value:StackLayer)->Self{Layer::Stack(Ignored(value))}
}
impl<B:Backend> From<SumLayer> for Layer<B>{
	fn from(value:SumLayer)->Self{Layer::Sum(Ignored(value))}
}
impl<B:Backend> From<Tanh> for Layer<B>{
	fn from(value:Tanh)->Self{Layer::Tanh(value)}
}
impl<B:Backend> From<UnsqueezeLayer> for Layer<B>{
	fn from(value:UnsqueezeLayer)->Self{Layer::Unsqueeze(Ignored(value))}
}
impl<B:Backend> Layer<B>{
	/// creates a batch norm layer
	pub fn batch_norm(countfeatures:usize,epsilon:f32,momentum:f32)->Self{Config::batch_norm(countfeatures,epsilon,momentum).init(&Default::default())}
	/// creates an embedding layer
	pub fn embedding(input:usize,output:usize,wscale:f32)->Self{
		let mut l=EmbeddingConfig::new(input,output);
		if wscale!=1.0{l.initializer=w_scale(l.initializer,wscale)}
		let l=l.init(&Default::default());
		Self::Embedding(l)
	}
	/// creates a flatten layer
	pub fn flatten<R:RangeBounds<isize>>(dims:R)->Self{
		let a=match dims.start_bound(){Excluded(&n)=>n+1,Included(&n)=>n,Unbounded=>0};
		let b=match dims.end_bound(){Excluded(&n)=>n,Included(&n)=>n+1,Unbounded=>0};
		Self::Flatten(Ignored(FlattenLayer::new(a..b)))
	}
	/// creates a layer norm layer
	pub fn layer_norm(dim:usize)->Self{Self::LayerNorm(LayerNormConfig::new(dim).init(&Default::default()))}
	/// creates a linear layer
	pub fn linear(bias:bool,input:usize,output:usize,wscale:f32)->Self{
		let mut l=LinearConfig::new(input,output).with_bias(bias);
		if wscale!=1.0{l.initializer=w_scale(l.initializer,wscale)}
		let l=l.init(&Default::default());
		Self::Linear(l)
	}
	/// creates a max pool 2d layer
	pub fn max_pool_2d(kernel:[usize;2],strides:[usize;2])->Self{MaxPool2dConfig::new(kernel).with_strides(strides).init().into()}
	/// creates a relu layer
	pub fn relu()->Self{Self::Relu(Relu)}
	/// creates a reshape layer
	pub fn reshape<R:Into<Reshape>>(args:R)->Self{Self::Reshape(Ignored(ReshapeLayer::new(args.into())))}
	/// creates a rotary layer
	pub fn rotary(distance:usize,head:usize)->Self{Self::Rotary(RotaryEncodingConfig::new(distance,head).init(&Default::default()))}
	/// creates a scale shift layer
	pub fn scale_shift()->Self{Self::ScaleShift(ScaleShiftConfig::new().init(&Default::default()))}
	/// creates a tanh layer
	pub fn tanh()->Self{Self::Tanh(Tanh)}
}
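// Composition sketch (assumed backend alias `B`, illustrative sizes): a small stack could be written as
// let layers:Vec<Layer<B>>=vec![Layer::flatten(1..),Layer::linear(true,784,128,1.0),Layer::relu(),Layer::linear(true,128,10,1.0)];
// with each element applied in order via `AI::forward`.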
impl<B:Backend> Op for Layer<B>{
	type Output=Value<B>;
}
#[derive(Clone,Copy,Debug,Deserialize,Serialize)]
pub enum AttentionMask{Causal,None,Window(usize)}
#[derive(Config)]
/// enumerates config for some burn layers
pub enum Config{Attention(AttentionConfig),BatchNorm(BatchNormConfig),Bias(BiasConfig),CacheKV,Cat(CatLayer),Conv2d(Conv2dConfig),CrossEntropy(CrossEntropyLossConfig),Dropout(DropoutConfig),Embedding(EmbeddingConfig),Flatten(FlattenLayer<Range<isize>>),KQV(KQVConfig),LayerNorm(LayerNormConfig),Linear(LinearConfig),MaxPool2d(MaxPool2dConfig),Mse,Relu,Reshape(ReshapeLayer<Reshape>),Rotary(RotaryEncodingConfig),ScaleShift(ScaleShiftConfig),Squeeze(SqueezeLayer),Stack(StackLayer),Sum(SumLayer),Tanh,Unsqueeze(UnsqueezeLayer)}
#[derive(Debug,Deserialize,Module,Serialize)]//TODO more layers
#[serde(bound="")]
/// enumerates some burn layers
pub enum Layer<B:Backend>{
	Attention(Attention<B>),
	Bias(Bias<B>),
	#[serde(deserialize_with="deserialize_batch_norm")]
	#[serde(serialize_with="serialize_batch_norm")]
	BatchNorm(BatchNorm<B,1>),
	CacheKV(CacheKV<B>),
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	Cat(Ignored<CatLayer>),
	#[serde(deserialize_with="deserialize_conv2d")]
	#[serde(serialize_with="serialize_conv2d")]
	Conv2d(Conv2d<B>),
	#[serde(deserialize_with="deserialize_cross_entropy")]
	#[serde(serialize_with="serialize_cross_entropy")]
	CrossEntropy(CrossEntropyLoss<B>),
	#[serde(deserialize_with="deserialize_dropout")]
	#[serde(serialize_with="serialize_dropout")]
	Dropout(Dropout),
	#[serde(deserialize_with="deserialize_embedding")]
	#[serde(serialize_with="serialize_embedding")]
	Embedding(Embedding<B>),
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	Flatten(Ignored<FlattenLayer<Range<isize>>>),
	KQV(KQV<B>),
	#[serde(deserialize_with="deserialize_layer_norm")]
	#[serde(serialize_with="serialize_layer_norm")]
	LayerNorm(LayerNorm<B>),
	#[serde(deserialize_with="deserialize_linear")]
	#[serde(serialize_with="serialize_linear")]
	Linear(Linear<B>),
	#[serde(deserialize_with="deserialize_max_pool_2d")]
	#[serde(serialize_with="serialize_max_pool_2d")]
	MaxPool2d(MaxPool2d),
	#[serde(deserialize_with="deserialize_nothing")]
	#[serde(serialize_with="serialize_nothing")]
	Mse(MseLoss),
	#[serde(deserialize_with="deserialize_nothing")]
	#[serde(serialize_with="serialize_nothing")]
	Relu(Relu),
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	Reshape(Ignored<ReshapeLayer<Reshape>>),
	#[serde(deserialize_with="deserialize_rotary")]
	#[serde(serialize_with="serialize_rotary")]
	Rotary(RotaryEncoding<B>),
	ScaleShift(ScaleShift<B>),
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	Squeeze(Ignored<SqueezeLayer>),
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	Stack(Ignored<StackLayer>),
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	Sum(Ignored<SumLayer>),
	#[serde(deserialize_with="deserialize_nothing")]
	#[serde(serialize_with="serialize_nothing")]
	Tanh(Tanh),
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	Unsqueeze(Ignored<UnsqueezeLayer>),
}
/// scales the initializer
pub fn w_scale(initializer:Initializer,r:f32)->Initializer{
	let r=r as f64;// initializer parameters are stored as f64
	match initializer{
		Initializer::Constant{value}=>Initializer::Constant{value:value*r},
		Initializer::KaimingNormal{gain,fan_out_only}=>Initializer::KaimingNormal{gain:gain*r,fan_out_only},
		Initializer::KaimingUniform{gain,fan_out_only}=>Initializer::KaimingUniform{gain:gain*r,fan_out_only},
		Initializer::Normal{mean,std}=>Initializer::Normal{mean:mean*r,std:std*r},
		Initializer::Ones=>Initializer::Constant{value:r},
		Initializer::Orthogonal{gain}=>Initializer::Orthogonal{gain:gain*r},
		Initializer::Uniform{min,max}=>Initializer::Uniform{min:min*r,max:max*r},
		Initializer::XavierNormal{gain}=>Initializer::XavierNormal{gain:gain*r},
		Initializer::XavierUniform{gain}=>Initializer::XavierUniform{gain:gain*r},
		Initializer::Zeros=>Initializer::Zeros
	}
}
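// Example sketch: `w_scale(Initializer::Normal{mean:0.0,std:1.0},0.5)` yields
// `Initializer::Normal{mean:0.0,std:0.5}`, `w_scale(Initializer::Ones,0.5)` becomes
// `Initializer::Constant{value:0.5}`, and `Zeros` is returned unchanged.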
/// scales the initializer in place
pub fn w_scale_mut(initializer:&mut Initializer,r:f32){*initializer=w_scale(initializer.clone(),r)}
#[derive(Config,Debug)]
/// config for a layer computing attention from [key,query,value] inputs
pub struct AttentionConfig{
	#[config(default="0.2")]
	dropout:f32,
	heads:usize,
	mask:AttentionMask
}
#[derive(Debug,Deserialize,Module,Serialize)]
#[serde(bound="")]
/// layer for computing attention from [key,query,value] inputs
pub struct Attention<B:Backend>{
	dropout:f32,
	heads:usize,
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	mask:Ignored<AttentionMask>,
	phantom:PhantomData<B>
}
#[derive(Config,Debug)]
/// config for a layer adding bias anywhere
pub struct BiasConfig{
	dim:usize,
	#[config(default="Initializer::Normal{mean:0.0,std:1.0}")]
	initializer:Initializer
}
#[derive(Config,Debug)]
/// config for a layer linearly splitting into [key,query,value] for attention purposes
pub struct KQVConfig{
	embed:usize,
	#[config(default="Initializer::XavierNormal{gain:1.0}")]
	initializer:Initializer,
	kdim:usize,
	vdim:usize
}
#[derive(Debug,Deserialize,Module,Serialize)]
#[serde(bound="")]
/// layer for adding bias anywhere
pub struct Bias<B:Backend>{
	#[serde(deserialize_with="deserialize_param")]
	#[serde(serialize_with="serialize_param")]
	bias:Param<Tensor<B,1>>
}
#[derive(Debug,Default,Deserialize,Module,Serialize)]
#[serde(bound="")]
/// layer for caching kv values from kqv when run mutably. concatenates along dimension 1 and outputs the concatenated keys and values. clears the cache on forward_mut when new data is incompatible for concatenation
pub struct CacheKV<B:Backend>{keys:Value<B>,values:Value<B>}
#[derive(Debug,Deserialize,Module,Serialize)]
#[serde(bound="")]
/// layer for linear splitting into [key,query,value] for attention purposes
pub struct KQV<B:Backend>{
	#[serde(deserialize_with="deserialize_linear")]
	#[serde(serialize_with="serialize_linear")]
	key:Linear<B>,
	#[serde(deserialize_with="deserialize_linear")]
	#[serde(serialize_with="serialize_linear")]
	query:Linear<B>,
	#[serde(deserialize_with="deserialize_linear")]
	#[serde(serialize_with="serialize_linear")]
	value:Linear<B>
}
#[derive(Debug,Deserialize,Module,Serialize)]
#[serde(bound="")]
/// layer that applies a componentwise scalar affine transformation: f(x)=ax+b where a and b are tunable scalars
pub struct ScaleShift<B:Backend>{
	#[serde(deserialize_with="deserialize_param")]
	#[serde(serialize_with="serialize_param")]
	a:Param<Tensor<B,1>>,
	#[serde(deserialize_with="deserialize_param")]
	#[serde(serialize_with="serialize_param")]
	b:Param<Tensor<B,1>>
}
#[derive(Config,Debug)]
/// scale shift config
pub struct ScaleShiftConfig{
	#[config(default="None")]
	initializer:Option<Initializer>
}
#[derive(Deserialize,Serialize)]
#[serde(bound="")]
struct Conv2dRecord<B:Backend>{
	bias:Option<Value<B>>,
	dilation:[usize;2],
	groups:usize,
	kernelsize:[usize;2],
	#[serde(deserialize_with="deserialize_ignored")]
	#[serde(serialize_with="serialize_ignored")]
	padding:Ignored<PaddingConfig2d>,
	stride:[usize;2],
	weight:Value<B>
}
#[derive(Deserialize,Serialize)]
#[serde(bound="")]
struct BatchNormRecord<B:Backend>{beta:Value<B>,epsilon:f64,gamma:Value<B>,mean:Value<B>,momentum:f64,variance:Value<B>}
#[derive(Deserialize,Serialize)]
#[serde(bound="")]
struct CrossEntropyRecord<B:Backend>{logits:bool,pad:Option<Vec<usize>>,weights:Option<Value<B>>,smoothing:Option<f32>}
#[derive(Deserialize,Serialize)]
#[serde(bound="")]
struct LayerNormRecord<B:Backend>{beta:Value<B>,gamma:Value<B>}
#[derive(Deserialize,Serialize)]
#[serde(bound="")]
struct LinearRecord<B:Backend>{bias:Option<Value<B>>,weight:Value<B>}
use Bound::{Excluded,Included,Unbounded};
use burn::{
	module::{Ignored,Param,RunningState},
	nn::{
		BatchNorm,BatchNormConfig,Dropout,DropoutConfig,Embedding,EmbeddingConfig,Initializer,LayerNorm,LayerNormConfig,Linear,LinearConfig,PaddingConfig2d,Relu,RotaryEncoding,RotaryEncodingConfig,Tanh,conv::{Conv2d,Conv2dConfig},loss::{CrossEntropyLoss,CrossEntropyLossConfig,MseLoss},pool::{MaxPool2d,MaxPool2dConfig}
	},
	prelude::*,
	tensor::activation
};
use crate::{
	ai::{AI,Decompose,IntoSequence,Op},
	builtin::{
		Sequential,math::SumLayer,structural::{FlattenLayer,CatLayer,ReshapeLayer,SqueezeLayer,StackLayer,UnsqueezeLayer}
	},
	burn::{Reshape,Value},
	ops::Cat as OpsCat
};
use serde::{Deserialize,Deserializer,Serialize,Serializer,de::Error as Derror,ser::Error as Serror};
use std::{
	fmt::Display,marker::PhantomData,mem,ops::{Bound,Range,RangeBounds}
};