1use std::fmt::Debug;
4use std::marker::PhantomData;
5use std::str::FromStr;
6use crate::arr::{Arr, IntoConverter};
7use crate::{Cons, Stack};
8use crate::cuda::{CudaTensor1dPtr, ReadMemory, WriteMemory};
9use crate::device::{Device, DeviceCpu, DeviceGpu, DeviceMemoryPool};
10use crate::device::bias::DeviceBias;
11use crate::error::{ConfigReadError, EvaluateError, LayerInstantiationError, PersistenceError, TrainingError, TypeConvertError};
12use crate::layer::{AskDiffInput, Backward, BackwardAll, BatchBackward, BatchDataType, BatchForward, BatchForwardBase, BatchLoss, BatchPreTrain, BatchPreTrainBase, BatchSize, Forward, ForwardAll, Loss, PreTrain, UpdateWeight};
13use crate::lossfunction::LossFunction;
14use crate::mem::AsRawSlice;
15use crate::ope::{UnitValue};
16use crate::optimizer::{Optimizer, OptimizerBuilder};
17use crate::persistence::{Linear, LinearPersistence, Persistence, Specialized, TextFilePersistence, UnitOrMarker};
18
/// Trait defining how a [`BiasLayer`] is constructed for a particular
/// device / bias-container combination (e.g. CPU + `Arr`, GPU + `CudaTensor1dPtr`).
pub trait BiasLayerInstantiation<U,C,P,OP,D,I,PI,const N:usize>
    where P: ForwardAll<Input=I,Output=PI> +
             BackwardAll<U,LossInput=PI> + PreTrain<U> + Loss<U>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U>,
          I: Debug + Send + Sync,
          PI: Debug,
          OP: Optimizer<U,D> {
    /// Creates an instance of [`BiasLayer`].
    ///
    /// # Arguments
    /// * `parent` - upstream (previous) layer this bias layer is stacked on
    /// * `device` - device implementation used for computation
    /// * `ui` - callback producing the initial value for each bias unit
    /// * `b` - optimizer builder used to create this layer's optimizer
    ///
    /// # Errors
    /// Returns [`LayerInstantiationError`] if construction (e.g. optimizer build) fails.
    fn instantiation<UI: FnMut() -> U,B: OptimizerBuilder<U,D,Output=OP>>(parent:P,device:&D,ui:UI,b:&B) -> Result<BiasLayer<U,C,P,OP,D,I,PI,N>,LayerInstantiationError>;
}
/// Layer that adds a trainable per-unit bias (of `N` units) to the output
/// of its parent layer.
pub struct BiasLayer<U,C,P,OP,D,I,PI,const N:usize>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U> + Loss<U>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U>,
          I: Debug + Send + Sync,
          PI: Debug,
          OP: Optimizer<U,D> {
    /// Upstream (previous) layer
    parent:P,
    /// Device implementation used for forward/backward computation
    device:D,
    /// Bias weights; container type `C` depends on the device (host array or CUDA pointer)
    bias:C,
    /// Marker tying the unit type `U` to this struct without storing a value
    u:PhantomData<U>,
    /// Optimizer used to update the bias weights
    optimizer:OP
}
/// Text-file (Specialized format) persistence for a CPU bias layer whose
/// bias lives in a host-side [`Arr`].
impl<U,P,OP,I,PI,const N:usize> Persistence<U,TextFilePersistence<U>,Specialized> for BiasLayer<U,Arr<U,N>,P,OP,DeviceCpu<U>,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> +
             PreTrain<U> + Loss<U> + Persistence<U,TextFilePersistence<U>,Specialized>,
          U: Default + Clone + Copy + UnitValue<U> + FromStr,
          I: Debug + Send + Sync,
          PI: Debug,
          OP: Optimizer<U,DeviceCpu<U>>,
          ConfigReadError: From<<U as FromStr>::Err> {
    /// Restores the parent layers first, then reads one unit per bias element.
    fn load(&mut self, persistence: &mut TextFilePersistence<U>) -> Result<(),ConfigReadError> {
        self.parent.load(persistence)?;

        for b in self.bias.iter_mut() {
            *b = persistence.read()?;
        }

        Ok(())
    }

    /// Saves the parent layers first, then writes a layer-start marker
    /// followed by every bias unit.
    fn save(&mut self, persistence: &mut TextFilePersistence<U>) -> Result<(), PersistenceError> {
        self.parent.save(persistence)?;

        persistence.write(UnitOrMarker::LayerStart);

        for b in self.bias.iter() {
            persistence.write(UnitOrMarker::Unit(*b));
        }

        Ok(())
    }
}
/// Text-file (Specialized format) persistence for a GPU bias layer whose
/// bias lives in device memory ([`CudaTensor1dPtr`]); values are staged
/// through a host-side [`Arr`] on both load and save.
impl<U,P,OP,I,PI,const N:usize> Persistence<U,TextFilePersistence<U>,Specialized> for BiasLayer<U,CudaTensor1dPtr<U,N>,P,OP,DeviceGpu<U>,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> +
             PreTrain<U> + Loss<U> + Persistence<U,TextFilePersistence<U>,Specialized>,
          U: Default + Clone + Copy + UnitValue<U> + FromStr,
          I: Debug + Send + Sync,
          PI: Debug,
          OP: Optimizer<U,DeviceGpu<U>>,
          DeviceGpu<U>: Device<U>,
          ConfigReadError: From<<U as FromStr>::Err> {
    /// Restores the parent layers, reads the bias units into a host buffer,
    /// then copies the buffer into device memory.
    fn load(&mut self, persistence: &mut TextFilePersistence<U>) -> Result<(),ConfigReadError> {
        self.parent.load(persistence)?;

        // Host-side staging buffer; filled from the persistence stream.
        let mut bias = Arr::<U,N>::new();

        for b in bias.iter_mut() {
            *b = persistence.read()?;
        }

        // Upload the N staged values to device memory.
        self.bias.memcpy(bias.as_raw_slice().as_ptr(),N)?;

        Ok(())
    }

    /// Saves the parent layers, then downloads the bias from device memory
    /// and writes a layer-start marker followed by every unit.
    fn save(&mut self, persistence: &mut TextFilePersistence<U>) -> Result<(), PersistenceError> {
        self.parent.save(persistence)?;

        persistence.write(UnitOrMarker::LayerStart);

        let bias = self.bias.read_to_vec()?;

        for b in bias.iter() {
            persistence.write(UnitOrMarker::Unit(*b));
        }

        Ok(())
    }
}
/// Linear-format persistence (any [`LinearPersistence`] backend) for a CPU
/// bias layer whose bias lives in a host-side [`Arr`].
impl<T,U,P,OP,I,PI,const N:usize> Persistence<U,T,Linear> for BiasLayer<U,Arr<U,N>,P,OP,DeviceCpu<U>,I,PI,N>
    where T: LinearPersistence<U>,
          P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> +
             PreTrain<U> + Loss<U> + Persistence<U,T,Linear>,
          U: Default + Clone + Copy + UnitValue<U>,
          I: Debug + Send + Sync,
          PI: Debug,
          OP: Optimizer<U,DeviceCpu<U>> {
    /// Restores the parent layers first, then reads one value per bias element.
    fn load(&mut self, persistence: &mut T) -> Result<(),ConfigReadError> {
        self.parent.load(persistence)?;

        for b in self.bias.iter_mut() {
            *b = persistence.read()?;
        }

        Ok(())
    }

    /// Saves the parent layers first, then writes every bias value in order.
    fn save(&mut self, persistence: &mut T) -> Result<(), PersistenceError> {
        self.parent.save(persistence)?;

        for b in self.bias.iter() {
            persistence.write(*b)?;
        }

        Ok(())
    }
}
/// Linear-format persistence (any [`LinearPersistence`] backend) for a GPU
/// bias layer; values are staged through a host-side [`Arr`] on load and
/// downloaded with `read_to_vec` on save.
impl<T,U,P,OP,I,PI,const N:usize> Persistence<U,T,Linear> for BiasLayer<U,CudaTensor1dPtr<U,N>,P,OP,DeviceGpu<U>,I,PI,N>
    where T: LinearPersistence<U>,
          P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> +
             PreTrain<U> + Loss<U> + Persistence<U,T,Linear>,
          U: Default + Clone + Copy + UnitValue<U>,
          I: Debug + Send + Sync,
          PI: Debug,
          OP: Optimizer<U,DeviceGpu<U>>,
          DeviceGpu<U>: Device<U> {
    /// Restores the parent layers, reads the bias into a host buffer,
    /// then copies the buffer into device memory.
    fn load(&mut self, persistence: &mut T) -> Result<(),ConfigReadError> {
        self.parent.load(persistence)?;

        // Host-side staging buffer; filled from the persistence stream.
        let mut bias = Arr::<U,N>::new();

        for b in bias.iter_mut() {
            *b = persistence.read()?;
        }

        // Upload the N staged values to device memory.
        self.bias.memcpy(bias.as_raw_slice().as_ptr(),N)?;

        Ok(())
    }

    /// Saves the parent layers, then downloads the bias from device memory
    /// and writes every value in order.
    fn save(&mut self, persistence: &mut T) -> Result<(), PersistenceError> {
        self.parent.save(persistence)?;

        let bias = self.bias.read_to_vec()?;

        for b in bias.iter() {
            persistence.write(*b)?;
        }

        Ok(())
    }
}
/// Single-sample forward pass: applies the bias to an already-computed
/// parent output.
impl<U,C,P,OP,D,I,PI,const N:usize> Forward<PI,Result<PI,EvaluateError>> for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          I: Debug + Send + Sync,
          PI: Debug + BatchDataType,
          OP: Optimizer<U,D>,
          <PI as BatchDataType>::Type: Debug + BatchSize {

    /// Adds this layer's bias to `input` via the device implementation.
    fn forward(&self,input:&PI) -> Result<PI,EvaluateError> {
        self.device.forward_bias(&self.bias,input)
    }
}
/// End-to-end forward pass: runs the parent chain, then applies the bias.
impl<U,C,P,OP,D,I,PI,const N:usize> ForwardAll for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> +
             BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U>,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          I: Debug + Send + Sync,
          PI: Debug + BatchDataType + 'static,
          OP: Optimizer<U,D>,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static {
    type Input = I;
    type Output = PI;
    /// Forwards `input` through the parent layers and applies the bias to the result.
    fn forward_all(&self, input: Self::Input) -> Result<Self::Output, EvaluateError> {
        self.forward(&self.parent.forward_all(input)?)
    }
}
/// Pre-training forward pass that records intermediate outputs on a stack
/// so the backward pass can consume them.
impl<U,C,P,OP,D,I,PI,const N:usize> PreTrain<U> for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: PreTrain<U,PreOutput=PI> + ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + Loss<U>,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          I: Debug + Send + Sync,
          PI: Debug + BatchDataType + 'static,
          OP: Optimizer<U,D>,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static {
    type PreOutput = PI;
    /// Parent's stack with this layer's output pushed on top.
    type OutStack = Cons<<P as PreTrain<U>>::OutStack,Self::PreOutput>;

    /// Runs the parent's pre-training pass, applies the bias to the value on
    /// top of the resulting stack, and pushes this layer's output.
    fn pre_train(&self, input: Self::Input) -> Result<Self::OutStack, EvaluateError> {
        let r = self.parent.pre_train(input)?;

        // `map` borrows the top of the parent's stack as this layer's input.
        let u = r.map(|r| self.forward(r))?;

        Ok(Cons(r,u))
    }
}
/// Single-sample backward pass: propagates the incoming loss through the
/// bias operation (a bias add leaves the gradient w.r.t. its input unchanged
/// up to the device implementation).
impl<U,C,P,OP,D,I,PI,const N:usize> Backward<U,PI,Result<PI,TrainingError>> for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U>,
          U: Default + Clone + Copy + UnitValue<U>,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          I: Debug + Send + Sync,
          PI: Debug + BatchDataType + 'static,
          OP: Optimizer<U,D>,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static {
    /// Delegates the backward computation to the device.
    fn backward(&mut self, input: PI) -> Result<PI,TrainingError> {
        self.device.backward_bias(input)
    }
}
/// Full backward pass for a host-array bias layer: computes this layer's
/// bias gradient, propagates the loss to the parent, and pushes the gradient
/// onto the gradient stack for a later `update_weight`.
impl<U,P,OP,D,I,PI,const N:usize> BackwardAll<U> for BiasLayer<U,Arr<U,N>,P,OP,D,I,PI,N>
    where P: BackwardAll<U,LossInput=PI> + ForwardAll<Input=I,Output=PI> + PreTrain<U,PreOutput=PI> + Loss<U>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U> + DeviceBias<U,Arr<U,N>,PI,N>,
          I: Debug + Send + Sync,
          PI: Debug + BatchDataType + 'static,
          OP: Optimizer<U,D>,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
          for<'a> &'a <OP as Optimizer<U,D>>::InternalType: From<&'a Arr<U,N>>,
          for<'a> <OP as Optimizer<U,D>>::InternalUpdateType<'a>: From<&'a mut Arr<U,N>> {
    type LossInput = PI;
    type LossOutput = <P as BackwardAll<U>>::LossOutput;

    fn backward_all<L: LossFunction<U>>(&mut self, input: Self::LossInput, stack:Self::OutStack, lossf:&L)
        -> Result<(<Self as BackwardAll<U>>::LossOutput,<Self as UpdateWeight<U>>::GradientStack), TrainingError> {
        // Discard this layer's recorded output; only the parent's stack is needed below.
        let (s,_) = stack.pop();

        let loss = input;

        // Gradient of the loss w.r.t. this layer's bias weights.
        let g = self.device.backward_bias_weight_gradient(&loss)?;

        // Gradient w.r.t. this layer's input, to be handed to the parent.
        let next_loss= self.backward(loss)?;

        // Let the parent apply its loss adjustment before its backward pass.
        let (s,next_loss) = self.parent.loss(next_loss.into(),lossf,s)?;

        let (l,s) = self.parent.backward_all(next_loss, s, lossf)?;

        // Push this layer's bias gradient onto the gradient stack.
        Ok((l,Cons(s,g)))
    }
}
270impl<U,P,OP,D,I,PI,const N:usize> UpdateWeight<U> for BiasLayer<U,Arr<U,N>,P,OP,D,I,PI,N>
271 where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> +
272 PreTrain<U,PreOutput=PI> + Loss<U> + UpdateWeight<U>,
273 U: Default + Clone + Copy + Send + UnitValue<U>,
274 I: Debug + Send + Sync,
275 PI: Debug + BatchDataType + 'static,
276 OP: Optimizer<U,D>,
277 D: Device<U>,
278 <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
279 OP: Optimizer<U,D>,
280 for<'a> &'a <OP as Optimizer<U,D>>::InternalType: From<&'a Arr<U,N>>,
281 for<'a> <OP as Optimizer<U,D>>::InternalUpdateType<'a>: From<&'a mut Arr<U,N>> {
282 type GradientStack = Cons<<P as UpdateWeight<U>>::GradientStack,Arr<U,N>>;
283
284 fn update_weight(&mut self, stack: Self::GradientStack) -> Result<(), TrainingError> {
285 let (s,bias) = stack.pop();
286
287 self.optimizer.update((&bias).into(),(&mut self.bias).into())?;
288
289 Ok(self.parent.update_weight(s)?)
290 }
291}
/// Exposes the parent's differential input by looking past this layer's
/// entry on the pre-training output stack.
impl<U,C,P,OP,D,I,PI,const N:usize> AskDiffInput<U> for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: PreTrain<U,OutStack=<<Self as PreTrain<U>>::OutStack as Stack>::Remaining> +
             ForwardAll<Input=I,Output=PI> +
             BackwardAll<U,LossInput=PI> + Loss<U> +
             AskDiffInput<U>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U>,
          I: Debug + Send + Sync,
          PI: Debug + BatchDataType + 'static,
          OP: Optimizer<U,D>,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
          Self: PreTrain<U,PreOutput=PI> {
    type DiffInput = P::DiffInput;

    /// Delegates to the parent using the remaining (parent-owned) part of the stack.
    fn ask_diff_input(&self, stack: &Self::OutStack) -> Result<Self::DiffInput,TypeConvertError> {
        stack.map_remaining(|s| self.parent.ask_diff_input(s))
    }
}
/// Marker impl: a bias layer applies no loss adjustment of its own, so the
/// trait's default behavior is used.
impl<U,P,OP,D,I,PI,const N:usize> Loss<U> for BiasLayer<U,Arr<U,N>,P,OP,D,I,PI,N>
    where P: PreTrain<U,PreOutput=PI> + ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + Loss<U>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          I: Debug + Send + Sync,
          PI: Debug + BatchDataType + 'static,
          OP: Optimizer<U,D>,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
          D: Device<U> + DeviceBias<U,Arr<U,N>,PI,N>,
          for<'a> &'a <OP as Optimizer<U,D>>::InternalType: From<&'a Arr<U,N>>,
          for<'a> <OP as Optimizer<U,D>>::InternalUpdateType<'a>: From<&'a mut Arr<U,N>> {
}
/// Associated batch input/output types: the batch variants of the
/// single-sample input `I` and output `PI`.
impl<U,C,P,OP,D,I,PI,const N:usize> BatchForwardBase for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U> +
             BatchForwardBase<BatchInput=<I as BatchDataType>::Type,BatchOutput=<PI as BatchDataType>::Type>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          I: Debug + Send + Sync + BatchDataType,
          PI: Debug + BatchDataType + 'static,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
          <I as BatchDataType>::Type: Debug,
          OP: Optimizer<U,D>,
          Self: ForwardAll {
    type BatchInput = <I as BatchDataType>::Type;
    type BatchOutput = <PI as BatchDataType>::Type;
}
/// Batched forward pass: runs the parent's batch forward, then applies the
/// bias to the whole batch via the device.
impl<U,C,P,OP,D,I,PI,const N:usize> BatchForward for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U> +
             BatchForwardBase<BatchInput=<I as BatchDataType>::Type,BatchOutput=<PI as BatchDataType>::Type> + BatchForward,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          I: Debug + Send + Sync + BatchDataType,
          PI: Debug + BatchDataType + 'static,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
          <I as BatchDataType>::Type: Debug,
          OP: Optimizer<U,D> {
    /// Forwards the batch through the parent, then adds the bias to each sample.
    fn batch_forward(&self, input: Self::BatchInput) -> Result<Self::BatchOutput, TrainingError> {
        let input = self.parent.batch_forward(input)?;

        Ok(self.device.batch_forward_bias(&self.bias,&input)?)
    }
}
/// Associated types for the batched pre-training pass; mirrors [`PreTrain`]
/// with the batch variants of the sample types.
impl<U,C,P,OP,D,I,PI,const N:usize> BatchPreTrainBase<U> for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U> +
             BatchForwardBase<BatchInput=<I as BatchDataType>::Type,BatchOutput=<PI as BatchDataType>::Type> + BatchForward +
             BatchPreTrainBase<U,BatchPreOutput=<PI as BatchDataType>::Type>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          I: Debug + Send + Sync + BatchDataType,
          PI: Debug + BatchDataType + 'static,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
          <I as BatchDataType>::Type: Debug,
          OP: Optimizer<U,D>,
          Self: PreTrain<U,PreOutput=PI> {
    type BatchPreOutput = <PI as BatchDataType>::Type;
    /// Parent's batch stack with this layer's batch output pushed on top.
    type BatchOutStack = Cons<<P as BatchPreTrainBase<U>>::BatchOutStack,Self::BatchPreOutput>;
}
/// Batched pre-training forward pass that records intermediate batch
/// outputs on a stack for the batched backward pass.
impl<U,C,P,OP,D,I,PI,const N:usize> BatchPreTrain<U> for BiasLayer<U,C,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U> +
             BatchForwardBase<BatchInput=<I as BatchDataType>::Type,BatchOutput=<PI as BatchDataType>::Type> + BatchForward +
             BatchPreTrainBase<U,BatchPreOutput=<PI as BatchDataType>::Type> + BatchPreTrain<U>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          D: Device<U> + DeviceBias<U,C,PI,N>,
          I: Debug + Send + Sync + BatchDataType,
          PI: Debug + BatchDataType + 'static,
          <PI as BatchDataType>::Type: Debug + BatchSize + 'static,
          <I as BatchDataType>::Type: Debug,
          OP: Optimizer<U,D> {
    /// Runs the parent's batched pre-training pass, applies the bias to the
    /// batch on top of the stack, and pushes this layer's batch output.
    fn batch_pre_train(&self, input: Self::BatchInput) -> Result<Self::BatchOutStack, TrainingError> {
        let r = self.parent.batch_pre_train(input)?;

        // `map` borrows the top of the parent's stack as this layer's batch input.
        let u = r.map(|input| self.device.batch_forward_bias(&self.bias,input))?;

        Ok(Cons(r,u))
    }
}
/// Batched backward pass for a host-array bias layer: computes the batch
/// bias gradient, propagates the batch loss to the parent, and pushes the
/// gradient onto the gradient stack.
impl<U,P,OP,D,I,PI,const N:usize> BatchBackward<U> for BiasLayer<U,Arr<U,N>,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U> +
             BatchForwardBase<BatchInput=<I as BatchDataType>::Type,BatchOutput=<PI as BatchDataType>::Type> + BatchForward +
             BatchPreTrainBase<U,BatchPreOutput=<PI as BatchDataType>::Type> + BatchPreTrain<U> +
             BatchBackward<U> + BatchLoss<U,BatchLossInput=<PI as BatchDataType>::Type>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          I: Debug + Send + Sync + BatchDataType,
          PI: Debug + BatchDataType + 'static,
          <PI as BatchDataType>::Type: Debug + BatchSize + IntoConverter + 'static,
          <I as BatchDataType>::Type: Debug,
          OP: Optimizer<U,D>,
          D: Device<U> + DeviceBias<U,Arr<U,N>,PI,N>,
          for<'a> &'a <OP as Optimizer<U,D>>::InternalType: From<&'a Arr<U,N>>,
          for<'a> <OP as Optimizer<U,D>>::InternalUpdateType<'a>: From<&'a mut Arr<U,N>> {
    type BatchLossInput = <PI as BatchDataType>::Type;
    type BatchLossOutput = <P as BatchBackward<U>>::BatchLossOutput;

    fn batch_backward<L: LossFunction<U>>(&mut self, input: Self::BatchLossInput, stack: Self::BatchOutStack, lossf: &L)
        -> Result<(<Self as BatchBackward<U>>::BatchLossOutput,<Self as UpdateWeight<U>>::GradientStack), TrainingError> {
        // Discard this layer's recorded batch output; the parent's stack remains.
        let (s, _) = stack.pop();

        let loss = input;

        // Gradient of the loss w.r.t. the bias weights, accumulated over the batch.
        let g = self.device.batch_backward_bias_weight_gradient(&loss)?;

        // Gradient w.r.t. this layer's input, to be handed to the parent.
        let next_loss = self.device.batch_backward_bias(loss)?;

        // Let the parent apply its batch loss adjustment before its backward pass.
        let (
            s,next_loss
        ) = self.parent.batch_loss(next_loss,lossf,s)?;

        let (l,s) = self.parent.batch_backward(next_loss, s, lossf)?;

        // Push this layer's bias gradient onto the gradient stack.
        Ok((l,Cons(s,g)))
    }
}
/// Marker impl: a bias layer applies no batch loss adjustment of its own,
/// so the trait's default behavior is used.
impl<U,P,OP,D,I,PI,const N:usize> BatchLoss<U> for BiasLayer<U,Arr<U,N>,P,OP,D,I,PI,N>
    where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U,PreOutput=PI> + Loss<U> +
             BatchForwardBase<BatchInput=<I as BatchDataType>::Type,BatchOutput=<PI as BatchDataType>::Type> + BatchForward +
             BatchPreTrainBase<U,BatchPreOutput=<PI as BatchDataType>::Type> + BatchPreTrain<U> +
             BatchBackward<U> + BatchLoss<U,BatchLossInput=<PI as BatchDataType>::Type>,
          U: Default + Clone + Copy + Send + UnitValue<U>,
          I: Debug + Send + Sync + BatchDataType,
          PI: Debug + BatchDataType + 'static,
          <PI as BatchDataType>::Type: Debug + BatchSize + IntoConverter + 'static,
          <I as BatchDataType>::Type: Debug,
          OP: Optimizer<U,D>,
          D: Device<U> + DeviceBias<U,Arr<U,N>,PI,N>,
          for<'a> &'a <OP as Optimizer<U,D>>::InternalType: From<&'a Arr<U,N>>,
          for<'a> <OP as Optimizer<U,D>>::InternalUpdateType<'a>: From<&'a mut Arr<U,N>> {
}
436impl<U,P,OP,I,PI,const N:usize> BiasLayerInstantiation<U,Arr<U,N>,P,OP,DeviceCpu<U>,I,PI,N> for BiasLayer<U,Arr<U,N>,P,OP,DeviceCpu<U>,I,PI,N>
437 where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U> + Loss<U>,
438 U: Default + Clone + Copy + Send + UnitValue<U>,
439 I: Debug + Send + Sync,
440 PI: Debug + BatchDataType,
441 OP: Optimizer<U,DeviceCpu<U>> {
442 fn instantiation<UI: FnMut() -> U,B: OptimizerBuilder<U,DeviceCpu<U>,Output=OP>>(parent: P, device: &DeviceCpu<U>, ui: UI, b: &B)
443 -> Result<BiasLayer<U, Arr<U,N>, P, OP, DeviceCpu<U>, I, PI, N>, LayerInstantiationError> {
444 let mut ui = ui;
445
446 let mut bias = Arr::new();
447
448 for it in bias.iter_mut() {
449 *it = ui();
450 }
451
452 Ok(BiasLayer {
453 parent: parent,
454 device: device.clone(),
455 bias: bias,
456 u:PhantomData::<U>,
457 optimizer:b.build(N)?
458 })
459 }
460}
461impl<U,P,OP,I,PI,const N:usize> BiasLayerInstantiation<U,CudaTensor1dPtr<U,N>,P,OP,DeviceGpu<U>,I,PI,N> for BiasLayer<U,Arr<U,N>,P,OP,DeviceGpu<U>,I,PI,N>
462 where P: ForwardAll<Input=I,Output=PI> + BackwardAll<U,LossInput=PI> + PreTrain<U> + Loss<U>,
463 U: Default + Clone + Copy + Send + UnitValue<U>,
464 I: Debug + Send + Sync,
465 PI: Debug + BatchDataType,
466 OP: Optimizer<U,DeviceGpu<U>>,
467 DeviceGpu<U>: Device<U> {
468 fn instantiation<UI: FnMut() -> U,B: OptimizerBuilder<U,DeviceGpu<U>,Output=OP>>(parent: P, device: &DeviceGpu<U>, ui: UI, b: &B)
469 -> Result<BiasLayer<U, CudaTensor1dPtr<U,N>, P, OP, DeviceGpu<U>, I, PI, N>, LayerInstantiationError> {
470 Ok(BiasLayer {
471 parent: parent,
472 device: device.clone(),
473 bias: CudaTensor1dPtr::with_initializer(device.get_memory_pool(),ui)?,
474 u:PhantomData::<U>,
475 optimizer:b.build(N)?
476 })
477 }
478}
/// Builder for creating [`BiasLayer`] instances with `N` bias units.
pub struct BiasLayerBuilder<const N:usize> {

}
impl<const N:usize> BiasLayerBuilder<N> {
    /// Creates a new builder for a bias layer with `N` units.
    pub fn new() -> BiasLayerBuilder<N> {
        BiasLayerBuilder {}
    }

    /// Builds a [`BiasLayer`] for whichever device / container combination
    /// has a matching [`BiasLayerInstantiation`] impl.
    ///
    /// # Arguments
    /// * `parent` - upstream (previous) layer
    /// * `device` - device used for computation
    /// * `ui` - callback producing the initial value of each bias unit
    /// * `b` - optimizer builder used to create the layer's optimizer
    ///
    /// # Errors
    /// Returns [`LayerInstantiationError`] if instantiation fails.
    pub fn build<U,C,P,D,I,PI,UI,OP,B>(&self,parent:P,device:&D,ui:UI,b:&B)
        -> Result<BiasLayer<U,C,P,OP,D,I,PI,N>,LayerInstantiationError>
        where P: ForwardAll<Input=I,Output=PI> +
                 BackwardAll<U,LossInput=PI> + PreTrain<U> + Loss<U>,
              U: Default + Clone + Copy + Send + UnitValue<U>,
              D: Device<U>,
              I: Debug + Send + Sync + BatchDataType,
              PI: Debug + BatchDataType,
              <I as BatchDataType>::Type: Debug + Send + Sync + 'static,
              OP: Optimizer<U,D>,
              B: OptimizerBuilder<U,D,Output=OP>,
              UI: FnMut() -> U,
              BiasLayer<U,C,P,OP,D,I,PI,N>: BiasLayerInstantiation<U,C,P,OP,D,I,PI,N> {
        BiasLayer::instantiation(parent,device,ui,b)
    }
}
514}