use module::*;
use opcode::{Opcode, Memarg};
use byteorder::{LittleEndian, ByteOrder};

#[derive(Copy, Clone, Debug)]
#[repr(u8)]
pub enum TargetOp {
    Drop = 1,
    Dup,
    Swap2,
    Select,

    Call,
    Return,
    Halt,

    GetLocal,
    SetLocal,
    TeeLocal,

    GetSlotIndirect,
    GetSlot,
    SetSlot,
    ResetSlots,

    NativeInvoke,

    CurrentMemory,
    GrowMemory,

    Nop,
    Unreachable,
    NotSupported,

    Jmp,
    JmpIf,
    JmpEither,
    JmpTable,

    I32Load,
    I32Load8U,
    I32Load8S,
    I32Load16U,
    I32Load16S,
    I32Store,
    I32Store8,
    I32Store16,

    I32Const,
    I32Ctz,
    I32Clz,
    I32Popcnt,
    I32Add,
    I32Sub,
    I32Mul,
    I32DivU,
    I32DivS,
    I32RemU,
    I32RemS,
    I32And,
    I32Or,
    I32Xor,
    I32Shl,
    I32ShrU,
    I32ShrS,
    I32Rotl,
    I32Rotr,

    I32Eq,
    I32Ne,
    I32LtU,
    I32LtS,
    I32LeU,
    I32LeS,
    I32GtU,
    I32GtS,
    I32GeU,
    I32GeS,

    I32WrapI64,

    I64Load,
    I64Load8U,
    I64Load8S,
    I64Load16U,
    I64Load16S,
    I64Load32U,
    I64Load32S,
    I64Store,
    I64Store8,
    I64Store16,
    I64Store32,

    I64Const,
    I64Ctz,
    I64Clz,
    I64Popcnt,
    I64Add,
    I64Sub,
    I64Mul,
    I64DivU,
    I64DivS,
    I64RemU,
    I64RemS,
    I64And,
    I64Or,
    I64Xor,
    I64Shl,
    I64ShrU,
    I64ShrS,
    I64Rotl,
    I64Rotr,

    I64Eq,
    I64Ne,
    I64LtU,
    I64LtS,
    I64LeU,
    I64LeS,
    I64GtU,
    I64GtS,
    I64GeU,
    I64GeS,

    I64ExtendI32U,
    I64ExtendI32S,

    Never
}
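
// Encoding note (inferred from the emitters below, not normative): each
// instruction is a single `TargetOp` byte, optionally followed by
// little-endian immediates -- `u32` for locals, slots, branch targets and
// lengths, `u64` for 64-bit constants. For example, `i32.const 42` becomes
// five bytes:
//
//     let mut buf: Vec<u8> = Vec::new();
//     buf.push(TargetOp::I32Const as u8);
//     write_u32(&mut buf, 42);
//     assert_eq!(buf, [TargetOp::I32Const as u8, 42, 0, 0, 0]);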

pub trait MapNativeInvoke {
    fn map_native_invoke(&mut self, module: &str, field: &str) -> Option<u32>;
}

pub struct NullMapNativeInvoke;
impl MapNativeInvoke for NullMapNativeInvoke {
    fn map_native_invoke(&mut self, _module: &str, _field: &str) -> Option<u32> { None }
}
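
// A minimal sketch of a non-trivial mapper, assuming the host wants to assign
// dense ids to a known set of (module, field) pairs; illustrative only, not
// part of the translator.
pub struct StaticMapNativeInvoke {
    entries: Vec<(String, String)>
}

impl MapNativeInvoke for StaticMapNativeInvoke {
    fn map_native_invoke(&mut self, module: &str, field: &str) -> Option<u32> {
        // The id of an import is simply its position in `entries`.
        self.entries.iter()
            .position(|e| e.0 == module && e.1 == field)
            .map(|idx| idx as u32)
    }
}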

struct Reloc {
    code_loc: usize,
    ty: RelocType
}

enum RelocType {
    Function(usize),
    LocalJmp(usize)
}

// Offsets of the translated table and globals within the unified slot array
// built by `build_initializers`.
#[derive(Debug)]
struct OffsetTable {
    table_slot_offset: usize,
    globals_slot_offset: usize
}

struct TargetFunction {
    code: Vec<u8>,
    // Byte offset of each translated source opcode within `code`.
    opcode_relocs: Vec<usize>,
    // Pending address fixups within `code`.
    generic_relocs: Vec<Reloc>
}

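// Output image layout: the data-segment initializers come first (see
// `write_initializers`), followed by the executable region. The executable
// begins with a prologue that resets and fills the slot array, calls the
// entry function and halts; the translated function bodies follow, and all
// code addresses are patched in afterward.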
pub fn translate_module(m: &Module, entry_fn: usize, mni: &mut MapNativeInvoke) -> Vec<u8> {
    let mut target_code: Vec<u8> = Vec::new();

    let (target_dss, slot_values, offset_table) = build_initializers(m);
    let _init_data_relocs = write_initializers(&target_dss, &mut target_code);

    let mut functions: Vec<TargetFunction> = Vec::with_capacity(m.functions.len());

    for f in &m.functions {
        functions.push(translate_function(m, f, &offset_table, mni));
    }

    let mut slot_initializer_relocs: Vec<usize> = Vec::with_capacity(slot_values.len());
    let mut function_relocs: Vec<usize> = Vec::with_capacity(functions.len());
    let mut executable: Vec<u8> = Vec::new();

    // Prologue: reset and fill the slot array.
    executable.push(TargetOp::ResetSlots as u8);
    write_u32(&mut executable, slot_values.len() as u32);

    for (i, sv) in slot_values.iter().enumerate() {
        executable.push(TargetOp::I64Const as u8);

        slot_initializer_relocs.push(executable.len());
        write_u64(&mut executable, *sv as u64);

        executable.push(TargetOp::SetSlot as u8);
        write_u32(&mut executable, i as u32);
    }

    // Call the entry function, then halt.
    let entry_reloc_point = build_call(m, &mut executable, entry_fn);
    executable.push(TargetOp::Halt as u8);

    // Lay out the function bodies, recording where each one starts.
    for f in &functions {
        function_relocs.push(executable.len());
        executable.extend_from_slice(&f.code);
    }

    // Patch the entry call with the entry function's code address.
    LittleEndian::write_u32(
        &mut executable[entry_reloc_point .. entry_reloc_point + 4],
        function_relocs[entry_fn] as u32
    );

    // Resolve per-function relocations now that code addresses are known.
    for (i, f) in functions.iter().enumerate() {
        let target_section = &mut executable[function_relocs[i] .. function_relocs[i] + f.code.len()];
        for reloc in &f.generic_relocs {
            let slot = &mut target_section[reloc.code_loc .. reloc.code_loc + 4];
            match reloc.ty {
                RelocType::Function(id) => {
                    LittleEndian::write_u32(slot, function_relocs[id] as u32);
                },
                RelocType::LocalJmp(pos) => {
                    LittleEndian::write_u32(slot, (function_relocs[i] + f.opcode_relocs[pos]) as u32);
                }
            }
        }
    }

    // Patch table slots: replace each function id (the low 32 bits of the
    // packed slot value) with the function's code address. u32::MAX marks a
    // hole and is left untouched.
    for i in 0..m.tables[0].elements.len() {
        let base = slot_initializer_relocs[offset_table.table_slot_offset + i];
        let elem = &mut executable[base .. base + 8];

        let function_id = LittleEndian::read_u32(elem);
        if function_id != ::std::u32::MAX {
            LittleEndian::write_u32(elem, function_relocs[function_id as usize] as u32);
        }
    }

    target_code.extend_from_slice(&executable);

    target_code
}

fn build_call(m: &Module, out: &mut Vec<u8>, target: usize) -> usize {
    let tf: &Function = &m.functions[target];
    let Type::Func(ref ty_args, _) = &m.types[tf.typeidx as usize];

    // Code address of the callee; left as a placeholder for the caller to
    // patch once function offsets are known.
    out.push(TargetOp::I32Const as u8);
    let reloc_point = out.len();
    write_u32(out, ::std::u32::MAX);

    // Number of locals in the callee's frame.
    out.push(TargetOp::I32Const as u8);
    write_u32(out, tf.locals.len() as u32);

    out.push(TargetOp::Call as u8);
    write_u32(out, ty_args.len() as u32);

    reloc_point
}
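
// The sequence emitted by `build_call` is therefore:
//
//     I32Const <code address>   ; placeholder, patched by the caller
//     I32Const <locals count>   ; frame size for the callee
//     Call     <argument count> ; arguments popped into the new frame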

fn translate_function(m: &Module, f: &Function, offset_table: &OffsetTable, mni: &mut MapNativeInvoke) -> TargetFunction {
    let mut result: Vec<u8> = Vec::new();
    let mut relocs: Vec<Reloc> = Vec::new();
    let opcodes = &f.body.opcodes;
    let mut opcode_relocs: Vec<usize> = Vec::with_capacity(opcodes.len());

    for op in opcodes {
        opcode_relocs.push(result.len());
        match *op {
            Opcode::Drop => {
                result.push(TargetOp::Drop as u8);
            },
            Opcode::Select => {
                result.push(TargetOp::Select as u8);
            },
            Opcode::Call(target) => {
                let reloc_point = build_call(m, &mut result, target as usize);
                relocs.push(Reloc {
                    code_loc: reloc_point,
                    ty: RelocType::Function(target as usize)
                });
            },
            Opcode::CallIndirect(target_ty) => {
                let Type::Func(ref ty_args, _) = &m.types[target_ty as usize];

                // Index into the table slots and fetch the packed descriptor:
                // low 32 bits = code address, high 32 bits = locals count.
                result.push(TargetOp::I32Const as u8);
                write_u32(&mut result, offset_table.table_slot_offset as u32);
                result.push(TargetOp::I32Add as u8);
                result.push(TargetOp::GetSlotIndirect as u8);
                result.push(TargetOp::Dup as u8);

                // Extract the code address from the low half.
                result.push(TargetOp::I64Const as u8);
                write_u64(&mut result, 0xffffffffu64);
                result.push(TargetOp::I64And as u8);
                result.push(TargetOp::Swap2 as u8);

                // Extract the locals count from the high half.
                result.push(TargetOp::I64Const as u8);
                write_u64(&mut result, 0xffffffffu64 << 32);
                result.push(TargetOp::I64And as u8);
                result.push(TargetOp::I64Const as u8);
                write_u64(&mut result, 32);
                result.push(TargetOp::I64ShrU as u8);

                result.push(TargetOp::Call as u8);
                write_u32(&mut result, ty_args.len() as u32);
            },
            Opcode::Return => {
                result.push(TargetOp::Return as u8);
            },
            Opcode::Nop => {},
            Opcode::Unreachable => result.push(TargetOp::Unreachable as u8),
            Opcode::GetLocal(id) => {
                result.push(TargetOp::GetLocal as u8);
                write_u32(&mut result, id);
            },
            Opcode::SetLocal(id) => {
                result.push(TargetOp::SetLocal as u8);
                write_u32(&mut result, id);
            },
            Opcode::TeeLocal(id) => {
                result.push(TargetOp::TeeLocal as u8);
                write_u32(&mut result, id);
            },
            Opcode::GetGlobal(id) => {
                result.push(TargetOp::GetSlot as u8);
                write_u32(&mut result, offset_table.globals_slot_offset as u32 + id);
            },
            Opcode::SetGlobal(id) => {
                result.push(TargetOp::SetSlot as u8);
                write_u32(&mut result, offset_table.globals_slot_offset as u32 + id);
            },
            Opcode::Jmp(loc) => {
                result.push(TargetOp::Jmp as u8);
                relocs.push(Reloc {
                    code_loc: result.len(),
                    ty: RelocType::LocalJmp(loc as usize)
                });
                write_u32(&mut result, ::std::u32::MAX);
            },
            Opcode::JmpIf(loc) => {
                result.push(TargetOp::JmpIf as u8);
                relocs.push(Reloc {
                    code_loc: result.len(),
                    ty: RelocType::LocalJmp(loc as usize)
                });
                write_u32(&mut result, ::std::u32::MAX);
            },
            Opcode::JmpEither(loc_a, loc_b) => {
                result.push(TargetOp::JmpEither as u8);
                relocs.push(Reloc {
                    code_loc: result.len(),
                    ty: RelocType::LocalJmp(loc_a as usize)
                });
                write_u32(&mut result, ::std::u32::MAX);
                relocs.push(Reloc {
                    code_loc: result.len(),
                    ty: RelocType::LocalJmp(loc_b as usize)
                });
                write_u32(&mut result, ::std::u32::MAX);
            },
            Opcode::JmpTable(ref targets, otherwise) => {
                result.push(TargetOp::JmpTable as u8);
                relocs.push(Reloc {
                    code_loc: result.len(),
                    ty: RelocType::LocalJmp(otherwise as usize)
                });
                write_u32(&mut result, ::std::u32::MAX);

                write_u32(&mut result, targets.len() as u32);
                for t in targets {
                    relocs.push(Reloc {
                        code_loc: result.len(),
                        ty: RelocType::LocalJmp(*t as usize)
                    });
                    write_u32(&mut result, ::std::u32::MAX);
                }
            },
            Opcode::CurrentMemory => {
                // The VM reports a byte count; convert to 64 KiB WASM pages.
                result.push(TargetOp::CurrentMemory as u8);
                result.push(TargetOp::I32Const as u8);
                write_u32(&mut result, 65536);
                result.push(TargetOp::I32DivU as u8);
            },
            Opcode::GrowMemory => {
                // Convert the requested page delta into bytes...
                result.push(TargetOp::I32Const as u8);
                write_u32(&mut result, 65536);
                result.push(TargetOp::I32Mul as u8);

                result.push(TargetOp::GrowMemory as u8);

                // ...and the returned previous size back into pages.
                result.push(TargetOp::I32Const as u8);
                write_u32(&mut result, 65536);
                result.push(TargetOp::I32DivU as u8);
            },
            Opcode::I32Const(v) => {
                result.push(TargetOp::I32Const as u8);
                write_u32(&mut result, v as u32);
            },
            Opcode::I32Clz => result.push(TargetOp::I32Clz as u8),
            Opcode::I32Ctz => result.push(TargetOp::I32Ctz as u8),
            Opcode::I32Popcnt => result.push(TargetOp::I32Popcnt as u8),
            Opcode::I32Add => result.push(TargetOp::I32Add as u8),
            Opcode::I32Sub => result.push(TargetOp::I32Sub as u8),
            Opcode::I32Mul => result.push(TargetOp::I32Mul as u8),
            Opcode::I32DivU => result.push(TargetOp::I32DivU as u8),
            Opcode::I32DivS => result.push(TargetOp::I32DivS as u8),
            Opcode::I32RemU => result.push(TargetOp::I32RemU as u8),
            Opcode::I32RemS => result.push(TargetOp::I32RemS as u8),
            Opcode::I32And => result.push(TargetOp::I32And as u8),
            Opcode::I32Or => result.push(TargetOp::I32Or as u8),
            Opcode::I32Xor => result.push(TargetOp::I32Xor as u8),
            Opcode::I32Shl => result.push(TargetOp::I32Shl as u8),
            Opcode::I32ShrU => result.push(TargetOp::I32ShrU as u8),
            Opcode::I32ShrS => result.push(TargetOp::I32ShrS as u8),
            Opcode::I32Rotl => result.push(TargetOp::I32Rotl as u8),
            Opcode::I32Rotr => result.push(TargetOp::I32Rotr as u8),
            Opcode::I32Eqz => {
                // No dedicated eqz opcode; lower to `== 0`.
                result.push(TargetOp::I32Const as u8);
                write_u32(&mut result, 0);
                result.push(TargetOp::I32Eq as u8);
            },
            Opcode::I32Eq => result.push(TargetOp::I32Eq as u8),
            Opcode::I32Ne => result.push(TargetOp::I32Ne as u8),
            Opcode::I32LtU => result.push(TargetOp::I32LtU as u8),
            Opcode::I32LtS => result.push(TargetOp::I32LtS as u8),
            Opcode::I32LeU => result.push(TargetOp::I32LeU as u8),
            Opcode::I32LeS => result.push(TargetOp::I32LeS as u8),
            Opcode::I32GtU => result.push(TargetOp::I32GtU as u8),
            Opcode::I32GtS => result.push(TargetOp::I32GtS as u8),
            Opcode::I32GeU => result.push(TargetOp::I32GeU as u8),
            Opcode::I32GeS => result.push(TargetOp::I32GeS as u8),
            Opcode::I32WrapI64 => result.push(TargetOp::I32WrapI64 as u8),
            // Alignment hints are dropped; only the static offset is encoded.
            Opcode::I32Load(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Load as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I32Load8U(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Load8U as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I32Load8S(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Load8S as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I32Load16U(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Load16U as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I32Load16S(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Load16S as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I32Store(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Store as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I32Store8(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Store8 as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I32Store16(Memarg { offset, .. }) => {
                result.push(TargetOp::I32Store16 as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Const(v) => {
                result.push(TargetOp::I64Const as u8);
                write_u64(&mut result, v as u64);
            },
            Opcode::I64Clz => result.push(TargetOp::I64Clz as u8),
            Opcode::I64Ctz => result.push(TargetOp::I64Ctz as u8),
            Opcode::I64Popcnt => result.push(TargetOp::I64Popcnt as u8),
            Opcode::I64Add => result.push(TargetOp::I64Add as u8),
            Opcode::I64Sub => result.push(TargetOp::I64Sub as u8),
            Opcode::I64Mul => result.push(TargetOp::I64Mul as u8),
            Opcode::I64DivU => result.push(TargetOp::I64DivU as u8),
            Opcode::I64DivS => result.push(TargetOp::I64DivS as u8),
            Opcode::I64RemU => result.push(TargetOp::I64RemU as u8),
            Opcode::I64RemS => result.push(TargetOp::I64RemS as u8),
            Opcode::I64And => result.push(TargetOp::I64And as u8),
            Opcode::I64Or => result.push(TargetOp::I64Or as u8),
            Opcode::I64Xor => result.push(TargetOp::I64Xor as u8),
            Opcode::I64Shl => result.push(TargetOp::I64Shl as u8),
            Opcode::I64ShrU => result.push(TargetOp::I64ShrU as u8),
            Opcode::I64ShrS => result.push(TargetOp::I64ShrS as u8),
            Opcode::I64Rotl => result.push(TargetOp::I64Rotl as u8),
            Opcode::I64Rotr => result.push(TargetOp::I64Rotr as u8),
            Opcode::I64Eqz => {
                result.push(TargetOp::I64Const as u8);
                write_u64(&mut result, 0);
                result.push(TargetOp::I64Eq as u8);
            },
            Opcode::I64Eq => result.push(TargetOp::I64Eq as u8),
            Opcode::I64Ne => result.push(TargetOp::I64Ne as u8),
            Opcode::I64LtU => result.push(TargetOp::I64LtU as u8),
            Opcode::I64LtS => result.push(TargetOp::I64LtS as u8),
            Opcode::I64LeU => result.push(TargetOp::I64LeU as u8),
            Opcode::I64LeS => result.push(TargetOp::I64LeS as u8),
            Opcode::I64GtU => result.push(TargetOp::I64GtU as u8),
            Opcode::I64GtS => result.push(TargetOp::I64GtS as u8),
            Opcode::I64GeU => result.push(TargetOp::I64GeU as u8),
            Opcode::I64GeS => result.push(TargetOp::I64GeS as u8),
            Opcode::I64ExtendI32U => result.push(TargetOp::I64ExtendI32U as u8),
            Opcode::I64ExtendI32S => result.push(TargetOp::I64ExtendI32S as u8),
            Opcode::I64Load(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Load as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Load8U(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Load8U as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Load8S(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Load8S as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Load16U(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Load16U as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Load16S(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Load16S as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Load32U(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Load32U as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Load32S(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Load32S as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Store(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Store as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Store8(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Store8 as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Store16(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Store16 as u8);
                write_u32(&mut result, offset);
            },
            Opcode::I64Store32(Memarg { offset, .. }) => {
                result.push(TargetOp::I64Store32 as u8);
                write_u32(&mut result, offset);
            },
            Opcode::F32Const(v) => {
                // Float constants are lowered to integer constants of the
                // same width.
                result.push(TargetOp::I32Const as u8);
                write_u32(&mut result, v as u32);
            },
            Opcode::F64Const(v) => {
                result.push(TargetOp::I64Const as u8);
                write_u64(&mut result, v as u64);
            },
            // Reinterpret casts are no-ops on raw bit patterns.
            Opcode::F32ReinterpretI32 | Opcode::I32ReinterpretF32
                | Opcode::F64ReinterpretI64 | Opcode::I64ReinterpretF64 => {},
            Opcode::NativeInvoke(id) => {
                let native = &m.natives[id as usize];

                result.push(TargetOp::NativeInvoke as u8);
                write_u32(
                    &mut result,
                    if let Some(ni_id) = mni.map_native_invoke(&native.module, &native.field) {
                        ni_id
                    } else {
                        // Fall back to the built-in convention: the import
                        // must be `hexagon_e::syscall_<id>`.
                        if native.module != "hexagon_e" {
                            panic!("NativeInvoke with a module other than `hexagon_e` is not supported. Got: {}", native.module);
                        }

                        if !native.field.starts_with("syscall_") {
                            panic!("Invalid NativeInvoke field prefix; expected `syscall_`");
                        }

                        let ni_id: u32 = native.field.splitn(2, "_").nth(1).unwrap().parse().unwrap_or_else(|_| {
                            panic!("Unable to parse NativeInvoke id");
                        });

                        ni_id
                    }
                );
            },
            _ => {
                if cfg!(feature = "debug") {
                    eprintln!("Not implemented: {:?}", op);
                }
                result.push(TargetOp::NotSupported as u8);
            }
        }
    }

    TargetFunction {
        code: result,
        opcode_relocs: opcode_relocs,
        generic_relocs: relocs
    }
}
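
// `opcode_relocs` records the byte offset of every source opcode within the
// translated body; this is what lets `translate_module` turn a
// `RelocType::LocalJmp(source opcode index)` into an absolute code address.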

fn write_initializers(dss: &[DataSegment], target: &mut Vec<u8>) -> Vec<usize> {
    let mut relocs: Vec<usize> = Vec::with_capacity(dss.len());

    assert_eq!(target.len(), 0);

    // Placeholder for the total payload length, patched below.
    write_u32(target, ::std::u32::MAX);

    let initial_len = target.len();

    for ds in dss {
        write_u32(target, ds.offset);
        write_u32(target, ds.data.len() as u32);
        relocs.push(target.len());
        target.extend_from_slice(&ds.data);
    }

    let actual_len = target.len() - initial_len;
    LittleEndian::write_u32(&mut target[0..4], actual_len as u32);

    relocs
}
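
// Layout produced by `write_initializers`:
//
//     [total payload length: u32]
//     repeated per segment: [offset: u32][data length: u32][data bytes ...]
//
// The returned relocations point at the first data byte of each segment.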

fn build_initializers(m: &Module) -> (Vec<DataSegment>, Vec<i64>, OffsetTable) {
    let mut slot_values: Vec<i64> = Vec::new();

    let wasm_table = &m.tables[0];
    let wasm_globals = &m.globals;

    // Table elements come first in the slot array. Each slot packs the
    // callee's locals count into the high 32 bits and its function id into
    // the low 32 bits; `translate_module` later patches the id to a code
    // address. Holes are marked with u32::MAX.
    let wasm_table_offset: usize = 0;
    for elem in &wasm_table.elements {
        let elem = elem.unwrap_or(::std::u32::MAX);
        let n_locals = if (elem as usize) < m.functions.len() {
            m.functions[elem as usize].locals.len() as u32
        } else {
            ::std::u32::MAX
        };

        slot_values.push(
            (((n_locals as u64) << 32) | (elem as u64)) as i64
        );
    }

    // Globals follow, stored as raw 64-bit values.
    let wasm_globals_offset: usize = slot_values.len();
    for g in wasm_globals {
        let val = g.value.reinterpret_as_i64();
        slot_values.push(val);
    }

    (m.data_segments.clone(), slot_values, OffsetTable {
        table_slot_offset: wasm_table_offset,
        globals_slot_offset: wasm_globals_offset
    })
}

fn write_u32(target: &mut Vec<u8>, val: u32) {
    // Emit explicitly little-endian bytes to match the `LittleEndian` reads
    // in `translate_module`, regardless of host byte order; a raw transmute
    // of the integer would produce native byte order instead.
    let mut buf = [0u8; 4];
    LittleEndian::write_u32(&mut buf, val);
    target.extend_from_slice(&buf);
}

fn write_u64(target: &mut Vec<u8>, val: u64) {
    let mut buf = [0u8; 8];
    LittleEndian::write_u64(&mut buf, val);
    target.extend_from_slice(&buf);
}
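
// A small sanity-check sketch for the byte-level helpers; it assumes only
// what is defined in this file.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn immediates_are_little_endian() {
        let mut buf: Vec<u8> = Vec::new();
        write_u32(&mut buf, 0x04030201);
        write_u64(&mut buf, 0x0807060504030201);
        assert_eq!(&buf[0..4], &[0x01, 0x02, 0x03, 0x04]);
        assert_eq!(&buf[4..12], &[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]);
    }

    #[test]
    fn zero_is_never_a_valid_opcode() {
        // Discriminants start at 1, so a zeroed buffer cannot decode as code.
        assert_eq!(TargetOp::Drop as u8, 1);
    }
}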