use crate::constant::ConstantRef;
#[cfg(feature="llvm-9-or-greater")]
use crate::debugloc::*;
use crate::function::{Function, FunctionAttribute, GroupID};
use crate::llvm_sys::*;
use crate::name::Name;
use crate::types::{FPType, Type, TypeRef, Typed, Types, TypesBuilder};
use std::collections::{BTreeMap, HashMap, HashSet};
use std::path::Path;
/// An LLVM module: a named collection of functions, global variables, and
/// global aliases, together with target information such as the data layout
/// and target triple.
#[derive(Clone)]
pub struct Module {
    /// Name of the module
    pub name: String,
    /// Name of the source file this module was derived from
    pub source_file_name: String,
    /// Data layout of the module's target (parsed from the module's
    /// data-layout string; see `DataLayout`)
    pub data_layout: DataLayout,
    /// Target triple, if one is present (e.g. `"x86_64-unknown-linux-gnu"`)
    pub target_triple: Option<String>,
    /// Functions defined in this module
    pub functions: Vec<Function>,
    /// Global variables of this module
    pub global_vars: Vec<GlobalVariable>,
    /// Global aliases of this module
    pub global_aliases: Vec<GlobalAlias>,
    /// Module-level inline assembly, as a raw string
    pub inline_assembly: String,
    /// The interned types used anywhere in this module
    pub types: Types,
}
impl Module {
    /// Convenience wrapper: get the type of anything that is `Typed`, using
    /// this module's type context.
    pub fn type_of<T: Typed + ?Sized>(&self, t: &T) -> TypeRef {
        self.types.type_of(t)
    }
    /// Get the `Function` with the given exact `name`, if it exists in this
    /// module. Linear search over `self.functions`.
    pub fn get_func_by_name(&self, name: &str) -> Option<&Function> {
        self.functions.iter().find(|func| func.name == name)
    }
    /// Parse the LLVM bitcode (.bc) file at the given path into a `Module`.
    ///
    /// # Errors
    ///
    /// Returns `Err` with a message if the file cannot be read into a memory
    /// buffer or if the buffer fails to parse as bitcode.
    ///
    /// # Panics
    ///
    /// Panics if `path` is not valid Unicode or contains an interior NUL byte.
    pub fn from_bc_path(path: impl AsRef<Path>) -> Result<Self, String> {
        use std::ffi::{CStr, CString};
        use std::mem;
        let path = CString::new(
            path.as_ref()
                .to_str()
                .expect("Did not find a valid Unicode path string"),
        )
        .expect("Failed to convert to CString");
        debug!("Creating a Module from path {:?}", path);
        // Read the file into an LLVM memory buffer; on failure, return the
        // error message LLVM produced.
        let memory_buffer = unsafe {
            let mut memory_buffer = std::ptr::null_mut();
            // zeroed() here produces a null `*mut c_char` for LLVM to fill in
            let mut err_string = std::mem::zeroed();
            let return_code = LLVMCreateMemoryBufferWithContentsOfFile(
                path.as_ptr() as *const _,
                &mut memory_buffer,
                &mut err_string,
            );
            if return_code != 0 {
                return Err(CStr::from_ptr(err_string)
                    .to_str()
                    .expect("Failed to convert CStr")
                    .to_owned());
            }
            memory_buffer
        };
        debug!("Created a MemoryBuffer");
        let context = crate::from_llvm::Context::new();
        use llvm_sys::bit_reader::LLVMParseBitcodeInContext2;
        // Parse the buffer into an llvm-sys module. Per the LLVM API, the
        // memory buffer is consumed, so it is disposed regardless of whether
        // parsing succeeded.
        let module = unsafe {
            let mut module: mem::MaybeUninit<LLVMModuleRef> = mem::MaybeUninit::uninit();
            let return_code =
                LLVMParseBitcodeInContext2(context.ctx, memory_buffer, module.as_mut_ptr());
            LLVMDisposeMemoryBuffer(memory_buffer);
            if return_code != 0 {
                return Err("Failed to parse bitcode".to_string());
            }
            // Safe: LLVM wrote a valid module ref on the success path
            module.assume_init()
        };
        debug!("Parsed bitcode to llvm_sys module");
        Ok(Self::from_llvm_ref(module))
    }
}
/// A global variable: its name, type, attributes, and (optional) initializer.
#[derive(PartialEq, Clone, Debug)]
pub struct GlobalVariable {
    /// Name of the global (or a number, for unnamed globals)
    pub name: Name,
    /// Linkage of the global
    pub linkage: Linkage,
    /// Visibility of the global
    pub visibility: Visibility,
    /// Whether the global is marked constant
    pub is_constant: bool,
    /// Type of the global itself (a pointer type; see `addr_space`)
    pub ty: TypeRef,
    /// Address space the global lives in
    pub addr_space: AddrSpace,
    /// DLL storage class of the global
    pub dll_storage_class: DLLStorageClass,
    /// Thread-local mode of the global
    pub thread_local_mode: ThreadLocalMode,
    /// `unnamed_addr` attribute, if any
    pub unnamed_addr: Option<UnnamedAddr>,
    /// Initializer, if the global has one
    pub initializer: Option<ConstantRef>,
    /// Section the global is placed in, if specified
    pub section: Option<String>,
    /// Comdat the global belongs to, if any
    pub comdat: Option<Comdat>,
    /// Alignment of the global, in bytes (0 if unspecified)
    pub alignment: u32,
    /// Debug location, if available
    #[cfg(feature="llvm-9-or-greater")]
    pub debugloc: Option<DebugLoc>,
}
impl Typed for GlobalVariable {
fn get_type(&self, _types: &Types) -> TypeRef {
self.ty.clone()
}
}
#[cfg(feature="llvm-9-or-greater")]
impl HasDebugLoc for GlobalVariable {
    /// Returns the debug location recorded for this global, if any.
    fn get_debug_loc(&self) -> &Option<DebugLoc> {
        &self.debugloc
    }
}
/// A global alias: an alternate name for another global value (the aliasee).
#[derive(PartialEq, Clone, Debug)]
pub struct GlobalAlias {
    /// Name of the alias (or a number, for unnamed aliases)
    pub name: Name,
    /// The constant this alias refers to
    pub aliasee: ConstantRef,
    /// Linkage of the alias
    pub linkage: Linkage,
    /// Visibility of the alias
    pub visibility: Visibility,
    /// Type of the alias itself (a pointer type; see `addr_space`)
    pub ty: TypeRef,
    /// Address space of the alias
    pub addr_space: AddrSpace,
    /// DLL storage class of the alias
    pub dll_storage_class: DLLStorageClass,
    /// Thread-local mode of the alias
    pub thread_local_mode: ThreadLocalMode,
    /// `unnamed_addr` attribute, if any
    pub unnamed_addr: Option<UnnamedAddr>,
}
impl Typed for GlobalAlias {
fn get_type(&self, _types: &Types) -> TypeRef {
self.ty.clone()
}
}
/// The `unnamed_addr` attribute on a global value. Converted from
/// `LLVMUnnamedAddr` by `UnnamedAddr::from_llvm` (the "no unnamed addr" case
/// is represented as `None` at the use sites, not as a variant here).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum UnnamedAddr {
    /// `local_unnamed_addr`
    Local,
    /// `unnamed_addr`
    Global,
}
/// Linkage type of a global value. Mirrors the variants of `LLVMLinkage`
/// (see `Linkage::from_llvm` for the exact mapping).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Linkage {
    Private,
    Internal,
    External,
    ExternalWeak,
    AvailableExternally,
    LinkOnceAny,
    LinkOnceODR,
    LinkOnceODRAutoHide,
    WeakAny,
    WeakODR,
    Common,
    Appending,
    DLLImport,
    DLLExport,
    Ghost,
    LinkerPrivate,
    LinkerPrivateWeak,
}
/// Visibility style of a global value. Mirrors `LLVMVisibility`
/// (see `Visibility::from_llvm`).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Visibility {
    Default,
    Hidden,
    Protected,
}
/// DLL storage class of a global value. Mirrors `LLVMDLLStorageClass`
/// (see `DLLStorageClass::from_llvm`).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum DLLStorageClass {
    Default,
    Import,
    Export,
}
/// Thread-local storage mode of a global value. Mirrors
/// `LLVMThreadLocalMode` (see `ThreadLocalMode::from_llvm`).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum ThreadLocalMode {
    NotThreadLocal,
    GeneralDynamic,
    LocalDynamic,
    InitialExec,
    LocalExec,
}
/// An LLVM address space number.
pub type AddrSpace = u32;
/// A named group of function attributes, identified by its `GroupID`.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct FunctionAttributeGroup {
    /// Identifier of the group
    pub group_id: GroupID,
    /// The attributes in the group
    pub attrs: Vec<FunctionAttribute>,
}
/// A comdat: a named section-selection group for global values.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Comdat {
    /// Name of the comdat. NOTE: currently `Comdat::from_llvm_ref` stores a
    /// placeholder string here rather than the real name.
    pub name: String,
    /// How the linker should pick among multiple comdats with this name
    pub selection_kind: SelectionKind,
}
/// Comdat selection kind. Mirrors `LLVMComdatSelectionKind`
/// (see `SelectionKind::from_llvm`).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum SelectionKind {
    Any,
    ExactMatch,
    Largest,
    NoDuplicates,
    SameSize,
}
/// The parsed form of a target data-layout string (see
/// `DataLayout::from_module_ref` for the parser).
#[derive(Clone, Debug)]
pub struct DataLayout {
    /// The raw data-layout string, unparsed
    pub layout_str: String,
    /// Endianness of the target (`e` / `E` specs)
    pub endianness: Endianness,
    /// Natural stack alignment, if specified (`S` spec)
    pub stack_alignment: Option<u32>,
    /// Address space for program memory (`P` spec; defaults to 0)
    pub program_address_space: AddrSpace,
    /// Address space for allocas (`A` spec; defaults to 0)
    pub alloca_address_space: AddrSpace,
    /// Alignment rules for the various types (`i`/`v`/`f`/`a`/`p`/`F` specs)
    pub alignments: Alignments,
    /// Name-mangling style, if specified (`m` spec)
    pub mangling: Option<Mangling>,
    /// Native integer widths, if specified (`n` spec); `None` if the layout
    /// string contains no `n` spec
    pub native_int_widths: Option<HashSet<u32>>,
    /// Address spaces with non-integral pointer types (`ni` spec)
    pub non_integral_ptr_types: HashSet<AddrSpace>,
}
impl PartialEq for DataLayout {
    /// Two `DataLayout`s compare equal iff their raw layout strings are
    /// identical; the other fields are not consulted (they are all parsed
    /// from `layout_str` by `DataLayout::from_module_ref`).
    fn eq(&self, other: &Self) -> bool {
        // Compare the values directly rather than `&a == &b` (clippy: op_ref)
        self.layout_str == other.layout_str
    }
}
impl Eq for DataLayout {}
/// Byte order of the target.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Endianness {
    /// Least-significant byte first
    LittleEndian,
    /// Most-significant byte first
    BigEndian,
}
/// An ABI-required and preferred alignment pair, in bits.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Alignment {
    /// ABI-required alignment
    pub abi: u32,
    /// Preferred alignment
    pub pref: u32,
}
/// Alignment rule for function pointers (`Fi` / `Fn` datalayout specs,
/// LLVM 9+).
#[cfg(feature="llvm-9-or-greater")]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FunctionPtrAlignment {
    /// `true` for the `Fi` spec (alignment independent of functions'
    /// alignment); `false` for the `Fn` spec
    pub independent: bool,
    /// ABI-required alignment
    pub abi: u32,
}
/// Size, alignment, and index size of pointers in one address space
/// (`p` datalayout spec).
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct PointerLayout {
    /// Pointer size, in bits
    pub size: u32,
    /// Pointer alignment
    pub alignment: Alignment,
    /// Size used for address calculations, in bits
    pub index_size: u32,
}
/// The full set of alignment rules of a `DataLayout`. Query via the methods
/// on `impl Alignments`; the maps are keyed by size in bits.
#[derive(Clone, Debug)]
pub struct Alignments {
    /// Integer alignments, keyed by integer size in bits (ordered, so
    /// fallback lookups can scan neighboring sizes)
    int_alignments: BTreeMap<u32, Alignment>,
    /// Vector alignments, keyed by total vector size in bits
    vec_alignments: BTreeMap<u32, Alignment>,
    /// Floating-point alignments, keyed by FP size in bits
    fp_alignments: HashMap<u32, Alignment>,
    /// Alignment of aggregate (struct/array) types
    agg_alignment: Alignment,
    /// Alignment of function pointers (LLVM 9+)
    #[cfg(feature="llvm-9-or-greater")]
    fptr_alignment: FunctionPtrAlignment,
    /// Same function-pointer alignment, stored as an `Alignment` so that
    /// `type_alignment` can return `&Alignment` uniformly
    #[cfg(feature="llvm-9-or-greater")]
    fptr_alignment_as_alignment: Alignment,
    /// Pointer layouts, keyed by address space
    pointer_layouts: HashMap<AddrSpace, PointerLayout>,
}
impl Alignments {
    /// Alignment of the given type, per these alignment rules.
    ///
    /// # Panics
    ///
    /// Panics on types with no alignment rule here (e.g. vectors whose
    /// elements are neither integer nor FP typed, or any type not covered by
    /// the match arms below).
    pub fn type_alignment(&self, ty: &Type) -> &Alignment {
        match ty {
            Type::IntegerType { bits } => self.int_alignment(*bits),
            Type::VectorType { element_type, num_elements, .. } => {
                // Vector alignment is keyed on the vector's total size in bits
                let element_size_bits = match element_type.as_ref() {
                    Type::IntegerType { bits } => *bits,
                    Type::FPType(fpt) => Self::fpt_size(*fpt),
                    ty => panic!("Didn't expect a vector with element type {:?}", ty),
                };
                self.vec_alignment(element_size_bits * (*num_elements as u32))
            },
            Type::FPType(fpt) => self.fp_alignment(*fpt),
            Type::StructType { .. } | Type::NamedStructType { .. } | Type::ArrayType { .. } => {
                self.agg_alignment()
            },
            Type::PointerType {
                pointee_type,
                addr_space,
            } => match pointee_type.as_ref() {
                // Function pointers get their own alignment rule (LLVM 9+);
                // on LLVM 8 and lower they fall through to the generic
                // pointer rule below
                #[cfg(feature="llvm-9-or-greater")]
                Type::FuncType { .. } => &self.fptr_alignment_as_alignment,
                _ => &self.ptr_alignment(*addr_space).alignment,
            },
            _ => panic!("Don't know how to get the alignment of {:?}", ty),
        }
    }
    /// Alignment of an integer type of the given size (in bits).
    pub fn int_alignment(&self, size: u32) -> &Alignment {
        // Exact match first
        if let Some(alignment) = self.int_alignments.get(&size) {
            return alignment;
        }
        // Otherwise use the smallest entry larger than `size`; if `size` is
        // larger than all entries, use the largest entry
        match self.int_alignments.iter().find(|(&k, _)| k > size) {
            Some((_, alignment)) => alignment,
            None => self
                .int_alignments
                .values()
                .next_back()
                .expect("Should have at least one explicit entry"),
        }
    }
    /// Alignment of a vector type of the given total size (in bits).
    pub fn vec_alignment(&self, size: u32) -> &Alignment {
        // Exact match first
        if let Some(alignment) = self.vec_alignments.get(&size) {
            return alignment;
        }
        // Otherwise use the first (smallest) entry below `size`; if `size`
        // is smaller than all entries, use the smallest entry
        match self.vec_alignments.iter().find(|(&k, _)| k < size) {
            Some((_, alignment)) => alignment,
            None => self
                .vec_alignments
                .values()
                .next()
                .expect("Should have at least one explicit entry"),
        }
    }
    /// Alignment of the given floating-point type.
    ///
    /// # Panics
    ///
    /// Panics if there is no entry for that FP size.
    pub fn fp_alignment(&self, fpt: FPType) -> &Alignment {
        self.fp_alignments
            .get(&Self::fpt_size(fpt))
            .unwrap_or_else(|| {
                panic!(
                    "No alignment information for {:?} - does the target support that type?",
                    fpt
                )
            })
    }
    /// Alignment of aggregate (struct/array) types.
    pub fn agg_alignment(&self) -> &Alignment {
        &self.agg_alignment
    }
    /// Alignment of function pointers (LLVM 9+).
    #[cfg(feature="llvm-9-or-greater")]
    pub fn fptr_alignment(&self) -> &FunctionPtrAlignment {
        &self.fptr_alignment
    }
    /// Pointer layout for the given address space, falling back to the
    /// layout for address space 0 if that space has no explicit entry.
    pub fn ptr_alignment(&self, addr_space: AddrSpace) -> &PointerLayout {
        match self.pointer_layouts.get(&addr_space) {
            Some(layout) => layout,
            None => self
                .pointer_layouts
                .get(&0)
                .expect("Should have a pointer layout for address space 0"),
        }
    }
    /// Size of the given floating-point type, in bits. Note that `X86_FP80`
    /// is counted here as 80 bits.
    fn fpt_size(fpt: FPType) -> u32 {
        match fpt {
            FPType::Half => 16,
            #[cfg(feature="llvm-11-or-greater")]
            FPType::BFloat => 16,
            FPType::Single => 32,
            FPType::Double => 64,
            FPType::FP128 => 128,
            FPType::X86_FP80 => 80,
            FPType::PPC_FP128 => 128,
        }
    }
}
/// Name-mangling style (`m` datalayout spec; see the parsing in
/// `DataLayout::from_module_ref`).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Mangling {
    /// `m:e`
    ELF,
    /// `m:m`
    MIPS,
    /// `m:o`
    MachO,
    /// `m:x`
    WindowsX86COFF,
    /// `m:w`
    WindowsCOFF,
    /// `m:a` (LLVM 11+)
    #[cfg(feature="llvm-11-or-greater")]
    XCOFF,
}
use crate::constant::Constant;
use crate::from_llvm::*;
use crate::function::AttributesData;
use llvm_sys::comdat::*;
use llvm_sys::{
LLVMDLLStorageClass,
LLVMLinkage,
LLVMThreadLocalMode,
LLVMUnnamedAddr,
LLVMVisibility,
};
/// Shared state used while converting an `LLVMModuleRef` into our `Module`:
/// the type interner, parsed attributes, a cache of converted constants, and
/// the precomputed names of all global values.
pub(crate) struct ModuleContext<'a> {
    /// Builder for the module's interned types
    pub types: TypesBuilder,
    /// Parsed function-attribute data
    pub attrsdata: AttributesData,
    /// Cache of already-converted constants, keyed by LLVM value ref
    #[allow(clippy::mutable_key_type)] pub constants: HashMap<LLVMValueRef, ConstantRef>,
    /// Names assigned to all global values (functions, globals, aliases),
    /// computed up-front in `Module::from_llvm_ref`
    #[allow(clippy::mutable_key_type)] pub global_names: &'a HashMap<LLVMValueRef, Name>,
}
impl<'a> ModuleContext<'a> {
    /// Create a fresh context with empty type/constant state, borrowing the
    /// precomputed global-name map.
    #[allow(clippy::mutable_key_type)] fn new(global_names: &'a HashMap<LLVMValueRef, Name>) -> Self {
        Self {
            types: TypesBuilder::new(),
            attrsdata: AttributesData::create(),
            constants: HashMap::new(),
            global_names,
        }
    }
}
impl Module {
    /// Convert an llvm-sys module into our `Module`.
    ///
    /// Works in two passes: first it walks all global values (defined and
    /// declared functions, globals, aliases) to assign each a `Name` (unnamed
    /// values get sequential numbers from `global_ctr`); then it converts the
    /// functions, globals, and aliases, sharing one `ModuleContext`.
    pub(crate) fn from_llvm_ref(module: LLVMModuleRef) -> Self {
        debug!("Creating a Module from an LLVMModuleRef");
        let mut global_ctr = 0;
        // Pass 1: assign a Name to every global value up-front, so that
        // converted code can refer to globals that appear later in the module
        #[allow(clippy::mutable_key_type)] let global_names: HashMap<LLVMValueRef, Name> = get_defined_functions(module)
            .chain(get_declared_functions(module))
            .chain(get_globals(module))
            .chain(get_global_aliases(module))
            .map(|g| {
                (
                    g,
                    Name::name_or_num(unsafe { get_value_name(g) }, &mut global_ctr),
                )
            })
            .collect();
        // Reset the counter before pass 2 re-walks globals and aliases
        global_ctr = 0;
        let mut ctx = ModuleContext::new(&global_names);
        Self {
            name: unsafe { get_module_identifier(module) },
            source_file_name: unsafe { get_source_file_name(module) },
            data_layout: DataLayout::from_module_ref(module),
            target_triple: unsafe { get_target(module) },
            functions: get_defined_functions(module)
                .map(|f| Function::from_llvm_ref(f, &mut ctx))
                .collect(),
            global_vars: get_globals(module)
                .map(|g| GlobalVariable::from_llvm_ref(g, &mut global_ctr, &mut ctx))
                .collect(),
            global_aliases: get_global_aliases(module)
                .map(|g| GlobalAlias::from_llvm_ref(g, &mut global_ctr, &mut ctx))
                .collect(),
            inline_assembly: unsafe { get_module_inline_asm(module) },
            types: ctx.types.build(),
        }
    }
}
impl GlobalVariable {
    /// Convert an llvm-sys global variable into our `GlobalVariable`.
    ///
    /// `ctr` supplies sequential numbers for unnamed globals; `ctx` is the
    /// shared module-conversion state.
    ///
    /// # Panics
    ///
    /// Panics if the global's type is not a pointer type.
    pub(crate) fn from_llvm_ref(
        global: LLVMValueRef,
        ctr: &mut usize,
        ctx: &mut ModuleContext,
    ) -> Self {
        let ty = ctx.types.type_from_llvm_ref(unsafe { LLVMTypeOf(global) });
        // A global's type is a pointer to its contents; pull out the address
        // space for convenience
        let addr_space = match ty.as_ref() {
            Type::PointerType { addr_space, .. } => *addr_space,
            _ => panic!("GlobalVariable has a non-pointer type, {:?}", ty),
        };
        debug!("Processing a GlobalVariable with type {:?}", ty);
        Self {
            name: Name::name_or_num(unsafe { get_value_name(global) }, ctr),
            linkage: Linkage::from_llvm(unsafe { LLVMGetLinkage(global) }),
            visibility: Visibility::from_llvm(unsafe { LLVMGetVisibility(global) }),
            is_constant: unsafe { LLVMIsGlobalConstant(global) } != 0,
            ty,
            addr_space,
            dll_storage_class: DLLStorageClass::from_llvm(unsafe {
                LLVMGetDLLStorageClass(global)
            }),
            thread_local_mode: ThreadLocalMode::from_llvm(unsafe {
                LLVMGetThreadLocalMode(global)
            }),
            unnamed_addr: UnnamedAddr::from_llvm(unsafe { LLVMGetUnnamedAddress(global) }),
            initializer: {
                // Null means the global has no initializer (it's a declaration)
                let it = unsafe { LLVMGetInitializer(global) };
                if it.is_null() {
                    None
                } else {
                    Some(Constant::from_llvm_ref(it, ctx))
                }
            },
            section: unsafe { get_section(global) },
            comdat: {
                let comdat = unsafe { LLVMGetComdat(global) };
                if comdat.is_null() {
                    None
                } else {
                    // Reuse the ref we already fetched rather than calling
                    // LLVMGetComdat a second time
                    Some(Comdat::from_llvm_ref(comdat))
                }
            },
            alignment: unsafe { LLVMGetAlignment(global) },
            #[cfg(feature="llvm-9-or-greater")]
            debugloc: DebugLoc::from_llvm_no_col(global),
        }
    }
}
impl GlobalAlias {
    /// Convert an llvm-sys global alias into our `GlobalAlias`.
    ///
    /// `ctr` supplies sequential numbers for unnamed aliases; `ctx` is the
    /// shared module-conversion state.
    ///
    /// # Panics
    ///
    /// Panics if the alias's type is not a pointer type.
    pub(crate) fn from_llvm_ref(
        alias: LLVMValueRef,
        ctr: &mut usize,
        ctx: &mut ModuleContext,
    ) -> Self {
        let ty = ctx.types.type_from_llvm_ref(unsafe { LLVMTypeOf(alias) });
        // An alias's type is a pointer type; pull out the address space
        let addr_space = match ty.as_ref() {
            Type::PointerType { addr_space, .. } => *addr_space,
            _ => panic!("GlobalAlias has a non-pointer type, {:?}", ty),
        };
        Self {
            name: Name::name_or_num(unsafe { get_value_name(alias) }, ctr),
            aliasee: Constant::from_llvm_ref(unsafe { LLVMAliasGetAliasee(alias) }, ctx),
            linkage: Linkage::from_llvm(unsafe { LLVMGetLinkage(alias) }),
            visibility: Visibility::from_llvm(unsafe { LLVMGetVisibility(alias) }),
            ty,
            addr_space,
            dll_storage_class: DLLStorageClass::from_llvm(unsafe { LLVMGetDLLStorageClass(alias) }),
            thread_local_mode: ThreadLocalMode::from_llvm(unsafe { LLVMGetThreadLocalMode(alias) }),
            unnamed_addr: UnnamedAddr::from_llvm(unsafe { LLVMGetUnnamedAddress(alias) }),
        }
    }
}
impl UnnamedAddr {
    /// Convert the llvm-sys enum; `LLVMNoUnnamedAddr` maps to `None`.
    pub(crate) fn from_llvm(ua: LLVMUnnamedAddr) -> Option<Self> {
        use LLVMUnnamedAddr::*;
        match ua {
            LLVMNoUnnamedAddr => None,
            LLVMLocalUnnamedAddr => Some(UnnamedAddr::Local),
            LLVMGlobalUnnamedAddr => Some(UnnamedAddr::Global),
        }
    }
}
impl Linkage {
    /// Convert the llvm-sys linkage enum, one variant to one variant.
    pub(crate) fn from_llvm(linkage: LLVMLinkage) -> Self {
        use LLVMLinkage::*;
        match linkage {
            LLVMExternalLinkage => Linkage::External,
            LLVMAvailableExternallyLinkage => Linkage::AvailableExternally,
            LLVMLinkOnceAnyLinkage => Linkage::LinkOnceAny,
            LLVMLinkOnceODRLinkage => Linkage::LinkOnceODR,
            LLVMLinkOnceODRAutoHideLinkage => Linkage::LinkOnceODRAutoHide,
            LLVMWeakAnyLinkage => Linkage::WeakAny,
            LLVMWeakODRLinkage => Linkage::WeakODR,
            LLVMAppendingLinkage => Linkage::Appending,
            LLVMInternalLinkage => Linkage::Internal,
            LLVMPrivateLinkage => Linkage::Private,
            LLVMDLLImportLinkage => Linkage::DLLImport,
            LLVMDLLExportLinkage => Linkage::DLLExport,
            LLVMExternalWeakLinkage => Linkage::ExternalWeak,
            LLVMGhostLinkage => Linkage::Ghost,
            LLVMCommonLinkage => Linkage::Common,
            LLVMLinkerPrivateLinkage => Linkage::LinkerPrivate,
            LLVMLinkerPrivateWeakLinkage => Linkage::LinkerPrivateWeak,
        }
    }
}
impl Visibility {
    /// Convert the llvm-sys visibility enum, one variant to one variant.
    pub(crate) fn from_llvm(visibility: LLVMVisibility) -> Self {
        use LLVMVisibility::*;
        match visibility {
            LLVMDefaultVisibility => Visibility::Default,
            LLVMHiddenVisibility => Visibility::Hidden,
            LLVMProtectedVisibility => Visibility::Protected,
        }
    }
}
impl DLLStorageClass {
    /// Convert the llvm-sys DLL-storage-class enum, one variant to one variant.
    pub(crate) fn from_llvm(dllsc: LLVMDLLStorageClass) -> Self {
        use LLVMDLLStorageClass::*;
        match dllsc {
            LLVMDefaultStorageClass => DLLStorageClass::Default,
            LLVMDLLImportStorageClass => DLLStorageClass::Import,
            LLVMDLLExportStorageClass => DLLStorageClass::Export,
        }
    }
}
impl ThreadLocalMode {
    /// Convert the llvm-sys thread-local-mode enum, one variant to one variant.
    pub(crate) fn from_llvm(tlm: LLVMThreadLocalMode) -> Self {
        use LLVMThreadLocalMode::*;
        match tlm {
            LLVMNotThreadLocal => ThreadLocalMode::NotThreadLocal,
            LLVMGeneralDynamicTLSModel => ThreadLocalMode::GeneralDynamic,
            LLVMLocalDynamicTLSModel => ThreadLocalMode::LocalDynamic,
            LLVMInitialExecTLSModel => ThreadLocalMode::InitialExec,
            LLVMLocalExecTLSModel => ThreadLocalMode::LocalExec,
        }
    }
}
impl Comdat {
    /// Convert an llvm-sys comdat ref into our `Comdat`.
    ///
    /// Only the selection kind is read from LLVM; retrieving the comdat's
    /// real name is not yet implemented, so a placeholder string is stored.
    pub(crate) fn from_llvm_ref(comdat: LLVMComdatRef) -> Self {
        let selection_kind =
            SelectionKind::from_llvm(unsafe { LLVMGetComdatSelectionKind(comdat) });
        Self {
            name: "error: not yet implemented: Comdat.name".to_owned(),
            selection_kind,
        }
    }
}
impl SelectionKind {
    /// Convert the llvm-sys comdat-selection-kind enum, one variant to one
    /// variant.
    pub(crate) fn from_llvm(sk: LLVMComdatSelectionKind) -> Self {
        use LLVMComdatSelectionKind::*;
        match sk {
            LLVMAnyComdatSelectionKind => SelectionKind::Any,
            LLVMExactMatchComdatSelectionKind => SelectionKind::ExactMatch,
            LLVMLargestComdatSelectionKind => SelectionKind::Largest,
            LLVMNoDuplicatesComdatSelectionKind => SelectionKind::NoDuplicates,
            LLVMSameSizeComdatSelectionKind => SelectionKind::SameSize,
        }
    }
}
impl Default for DataLayout {
    /// The baseline `DataLayout` that `from_module_ref` starts from before
    /// applying the module's layout-string specs: empty layout string,
    /// big-endian, default alignments, and address space 0 everywhere.
    fn default() -> Self {
        Self {
            layout_str: String::new(),
            endianness: Endianness::BigEndian,
            stack_alignment: None,
            program_address_space: 0,
            alloca_address_space: 0,
            alignments: Alignments::default(),
            mangling: None,
            native_int_widths: None,
            non_integral_ptr_types: HashSet::new(),
        }
    }
}
impl DataLayout {
pub(crate) fn from_module_ref(module: LLVMModuleRef) -> Self {
let layout_str = unsafe { get_data_layout_str(module) };
let mut data_layout = DataLayout::default();
data_layout.layout_str = layout_str;
for spec in data_layout.layout_str.split('-') {
if spec == "E" {
data_layout.endianness = Endianness::BigEndian;
} else if spec == "e" {
data_layout.endianness = Endianness::LittleEndian;
} else if spec.starts_with('S') {
data_layout.stack_alignment =
Some(spec[1 ..].parse().expect("datalayout 'S': Failed to parse"));
} else if spec.starts_with('P') {
data_layout.program_address_space =
spec[1 ..].parse().expect("datalayout 'P': Failed to parse");
} else if spec.starts_with('A') {
data_layout.alloca_address_space =
spec[1 ..].parse().expect("datalayout 'A': Failed to parse");
} else if spec.starts_with('p') {
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
let addr_space: AddrSpace = if first_chunk == "p" {
0
} else {
first_chunk[1 ..]
.parse()
.expect("datalayout 'p': Failed to parse address space")
};
let second_chunk = chunks
.next()
.expect("datalayout 'p' spec should have a size chunk");
let size: u32 = second_chunk
.parse()
.expect("datalayout 'p': Failed to parse pointer size");
let third_chunk = chunks
.next()
.expect("datalayout 'p' spec should have an abi chunk");
let abi: u32 = third_chunk
.parse()
.expect("datalayout 'p': Failed to parse abi");
let pref: u32 = if let Some(fourth_chunk) = chunks.next() {
fourth_chunk
.parse()
.expect("datalayout 'p': Failed to parse pref")
} else {
abi
};
let idx: u32 = if let Some(fifth_chunk) = chunks.next() {
fifth_chunk
.parse()
.expect("datalayout 'p': Failed to parse idx")
} else {
size
};
assert!(chunks.next().is_none(), "datalayout 'p': Too many chunks");
data_layout.alignments.pointer_layouts.insert(
addr_space,
PointerLayout {
size,
alignment: Alignment { abi, pref },
index_size: idx,
},
);
} else if spec.starts_with('i') {
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
let size: u32 = first_chunk[1 ..]
.parse()
.expect("datalayout 'i': Failed to parse size");
let second_chunk = chunks
.next()
.expect("datalayout 'i' spec should have an abi chunk");
let abi: u32 = second_chunk
.parse()
.expect("datalayout 'i': Failed to parse abi");
let pref = if let Some(third_chunk) = chunks.next() {
third_chunk
.parse()
.expect("datalayout 'i': Failed to parse pref")
} else {
abi
};
assert!(chunks.next().is_none(), "datalayout 'i': Too many chunks");
data_layout
.alignments
.int_alignments
.insert(size, Alignment { abi, pref });
} else if spec.starts_with('v') {
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
let size: u32 = first_chunk[1 ..]
.parse()
.expect("datalayout 'v': Failed to parse size");
let second_chunk = chunks
.next()
.expect("datalayout 'v' spec should have an abi chunk");
let abi: u32 = second_chunk
.parse()
.expect("datalayout 'v': Failed to parse abi");
let pref = if let Some(third_chunk) = chunks.next() {
third_chunk
.parse()
.expect("datalayout 'v': Failed to parse pref")
} else {
abi
};
assert!(chunks.next().is_none(), "datalayout 'v': Too many chunks");
data_layout
.alignments
.vec_alignments
.insert(size, Alignment { abi, pref });
} else if spec.starts_with('f') {
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
let size: u32 = first_chunk[1 ..]
.parse()
.expect("datalayout 'f': Failed to parse size");
let second_chunk = chunks
.next()
.expect("datalayout 'f' spec should have an abi chunk");
let abi: u32 = second_chunk
.parse()
.expect("datalayout 'f': Failed to parse abi");
let pref = if let Some(third_chunk) = chunks.next() {
third_chunk
.parse()
.expect("datalayout 'f': Failed to parse pref")
} else {
abi
};
assert!(chunks.next().is_none(), "datalayout 'f': Too many chunks");
data_layout
.alignments
.fp_alignments
.insert(size, Alignment { abi, pref });
} else if spec.starts_with('a') {
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
assert!(first_chunk == "a" || first_chunk == "a0");
let second_chunk = chunks
.next()
.expect("datalayout 'a' spec should have an abi chunk");
let abi: u32 = second_chunk
.parse()
.expect("datalayout 'a': Failed to parse abi");
let pref = if let Some(third_chunk) = chunks.next() {
third_chunk
.parse()
.expect("datalayout 'a': Failed to parse pref")
} else {
abi
};
assert!(chunks.next().is_none(), "datalayout 'a': Too many chunks");
data_layout.alignments.agg_alignment = Alignment { abi, pref };
} else if spec.starts_with("Fi") {
#[cfg(feature="llvm-8-or-lower")]
{
panic!("datalayout: Unknown spec {:?}", spec);
}
#[cfg(feature="llvm-9-or-greater")]
{
let abi: u32 = spec[2 ..]
.parse()
.expect("datalayout 'Fi': Failed to parse abi");
data_layout.alignments.fptr_alignment = FunctionPtrAlignment {
independent: true,
abi,
};
data_layout.alignments.fptr_alignment_as_alignment =
Alignment { abi, pref: abi };
}
} else if spec.starts_with("Fn") {
#[cfg(feature="llvm-8-or-lower")]
{
panic!("datalayout: Unknown spec {:?}", spec);
}
#[cfg(feature="llvm-9-or-greater")]
{
let abi: u32 = spec[2 ..]
.parse()
.expect("datalayout 'Fn': Failed to parse abi");
data_layout.alignments.fptr_alignment = FunctionPtrAlignment {
independent: false,
abi,
};
data_layout.alignments.fptr_alignment_as_alignment =
Alignment { abi, pref: abi };
}
} else if spec.starts_with('m') {
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
assert_eq!(first_chunk, "m");
let second_chunk = chunks
.next()
.expect("datalayout 'm' spec should have a mangling chunk");
let mangling = match second_chunk {
"e" => Mangling::ELF,
"m" => Mangling::MIPS,
"o" => Mangling::MachO,
"x" => Mangling::WindowsX86COFF,
"w" => Mangling::WindowsCOFF,
#[cfg(feature="llvm-11-or-greater")]
"a" => Mangling::XCOFF,
_ => panic!("datalayout 'm': Unknown mangling {:?}", second_chunk),
};
assert!(chunks.next().is_none(), "datalayout 'm': Too many chunks");
data_layout.mangling = Some(mangling);
} else if spec.starts_with("ni") {
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
assert_eq!(first_chunk, "ni");
for chunk in chunks {
let addr_space: AddrSpace = chunk
.parse()
.expect("datalayout 'ni': Failed to parse addr space");
assert_ne!(addr_space, 0, "LLVM spec does not allow address space 0 to have non-integral pointer types");
data_layout.non_integral_ptr_types.insert(addr_space);
}
} else if spec.starts_with('n') {
let native_int_widths = data_layout
.native_int_widths
.get_or_insert_with(|| HashSet::new());
let mut chunks = spec.split(':');
let first_chunk = chunks.next().unwrap();
let size = first_chunk[1 ..]
.parse()
.expect("datalayout 'n': Failed to parse first size");
native_int_widths.insert(size);
for chunk in chunks {
let size = chunk.parse().expect("datalayout 'n': Failed to parse size");
native_int_widths.insert(size);
}
} else if spec == "" {
} else {
panic!("datalayout: Unknown spec {:?}", spec);
}
}
data_layout
}
}
impl Default for Alignments {
    /// Baseline alignment table used before any datalayout specs are
    /// applied; `DataLayout::from_module_ref` overrides entries per the
    /// module's layout string.
    fn default() -> Self {
        Self {
            // default integer alignments, keyed by size in bits
            int_alignments: vec![
                (1, Alignment { abi: 8, pref: 8 }),
                (8, Alignment { abi: 8, pref: 8 }),
                (16, Alignment { abi: 16, pref: 16 }),
                (32, Alignment { abi: 32, pref: 32 }),
                (64, Alignment { abi: 32, pref: 64 }),
            ]
            .into_iter()
            .collect(),
            // default vector alignments, keyed by total size in bits
            vec_alignments: vec![
                (64, Alignment { abi: 64, pref: 64 }),
                (128, Alignment { abi: 128, pref: 128 }),
            ]
            .into_iter()
            .collect(),
            // default floating-point alignments, keyed by size in bits
            fp_alignments: vec![
                (16, Alignment { abi: 16, pref: 16 }),
                (32, Alignment { abi: 32, pref: 32 }),
                (64, Alignment { abi: 64, pref: 64 }),
                (128, Alignment { abi: 128, pref: 128 }),
            ]
            .into_iter()
            .collect(),
            // default aggregate alignment
            agg_alignment: Alignment { abi: 0, pref: 64 },
            // default function-pointer alignment (LLVM 9+)
            #[cfg(feature="llvm-9-or-greater")]
            fptr_alignment: FunctionPtrAlignment {
                independent: true,
                abi: 64,
            },
            #[cfg(feature="llvm-9-or-greater")]
            fptr_alignment_as_alignment: Alignment { abi: 64, pref: 64 },
            // default pointer layout: 64-bit pointers in address space 0
            pointer_layouts: vec![(
                0,
                PointerLayout {
                    size: 64,
                    alignment: Alignment { abi: 64, pref: 64 },
                    index_size: 64,
                },
            )]
            .into_iter()
            .collect(),
        }
    }
}