Commit ceb29e2 (1 parent: e457330)

Use implicit deref instead of BuilderMethods::cx()

13 files changed: +270 -271 lines
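The change relies on Builder dereferencing to its CodegenCx: with a Deref impl in place, method lookup auto-derefs &Builder to &CodegenCx, so each bx.cx().some_method() call below can shrink to bx.some_method(). A minimal runnable sketch of the pattern, using simplified stand-in types rather than the actual rustc definitions:

use std::ops::Deref;

// Hypothetical stand-ins: the real CodegenCx and Builder in
// librustc_codegen_llvm carry far more state than this.
struct CodegenCx;

impl CodegenCx {
    fn type_i8p(&self) -> &'static str {
        "i8*"
    }
}

struct Builder<'a> {
    cx: &'a CodegenCx,
}

impl<'a> Deref for Builder<'a> {
    type Target = CodegenCx;
    fn deref(&self) -> &CodegenCx {
        self.cx
    }
}

fn main() {
    let cx = CodegenCx;
    let bx = Builder { cx: &cx };
    // Method lookup auto-derefs &Builder to &CodegenCx, so the old
    // bx.cx().type_i8p() spelling becomes just bx.type_i8p().
    assert_eq!(bx.type_i8p(), "i8*");
}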

src/librustc_codegen_llvm/abi.rs (+7 -7)
@@ -212,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
-                let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx()));
+                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
@@ -231,9 +231,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
                // bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
-                let scratch_size = cast.size(bx.cx());
-                let scratch_align = cast.align(bx.cx());
-                let llscratch = bx.alloca(cast.llvm_type(bx.cx()), "abi_cast", scratch_align);
+                let scratch_size = cast.size(bx);
+                let scratch_align = cast.align(bx);
+                let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
@@ -245,7 +245,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
-                    bx.cx().const_usize(self.layout.size.bytes()),
+                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty()
                );

@@ -299,7 +299,7 @@ impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
        ty.store(self, val, dst)
    }
    fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
-        ty.memory_ty(self.cx())
+        ty.memory_ty(self)
    }
}

@@ -780,7 +780,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
                // by the LLVM verifier.
                if let layout::Int(..) = scalar.value {
                    if !scalar.is_bool() {
-                        let range = scalar.valid_range_exclusive(bx.cx());
+                        let range = scalar.valid_range_exclusive(bx);
                        if range.start != range.end {
                            bx.range_metadata(callsite, range);
                        }
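Note the second mechanism at work in the memory_ty hunk above: ty.memory_ty(self) passes self (a &Builder) where the callee expects a &CodegenCx, which compiles via deref coercion rather than method lookup. A self-contained sketch of that, again with hypothetical stand-in names:

use std::ops::Deref;

struct CodegenCx;

struct Builder<'a> {
    cx: &'a CodegenCx,
}

impl<'a> Deref for Builder<'a> {
    type Target = CodegenCx;
    fn deref(&self) -> &CodegenCx {
        self.cx
    }
}

// Stand-in for a callee like ArgType::memory_ty that takes the context.
fn memory_ty(_cx: &CodegenCx) {}

fn main() {
    let cx = CodegenCx;
    let bx = Builder { cx: &cx };
    // &Builder coerces to &CodegenCx through the Deref impl, so the
    // old memory_ty(bx.cx()) call site becomes memory_ty(&bx).
    memory_ty(&bx);
}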

src/librustc_codegen_llvm/asm.rs (+6 -6)
@@ -57,7 +57,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {

        // Default per-arch clobbers
        // Basically what clang does
-        let arch_clobbers = match &self.cx().sess().target.target.arch[..] {
+        let arch_clobbers = match &self.sess().target.target.arch[..] {
            "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
            "mips" | "mips64" => vec!["~{$1}"],
            _ => Vec::new()
@@ -76,9 +76,9 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
        // Depending on how many outputs we have, the return type is different
        let num_outputs = output_types.len();
        let output_type = match num_outputs {
-            0 => self.cx().type_void(),
+            0 => self.type_void(),
            1 => output_types[0],
-            _ => self.cx().type_struct(&output_types, false)
+            _ => self.type_struct(&output_types, false)
        };

        let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
@@ -108,13 +108,13 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
        // back to source locations. See #17552.
        unsafe {
            let key = "srcloc";
-            let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx,
+            let kind = llvm::LLVMGetMDKindIDInContext(self.llcx,
                key.as_ptr() as *const c_char, key.len() as c_uint);

-            let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
+            let val: &'ll Value = self.const_i32(ia.ctxt.outer().as_u32() as i32);

            llvm::LLVMSetMetadata(r, kind,
-                llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1));
+                llvm::LLVMMDNodeInContext(self.llcx, &val, 1));
        }

        true

src/librustc_codegen_llvm/builder.rs (+29 -29)
@@ -143,11 +143,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    }

    fn count_insn(&self, category: &str) {
-        if self.cx().sess().codegen_stats() {
-            self.cx().stats.borrow_mut().n_llvm_insns += 1;
+        if self.sess().codegen_stats() {
+            self.stats.borrow_mut().n_llvm_insns += 1;
        }
-        if self.cx().sess().count_llvm_insns() {
-            *self.cx().stats
+        if self.sess().count_llvm_insns() {
+            *self.stats
                .borrow_mut()
                .llvm_insns
                .entry(category.to_string())
@@ -475,8 +475,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        use rustc::ty::{Int, Uint};

        let new_sty = match ty.sty {
-            Int(Isize) => Int(self.cx().tcx.sess.target.isize_ty),
-            Uint(Usize) => Uint(self.cx().tcx.sess.target.usize_ty),
+            Int(Isize) => Int(self.tcx.sess.target.isize_ty),
+            Uint(Usize) => Uint(self.tcx.sess.target.usize_ty),
            ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
        };
@@ -529,7 +529,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            },
        };

-        let intrinsic = self.cx().get_intrinsic(&name);
+        let intrinsic = self.get_intrinsic(&name);
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (
            self.extract_value(res, 0),
@@ -637,7 +637,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        let vr = scalar.valid_range.clone();
        match scalar.value {
            layout::Int(..) => {
-                let range = scalar.valid_range_exclusive(bx.cx());
+                let range = scalar.valid_range_exclusive(bx);
                if range.start != range.end {
                    bx.range_metadata(load, range);
                }
@@ -676,7 +676,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            let load = self.load(llptr, align);
            scalar_load_metadata(self, load, scalar);
            if scalar.is_bool() {
-                self.trunc(load, self.cx().type_i1())
+                self.trunc(load, self.type_i1())
            } else {
                load
            }
@@ -696,7 +696,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {


    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
-        if self.cx().sess().target.target.arch == "amdgpu" {
+        if self.sess().target.target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks a i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
@@ -942,7 +942,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        }).collect::<Vec<_>>();

        debug!("Asm Output Type: {:?}", output);
-        let fty = self.cx().type_func(&argtys[..], output);
+        let fty = self.type_func(&argtys[..], output);
        unsafe {
            // Ask LLVM to verify that the constraints are well-formed.
            let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr());
@@ -970,14 +970,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
-        let size = self.intcast(size, self.cx().type_isize(), false);
+        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.cx().type_i8p());
-        let src = self.pointercast(src, self.cx().type_i8p());
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
                src, src_align.bytes() as c_uint, size, is_volatile);
@@ -990,14 +990,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
-        let size = self.intcast(size, self.cx().type_isize(), false);
+        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.cx().type_i8p());
-        let src = self.pointercast(src, self.cx().type_i8p());
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
                src, src_align.bytes() as c_uint, size, is_volatile);
@@ -1012,12 +1012,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        align: Align,
        flags: MemFlags,
    ) {
-        let ptr_width = &self.cx().sess().target.target.target_pointer_width;
+        let ptr_width = &self.sess().target.target.target_pointer_width;
        let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-        let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
-        let ptr = self.pointercast(ptr, self.cx().type_i8p());
-        let align = self.cx().const_u32(align.bytes() as u32);
-        let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
+        let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
+        let ptr = self.pointercast(ptr, self.type_i8p());
+        let align = self.const_u32(align.bytes() as u32);
+        let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
        self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
    }

@@ -1083,10 +1083,10 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
-            let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
+            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
-            let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64);
-            self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
+            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

@@ -1397,7 +1397,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys.iter()
-            .zip(args.iter().map(|&v| self.cx().val_ty(v)))
+            .zip(args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
@@ -1408,7 +1408,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
-                let actual_ty = self.cx().val_ty(actual_val);
+                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!("Type mismatch in function call of {:?}. \
                           Expected {:?} for param {}, got {:?}; injecting bitcast",
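The count_insn and overflow-intrinsic hunks above also lean on a third form of implicit deref: field access. self.stats and self.tcx are fields of the context, reached through the same Deref impl. A small runnable sketch of that behaviour, with a hypothetical counter field standing in for the real stats:

use std::cell::Cell;
use std::ops::Deref;

struct CodegenCx {
    // Stand-in for context fields like stats, tcx, or llcx.
    n_llvm_insns: Cell<u64>,
}

struct Builder<'a> {
    cx: &'a CodegenCx,
}

impl<'a> Deref for Builder<'a> {
    type Target = CodegenCx;
    fn deref(&self) -> &CodegenCx {
        self.cx
    }
}

fn main() {
    let cx = CodegenCx { n_llvm_insns: Cell::new(0) };
    let bx = Builder { cx: &cx };
    // Field access auto-derefs too: bx.n_llvm_insns resolves to
    // bx.cx.n_llvm_insns, mirroring how self.stats.borrow_mut()
    // replaces self.cx().stats.borrow_mut() in count_insn.
    bx.n_llvm_insns.set(bx.n_llvm_insns.get() + 1);
    assert_eq!(cx.n_llvm_insns.get(), 1);
}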

src/librustc_codegen_llvm/debuginfo/gdb.rs (+3 -3)
@@ -24,11 +24,11 @@ use syntax::attr;
/// Inserts a side-effect free instruction sequence that makes sure that the
/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) {
-    if needs_gdb_debug_scripts_section(bx.cx()) {
-        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
+    if needs_gdb_debug_scripts_section(bx) {
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
        // Load just the first byte as that's all that's necessary to force
        // LLVM to keep around the reference to the global.
-        let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)];
+        let indices = [bx.const_i32(0), bx.const_i32(0)];
        let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
        let volative_load_instruction = bx.volatile_load(element);
        unsafe {

src/librustc_codegen_llvm/debuginfo/source_loc.rs (+2 -2)
@@ -41,7 +41,7 @@ pub fn set_source_location<D>(
    };

    let dbg_loc = if function_debug_context.source_locations_enabled.get() {
-        debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span));
+        debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span));
        let loc = span_start(bx.cx(), span);
        InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize())
    } else {
@@ -76,7 +76,7 @@ pub fn set_debug_location(
    // For MSVC, set the column number to zero.
    // Otherwise, emit it. This mimics clang behaviour.
    // See discussion in https://github.com/rust-lang/rust/issues/42921
-    let col_used = if bx.cx().sess().target.target.options.is_like_msvc {
+    let col_used = if bx.sess().target.target.options.is_like_msvc {
        UNKNOWN_COLUMN_NUMBER
    } else {
        col as c_uint
