I have the following methods
var photos: [MWPhoto] = [MWPhoto]()
func numberOfPhotosInPhotoBrowser(photoBrowser: MWPhotoBrowser!) -> UInt {
    return self.photos.count   // error: cannot convert return expression of type 'Int' to 'UInt'
}
// initializing Int
var someInt: Int = 8
someInt
// Converting Int to UInt
var someIntToUInt: UInt = UInt(someInt)
someIntToUInt
// initializing UInt
var someUInt: UInt = 10
someUInt
// Converting UInt to Int
var someUIntToInt: Int = Int(someUInt)
someUIntToInt
In the first one, the return type is UInt, but you return Int since count returns Int.
Basically UInt has initializers which take variants of value-type arguments such as Int, CGFloat, Double or even String and return a new value of that type.
-
/// MWPhotoBrowser data-source callback: reports how many photos are available.
/// The delegate contract asks for a UInt, while `Array.count` is an `Int`,
/// so the count is converted explicitly (safe here: a count is never negative).
func numberOfPhotosInPhotoBrowser(photoBrowser: MWPhotoBrowser!) -> UInt {
    let photoCount = self.photos.count
    return UInt(photoCount)
}
For the second one, the array subscript expects Int value where you are passing UInt, so create a new Int value type from UInt,
/// MWPhotoBrowser data-source callback: returns the photo at the given index.
/// Array subscripting requires an `Int`, so the delegate's `UInt` index is
/// converted first (traps only if the index exceeds `Int.max`, which a real
/// photo count never does).
func photoBrowser(photoBrowser: MWPhotoBrowser!, photoAtIndex index: UInt) -> MWPhotoProtocol! {
    let position = Int(index)
    return self.photos[position]
}
Add this anywhere outside of a class:
extension UInt {
    /// SwiftExtensionKit
    /// Bridges this value to `Int`. Note this traps at runtime if the value
    /// does not fit (i.e. exceeds `Int.max` on the current platform).
    var toInt: Int {
        return Int(self)
    }
}
Then just call:
self.photos[index.toInt]
I got so frustrated with Swift's cryptic method parameters bitPattern: and truncatingBitPattern: and my inability to remember which one to use when, that I created the following class containing a large number of conversion methods.
I'm not necessarily recommending that you include this in your program. I'm sure many people will say that Swift is trying to protect us from ourselves and that sabotaging that effort is dumb. So maybe you should just keep this file somewhere as a kind of cheat sheet so you can quickly determine how to do a conversion, and copy the parameters into your program when needed.
Incidentally, JDI stands for "just do it".
/// Class containing a large number of static methods to convert an Int to a UInt or vice-versa, and
/// also to perform conversions between different bit sizes, for example UInt32 to UInt8.
///
/// Many of these "conversions" are trivial, and are only included for the sake of completeness.
///
/// A few of the conversions involving Int and UInt can give different results when run on 32-bit
/// and 64-bit systems. All of the conversion where the bit size of both the source and the target
/// are specified will always give the same result independent of platform.
public class JDI {
// NOTE(review): written against Swift 3-era APIs — `init(truncatingBitPattern:)`
// was renamed `init(truncatingIfNeeded:)` in Swift 4; confirm the toolchain
// version before adopting this file.
//
// Conventions used throughout:
//   same width, opposite sign  -> init(bitPattern:)          (reinterpret bits)
//   wider source -> narrower   -> init(truncatingBitPattern:) (keep the low bits)
//   narrower source -> wider   -> plain init (value-preserving, cannot trap)
// MARK: - To signed Int
// To Int8
public static func ToInt8(_ x : Int8) -> Int8 {
return x
}
// Wider signed sources: keep only the low 8 bits.
public static func ToInt8(_ x : Int32) -> Int8 {
return Int8(truncatingBitPattern: x)
}
public static func ToInt8(_ x : Int64) -> Int8 {
return Int8(truncatingBitPattern: x)
}
public static func ToInt8(_ x : Int) -> Int8 {
return Int8(truncatingBitPattern: x)
}
// Same width, unsigned source: reinterpret the bits (0xf3 becomes -13).
public static func ToInt8(_ x : UInt8) -> Int8 {
return Int8(bitPattern: x)
}
public static func ToInt8(_ x : UInt32) -> Int8 {
return Int8(truncatingBitPattern: x)
}
public static func ToInt8(_ x : UInt64) -> Int8 {
return Int8(truncatingBitPattern: x)
}
public static func ToInt8(_ x : UInt) -> Int8 {
return Int8(truncatingBitPattern: x)
}
// To Int32
// Narrower signed source: value-preserving widening (sign-extends).
public static func ToInt32(_ x : Int8) -> Int32 {
return Int32(x)
}
public static func ToInt32(_ x : Int32) -> Int32 {
return x
}
public static func ToInt32(_ x : Int64) -> Int32 {
return Int32(truncatingBitPattern: x)
}
// Int is 32 or 64 bits depending on platform; truncation is harmless on
// 32-bit and keeps the low 32 bits on 64-bit.
public static func ToInt32(_ x : Int) -> Int32 {
return Int32(truncatingBitPattern: x)
}
public static func ToInt32(_ x : UInt8) -> Int32 {
return Int32(x)
}
public static func ToInt32(_ x : UInt32) -> Int32 {
return Int32(bitPattern: x)
}
public static func ToInt32(_ x : UInt64) -> Int32 {
return Int32(truncatingBitPattern: x)
}
public static func ToInt32(_ x : UInt) -> Int32 {
return Int32(truncatingBitPattern: x)
}
// To Int64
public static func ToInt64(_ x : Int8) -> Int64 {
return Int64(x)
}
public static func ToInt64(_ x : Int32) -> Int64 {
return Int64(x)
}
public static func ToInt64(_ x : Int64) -> Int64 {
return x
}
public static func ToInt64(_ x : Int) -> Int64 {
return Int64(x)
}
public static func ToInt64(_ x : UInt8) -> Int64 {
return Int64(x)
}
public static func ToInt64(_ x : UInt32) -> Int64 {
return Int64(x)
}
public static func ToInt64(_ x : UInt64) -> Int64 {
return Int64(bitPattern: x)
}
// Platform-dependent: on 32-bit systems UInt is zero-extended to 64 bits
// first, so large 32-bit inputs map to positive Int64 values.
public static func ToInt64(_ x : UInt) -> Int64 {
return Int64(bitPattern: UInt64(x)) // Does not extend high bit of 32-bit input
}
// To Int
public static func ToInt(_ x : Int8) -> Int {
return Int(x)
}
public static func ToInt(_ x : Int32) -> Int {
return Int(x)
}
public static func ToInt(_ x : Int64) -> Int {
return Int(truncatingBitPattern: x)
}
public static func ToInt(_ x : Int) -> Int {
return x
}
public static func ToInt(_ x : UInt8) -> Int {
return Int(x)
}
// Platform-dependent: on a 32-bit system UInt32 and Int are the same width,
// so reinterpret the bits; on 64-bit the plain init is value-preserving.
public static func ToInt(_ x : UInt32) -> Int {
if MemoryLayout<Int>.size == MemoryLayout<Int32>.size {
return Int(Int32(bitPattern: x)) // For 32-bit systems, non-authorized interpretation
}
return Int(x)
}
public static func ToInt(_ x : UInt64) -> Int {
return Int(truncatingBitPattern: x)
}
public static func ToInt(_ x : UInt) -> Int {
return Int(bitPattern: x)
}
// MARK: - To unsigned Int
// To UInt8
// Same width, signed source: reinterpret the bits (-13 becomes 0xf3).
public static func ToUInt8(_ x : Int8) -> UInt8 {
return UInt8(bitPattern: x)
}
public static func ToUInt8(_ x : Int32) -> UInt8 {
return UInt8(truncatingBitPattern: x)
}
public static func ToUInt8(_ x : Int64) -> UInt8 {
return UInt8(truncatingBitPattern: x)
}
public static func ToUInt8(_ x : Int) -> UInt8 {
return UInt8(truncatingBitPattern: x)
}
public static func ToUInt8(_ x : UInt8) -> UInt8 {
return x
}
public static func ToUInt8(_ x : UInt32) -> UInt8 {
return UInt8(truncatingBitPattern: x)
}
public static func ToUInt8(_ x : UInt64) -> UInt8 {
return UInt8(truncatingBitPattern: x)
}
public static func ToUInt8(_ x : UInt) -> UInt8 {
return UInt8(truncatingBitPattern: x)
}
// To UInt32
// Narrower signed source: widen (sign-extending) first, then reinterpret,
// so -13 maps to 0xfffffff3 rather than 0xf3.
public static func ToUInt32(_ x : Int8) -> UInt32 {
return UInt32(bitPattern: Int32(x)) // Extend sign bit, assume minus input significant
}
public static func ToUInt32(_ x : Int32) -> UInt32 {
return UInt32(bitPattern: x)
}
public static func ToUInt32(_ x : Int64) -> UInt32 {
return UInt32(truncatingBitPattern: x)
}
public static func ToUInt32(_ x : Int) -> UInt32 {
return UInt32(truncatingBitPattern: x)
}
public static func ToUInt32(_ x : UInt8) -> UInt32 {
return UInt32(x)
}
public static func ToUInt32(_ x : UInt32) -> UInt32 {
return x
}
public static func ToUInt32(_ x : UInt64) -> UInt32 {
return UInt32(truncatingBitPattern: x)
}
public static func ToUInt32(_ x : UInt) -> UInt32 {
return UInt32(truncatingBitPattern: x)
}
// To UInt64
public static func ToUInt64(_ x : Int8) -> UInt64 {
return UInt64(bitPattern: Int64(x)) // Extend sign bit, assume minus input significant
}
public static func ToUInt64(_ x : Int32) -> UInt64 {
return UInt64(bitPattern: Int64(x)) // Extend sign bit, assume minus input significant
}
public static func ToUInt64(_ x : Int64) -> UInt64 {
return UInt64(bitPattern: x)
}
public static func ToUInt64(_ x : Int) -> UInt64 {
return UInt64(bitPattern: Int64(x)) // Extend sign bit if necessary, assume minus input significant
}
public static func ToUInt64(_ x : UInt8) -> UInt64 {
return UInt64(x)
}
public static func ToUInt64(_ x : UInt32) -> UInt64 {
return UInt64(x)
}
public static func ToUInt64(_ x : UInt64) -> UInt64 {
return x
}
// Platform-dependent: on 32-bit systems this zero-extends, so it is NOT the
// mirror of ToUInt64(_ x : Int) above.
public static func ToUInt64(_ x : UInt) -> UInt64 {
return UInt64(x) // Does not extend high bit of 32-bit input
}
// To UInt
public static func ToUInt(_ x : Int8) -> UInt {
return UInt(bitPattern: Int(x)) // Extend sign bit, assume minus input significant
}
// Widen via Int64 so the sign bit is extended, then truncate to UInt's
// platform width (full 64 bits on 64-bit, low 32 bits on 32-bit).
public static func ToUInt(_ x : Int32) -> UInt {
return UInt(truncatingBitPattern: Int64(x)) // Extend sign bit, assume minus input significant
}
public static func ToUInt(_ x : Int64) -> UInt {
return UInt(truncatingBitPattern: x)
}
public static func ToUInt(_ x : Int) -> UInt {
return UInt(bitPattern: x)
}
public static func ToUInt(_ x : UInt8) -> UInt {
return UInt(x)
}
public static func ToUInt(_ x : UInt32) -> UInt {
return UInt(x)
}
public static func ToUInt(_ x : UInt64) -> UInt {
return UInt(truncatingBitPattern: x)
}
public static func ToUInt(_ x : UInt) -> UInt {
return x
}
}
Here's some test code:
/// Exercises every JDI overload with spot-check values covering the interesting
/// cases: small positive values, negative values, and near-max values that
/// expose truncation and sign-reinterpretation. Platform-dependent results are
/// guarded with `#if arch(...)`. Note: `assert` is compiled out in release
/// (-O) builds, so run this in a debug configuration.
public func doTest() {
// To Int8
assert(JDI.ToInt8(42 as Int8) == 42)
assert(JDI.ToInt8(-13 as Int8) == -13)
assert(JDI.ToInt8(42 as Int32) == 42)
assert(JDI.ToInt8(257 as Int32) == 1) // 257 = 0x101; only the low byte (1) survives
assert(JDI.ToInt8(42 as Int64) == 42)
assert(JDI.ToInt8(257 as Int64) == 1)
assert(JDI.ToInt8(42 as Int) == 42)
assert(JDI.ToInt8(257 as Int) == 1)
assert(JDI.ToInt8(42 as UInt8) == 42)
assert(JDI.ToInt8(0xf3 as UInt8) == -13)
assert(JDI.ToInt8(42 as UInt32) == 42)
assert(JDI.ToInt8(0xfffffff3 as UInt32) == -13)
assert(JDI.ToInt8(42 as UInt64) == 42)
assert(JDI.ToInt8(UInt64.max - 12) == -13) // low byte of ...fff3 is 0xf3 == -13
assert(JDI.ToInt8(42 as UInt) == 42)
assert(JDI.ToInt8(UInt.max - 12) == -13)
// To Int32
assert(JDI.ToInt32(42 as Int8) == 42)
assert(JDI.ToInt32(-13 as Int8) == -13)
assert(JDI.ToInt32(42 as Int32) == 42)
assert(JDI.ToInt32(-13 as Int32) == -13)
assert(JDI.ToInt32(42 as Int64) == 42)
assert(JDI.ToInt32(Int64(Int32.min) - 1) == Int32.max) // wrap-around via truncation
assert(JDI.ToInt32(42 as Int) == 42)
assert(JDI.ToInt32(-13 as Int) == -13)
assert(JDI.ToInt32(42 as UInt8) == 42)
assert(JDI.ToInt32(0xf3 as UInt8) == 243)
assert(JDI.ToInt32(42 as UInt32) == 42)
assert(JDI.ToInt32(0xfffffff3 as UInt32) == -13)
assert(JDI.ToInt32(42 as UInt64) == 42)
assert(JDI.ToInt32(UInt64.max - 12) == -13)
assert(JDI.ToInt32(42 as UInt) == 42)
assert(JDI.ToInt32(UInt.max - 12) == -13)
// To Int64
assert(JDI.ToInt64(42 as Int8) == 42)
assert(JDI.ToInt64(-13 as Int8) == -13)
assert(JDI.ToInt64(42 as Int32) == 42)
assert(JDI.ToInt64(-13 as Int32) == -13)
assert(JDI.ToInt64(42 as Int64) == 42)
assert(JDI.ToInt64(-13 as Int64) == -13)
assert(JDI.ToInt64(42 as Int) == 42)
assert(JDI.ToInt64(-13 as Int) == -13)
assert(JDI.ToInt64(42 as UInt8) == 42)
assert(JDI.ToInt64(0xf3 as UInt8) == 243)
assert(JDI.ToInt64(42 as UInt32) == 42)
assert(JDI.ToInt64(0xfffffff3 as UInt32) == 4294967283)
assert(JDI.ToInt64(42 as UInt64) == 42)
assert(JDI.ToInt64(UInt64.max - 12) == -13)
assert(JDI.ToInt64(42 as UInt) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToInt64(UInt.max - 12) == 4294967283) // For 32-bit systems
#else
assert(JDI.ToInt64(UInt.max - 12) == -13) // For 64-bit systems
#endif
// To Int
assert(JDI.ToInt(42 as Int8) == 42)
assert(JDI.ToInt(-13 as Int8) == -13)
assert(JDI.ToInt(42 as Int32) == 42)
assert(JDI.ToInt(-13 as Int32) == -13)
assert(JDI.ToInt(42 as Int64) == 42)
assert(JDI.ToInt(-13 as Int64) == -13)
assert(JDI.ToInt(42 as Int) == 42)
assert(JDI.ToInt(-13 as Int) == -13)
assert(JDI.ToInt(42 as UInt8) == 42)
assert(JDI.ToInt(0xf3 as UInt8) == 243)
assert(JDI.ToInt(42 as UInt32) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToInt(0xfffffff3 as UInt32) == -13) // For 32-bit systems
#else
assert(JDI.ToInt(0xfffffff3 as UInt32) == 4294967283) // For 64-bit systems
#endif
assert(JDI.ToInt(42 as UInt64) == 42)
assert(JDI.ToInt(UInt64.max - 12) == -13)
assert(JDI.ToInt(42 as UInt) == 42)
assert(JDI.ToInt(UInt.max - 12) == -13)
// To UInt8
assert(JDI.ToUInt8(42 as Int8) == 42)
assert(JDI.ToUInt8(-13 as Int8) == 0xf3)
assert(JDI.ToUInt8(42 as Int32) == 42)
assert(JDI.ToUInt8(-13 as Int32) == 0xf3)
assert(JDI.ToUInt8(42 as Int64) == 42)
assert(JDI.ToUInt8(-13 as Int64) == 0xf3)
assert(JDI.ToUInt8(Int64.max - 12) == 0xf3)
assert(JDI.ToUInt8(42 as Int) == 42)
assert(JDI.ToUInt8(-13 as Int) == 0xf3)
assert(JDI.ToUInt8(Int.max - 12) == 0xf3)
assert(JDI.ToUInt8(42 as UInt8) == 42)
assert(JDI.ToUInt8(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt8(42 as UInt32) == 42)
assert(JDI.ToUInt8(0xfffffff3 as UInt32) == 0xf3)
assert(JDI.ToUInt8(42 as UInt64) == 42)
assert(JDI.ToUInt8(UInt64.max - 12) == 0xf3)
assert(JDI.ToUInt8(42 as UInt) == 42)
assert(JDI.ToUInt8(UInt.max - 12) == 0xf3)
// To UInt32
assert(JDI.ToUInt32(42 as Int8) == 42)
assert(JDI.ToUInt32(-13 as Int8) == 0xfffffff3) // sign-extended before reinterpreting
assert(JDI.ToUInt32(42 as Int32) == 42)
assert(JDI.ToUInt32(-13 as Int32) == 0xfffffff3)
assert(JDI.ToUInt32(42 as Int64) == 42)
assert(JDI.ToUInt32(-13 as Int64) == 0xfffffff3)
assert(JDI.ToUInt32(Int64.max - 12) == 0xfffffff3)
assert(JDI.ToUInt32(42 as Int) == 42)
assert(JDI.ToUInt32(-13 as Int) == 0xfffffff3)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt32(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt32(Int.max - 12) == 0xfffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt32(42 as UInt8) == 42)
assert(JDI.ToUInt32(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt32(42 as UInt32) == 42)
assert(JDI.ToUInt32(0xfffffff3 as UInt32) == 0xfffffff3)
assert(JDI.ToUInt32(42 as UInt64) == 42)
assert(JDI.ToUInt32(UInt64.max - 12) == 0xfffffff3)
assert(JDI.ToUInt32(42 as UInt) == 42)
assert(JDI.ToUInt32(UInt.max - 12) == 0xfffffff3)
// To UInt64
assert(JDI.ToUInt64(42 as Int8) == 42)
assert(JDI.ToUInt64(-13 as Int8) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(42 as Int32) == 42)
assert(JDI.ToUInt64(-13 as Int32) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(42 as Int64) == 42)
assert(JDI.ToUInt64(-13 as Int64) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(Int64.max - 12) == (UInt64.max >> 1) - 12)
assert(JDI.ToUInt64(42 as Int) == 42)
assert(JDI.ToUInt64(-13 as Int) == 0xfffffffffffffff3)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt64(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt64(Int.max - 12) == 0x7ffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt64(42 as UInt8) == 42)
assert(JDI.ToUInt64(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt64(42 as UInt32) == 42)
assert(JDI.ToUInt64(0xfffffff3 as UInt32) == 0xfffffff3)
assert(JDI.ToUInt64(42 as UInt64) == 42)
assert(JDI.ToUInt64(UInt64.max - 12) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(42 as UInt) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt64(UInt.max - 12) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt64(UInt.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
#endif
// To UInt
assert(JDI.ToUInt(42 as Int8) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(-13 as Int8) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(-13 as Int8) == 0xfffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as Int32) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(-13 as Int32) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(-13 as Int32) == 0xfffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as Int64) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(-13 as Int64) == 0xfffffff3) // For 32-bit systems
assert(JDI.ToUInt(Int64.max - 12) == 0xfffffff3)
#else
assert(JDI.ToUInt(-13 as Int64) == 0xfffffffffffffff3) // For 64-bit systems
assert(JDI.ToUInt(Int64.max - 12) == 0x7ffffffffffffff3)
#endif
assert(JDI.ToUInt(42 as Int) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(Int.max - 12) == 0x7ffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as UInt8) == 42)
assert(JDI.ToUInt(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt(42 as UInt32) == 42)
assert(JDI.ToUInt(0xfffffff3 as UInt32) == 0xfffffff3)
assert(JDI.ToUInt(42 as UInt64) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(UInt64.max - 12) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(UInt64.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as UInt) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(UInt.max - 12) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(UInt.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
#endif
print("\nTesting JDI complete.\n")
}
If you want an unsigned integer from a negative value, use UInt(bitPattern:):
let intVal = -1
// Reinterprets the Int's two's-complement bits as a UInt of the same width.
let uintVal = UInt(bitPattern: intVal) // uintVal == 0xffffffffffffffff (on a 64-bit platform; 0xffffffff on 32-bit)