Diffstat (limited to 'include')
-rw-r--r--  include/platform.h  |  2
1 files changed, 2 insertions, 0 deletions
diff --git a/include/platform.h b/include/platform.h
index 8ed05a4ad..a41daa08c 100644
--- a/include/platform.h
+++ b/include/platform.h
@@ -194,12 +194,14 @@
  * a lvalue. This makes it more likely to not swap them by mistake
  */
 #if defined(i386) || defined(__x86_64__)
+# define move_from_unaligned_int(v, intp) ((v) = *(int*)(intp))
 # define move_from_unaligned16(v, u16p) ((v) = *(uint16_t*)(u16p))
 # define move_from_unaligned32(v, u32p) ((v) = *(uint32_t*)(u32p))
 # define move_to_unaligned32(u32p, v) (*(uint32_t*)(u32p) = (v))
 /* #elif ... - add your favorite arch today! */
 #else
 /* performs reasonably well (gcc usually inlines memcpy here) */
+# define move_from_unaligned_int(v, intp) (memcpy(&(v), (intp), sizeof(int)))
 # define move_from_unaligned16(v, u16p) (memcpy(&(v), (u16p), 2))
 # define move_from_unaligned32(v, u32p) (memcpy(&(v), (u32p), 4))
 # define move_to_unaligned32(u32p, v) do { \
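
The new move_from_unaligned_int() helper mirrors the existing 16- and 32-bit macros: on i386/x86_64 it expands to a plain pointer cast and load, while the generic fallback copies sizeof(int) bytes with memcpy(), which gcc normally inlines. A minimal caller sketch follows; the function and buffer names are illustrative only and not part of the patch:

#include <string.h>
#include "platform.h"

/* Read an int from a possibly unaligned position in a raw byte buffer. */
static int read_int_at(const unsigned char *buf, unsigned off)
{
	int v;
	/* x86: expands to (v) = *(int*)(buf + off);
	 * other arches: memcpy(&v, buf + off, sizeof(int)) */
	move_from_unaligned_int(v, buf + off);
	return v;
}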