author     Christian Heimes <christian@python.org>    2016-09-13 20:22:02 +0200
committer  Christian Heimes <christian@python.org>    2016-09-13 20:22:02 +0200
commit    a516e391d34bf1983282a237d9eb60d7c2cbb82f (patch)
tree      8db983051f1c70d754ec169fbf251757f72fff9f /Include
parent    6b081918f2a92178617e417202148b735d6311e0 (diff)
download  cpython-a516e391d34bf1983282a237d9eb60d7c2cbb82f.tar.gz
Issue #28126: Replace Py_MEMCPY with memcpy(). Visual Studio can properly optimize memcpy().
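For context, a minimal sketch (not part of this commit) of why the MSVC-specific fallback is no longer worthwhile: current compilers, including Visual Studio 2015 and later, expand a small fixed-size memcpy() inline, so the hand-rolled byte loop that Py_MEMCPY used for short copies no longer avoids any call overhead. The function name copy_small below is illustrative only.

    #include <string.h>

    /* Illustrative only: a short, fixed-size copy such as this is typically
     * compiled down to a couple of register moves rather than a call to
     * memcpy(), which is exactly the case the removed Py_MEMCPY macro
     * special-cased. */
    static void
    copy_small(char *dst, const char *src)
    {
        memcpy(dst, src, 8);
    }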
Diffstat (limited to 'Include')
-rw-r--r--  Include/pyport.h         45
-rw-r--r--  Include/unicodeobject.h   2
2 files changed, 15 insertions, 32 deletions
diff --git a/Include/pyport.h b/Include/pyport.h
index b631cf3c98..be1d66d563 100644
--- a/Include/pyport.h
+++ b/Include/pyport.h
@@ -177,26 +177,9 @@ typedef int Py_ssize_clean_t;
#define Py_LOCAL_INLINE(type) static type
#endif
-/* Py_MEMCPY can be used instead of memcpy in cases where the copied blocks
- * are often very short. While most platforms have highly optimized code for
- * large transfers, the setup costs for memcpy are often quite high. MEMCPY
- * solves this by doing short copies "in line".
- */
-
-#if defined(_MSC_VER)
-#define Py_MEMCPY(target, source, length) do { \
- size_t i_, n_ = (length); \
- char *t_ = (void*) (target); \
- const char *s_ = (void*) (source); \
- if (n_ >= 16) \
- memcpy(t_, s_, n_); \
- else \
- for (i_ = 0; i_ < n_; i_++) \
- t_[i_] = s_[i_]; \
- } while (0)
-#else
+/* Py_MEMCPY is kept for backwards compatibility,
+ * see https://bugs.python.org/issue28126 */
#define Py_MEMCPY memcpy
-#endif
#include <stdlib.h>
@@ -449,18 +432,18 @@ extern "C" {
#define HAVE_PY_SET_53BIT_PRECISION 1
#define _Py_SET_53BIT_PRECISION_HEADER \
unsigned int old_fpcr, new_fpcr
-#define _Py_SET_53BIT_PRECISION_START \
- do { \
- __asm__ ("fmove.l %%fpcr,%0" : "=g" (old_fpcr)); \
- /* Set double precision / round to nearest. */ \
- new_fpcr = (old_fpcr & ~0xf0) | 0x80; \
- if (new_fpcr != old_fpcr) \
- __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (new_fpcr)); \
+#define _Py_SET_53BIT_PRECISION_START \
+ do { \
+ __asm__ ("fmove.l %%fpcr,%0" : "=g" (old_fpcr)); \
+ /* Set double precision / round to nearest. */ \
+ new_fpcr = (old_fpcr & ~0xf0) | 0x80; \
+ if (new_fpcr != old_fpcr) \
+ __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (new_fpcr)); \
} while (0)
-#define _Py_SET_53BIT_PRECISION_END \
- do { \
- if (new_fpcr != old_fpcr) \
- __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (old_fpcr)); \
+#define _Py_SET_53BIT_PRECISION_END \
+ do { \
+ if (new_fpcr != old_fpcr) \
+ __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (old_fpcr)); \
} while (0)
#endif
@@ -742,7 +725,7 @@ extern pid_t forkpty(int *, char *, struct termios *, struct winsize *);
#endif
#ifdef VA_LIST_IS_ARRAY
-#define Py_VA_COPY(x, y) Py_MEMCPY((x), (y), sizeof(va_list))
+#define Py_VA_COPY(x, y) memcpy((x), (y), sizeof(va_list))
#else
#ifdef __va_copy
#define Py_VA_COPY __va_copy
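The Py_VA_COPY hunk above only changes what the macro expands to on VA_LIST_IS_ARRAY platforms; callers are unaffected. Below is a hedged usage sketch, not taken from this diff, showing the typical pattern of copying a va_list before consuming it twice. It assumes the CPython headers are on the include path; Py_VA_COPY is provided by pyport.h, which Python.h pulls in.

    #include "Python.h"   /* pulls in pyport.h, which defines Py_VA_COPY */
    #include <stdarg.h>
    #include <stdio.h>

    /* Illustrative helper: formats the same arguments twice, which requires
     * a private copy of the va_list because the first vprintf() consumes it. */
    static int
    print_twice(const char *fmt, ...)
    {
        va_list args, args_copy;
        int n;

        va_start(args, fmt);
        Py_VA_COPY(args_copy, args);   /* memcpy() or __va_copy, per platform */
        n = vprintf(fmt, args);
        n += vprintf(fmt, args_copy);
        va_end(args_copy);
        va_end(args);
        return n;
    }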
diff --git a/Include/unicodeobject.h b/Include/unicodeobject.h
index 38f733bd4f..bc6ecd4e81 100644
--- a/Include/unicodeobject.h
+++ b/Include/unicodeobject.h
@@ -156,7 +156,7 @@ typedef uint8_t Py_UCS1;
Py_UNICODE_ISNUMERIC(ch))
#define Py_UNICODE_COPY(target, source, length) \
- Py_MEMCPY((target), (source), (length)*sizeof(Py_UNICODE))
+ memcpy((target), (source), (length)*sizeof(Py_UNICODE))
#define Py_UNICODE_FILL(target, value, length) \
do {Py_ssize_t i_; Py_UNICODE *t_ = (target); Py_UNICODE v_ = (value);\