summary refs log tree commit diff
path: root/erts/include
diff options
context:
space:
mode:
authorKian-Meng, Ang <kianmeng@cpan.org>2021-11-19 11:35:37 +0800
committerKian-Meng, Ang <kianmeng@cpan.org>2021-11-19 11:35:37 +0800
commit66fd5472e1b3b42082914ebc72bf3497a061fc31 (patch)
tree87297a39c7b8fe0fd34f6e5431084a8d93f1bd9d /erts/include
parente1fd3e0ca3a555ed6bb776bedcee42517d540941 (diff)
downloaderlang-66fd5472e1b3b42082914ebc72bf3497a061fc31.tar.gz
Fix typos in erts/{example, include}
Diffstat (limited to 'erts/include')
-rw-r--r--erts/include/internal/ethr_atomics.h2
-rw-r--r--erts/include/internal/ethread_header_config.h.in2
-rw-r--r--erts/include/internal/gcc/ethr_dw_atomic.h2
-rw-r--r--erts/include/internal/gcc/ethr_membar.h12
-rw-r--r--erts/include/internal/i386/ethr_dw_atomic.h6
-rw-r--r--erts/include/internal/win/ethr_dw_atomic.h2
6 files changed, 13 insertions, 13 deletions
diff --git a/erts/include/internal/ethr_atomics.h b/erts/include/internal/ethr_atomics.h
index 06568201ad..65381fc558 100644
--- a/erts/include/internal/ethr_atomics.h
+++ b/erts/include/internal/ethr_atomics.h
@@ -62,7 +62,7 @@
* - read
* - init
*
- * Appart from a function implementing the atomic operation
+ * Apart from a function implementing the atomic operation
* with unspecified memory barrier semantics, there are
* functions implementing each operation with the following
* implied memory barrier semantics:
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index 300068b952..fee3b013e8 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -182,7 +182,7 @@
/* Define if you use a gcc that supports the double word cmpxchg instruction */
#undef ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT
-/* Define if gcc wont let you clobber ebx with cmpxchg8b and position
+/* Define if gcc won't let you clobber ebx with cmpxchg8b and position
independent code */
#undef ETHR_CMPXCHG8B_PIC_NO_CLOBBER_EBX
diff --git a/erts/include/internal/gcc/ethr_dw_atomic.h b/erts/include/internal/gcc/ethr_dw_atomic.h
index d661bf33fb..eb597d5a4d 100644
--- a/erts/include/internal/gcc/ethr_dw_atomic.h
+++ b/erts/include/internal/gcc/ethr_dw_atomic.h
@@ -72,7 +72,7 @@ typedef volatile ETHR_NATIVE_SU_DW_SINT_T * ethr_native_dw_ptr_t;
* This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
* aligned memory in 32-bit mode. A malloc implementation that does
* not adhere to these alignment requirements is seriously broken,
- * and we wont bother trying to work around it.
+ * and we won't bother trying to work around it.
*
* Since memory alignment may be off by one word we need to align at
* runtime. We, therefore, need an extra word allocated.
diff --git a/erts/include/internal/gcc/ethr_membar.h b/erts/include/internal/gcc/ethr_membar.h
index aeef8115a3..6763c3475e 100644
--- a/erts/include/internal/gcc/ethr_membar.h
+++ b/erts/include/internal/gcc/ethr_membar.h
@@ -31,9 +31,9 @@
* ordered around it according to the semantics of the
* barrier specified.
*
- * The C11 aproch is different. The __atomic builtins
+ * The C11 approach is different. The __atomic builtins
* API takes a memory model parameter. Assuming that all
- * memory syncronizations using the involved atomic
+ * memory synchronizations using the involved atomic
* variables are made using this API, the synchronizations
* will adhere to the memory models used. That is, you do
* *not* know how loads and stores will be ordered around
@@ -69,7 +69,7 @@
* Why is this? Since all synchronizations is expected
* to be made using the __atomic builtins, memory
* barriers only have to be issued by some of them,
- * and you do not know which ones wont issue memory
+ * and you do not know which ones won't issue memory
* barriers.
*
* One can easily be fooled into believing that when
@@ -93,8 +93,8 @@
* __ATOMIC_ACQUIRE and __ATOMIC_RELEASE memory
* models. This since an __atomic builtin memory
* access using the __ATOMIC_ACQUIRE must at least
- * issue an aquire memory barrier and an __atomic
- * builtin memory acess with the __ATOMIC_RELEASE
+ * issue an acquire memory barrier and an __atomic
+ * builtin memory access with the __ATOMIC_RELEASE
* memory model must at least issue a release memory
* barrier. Otherwise the two cannot be paired.
* 4. All __atomic builtins accessing memory using the
@@ -240,7 +240,7 @@ ethr_full_fence__(void)
#endif
/*
- * Define ETHR_READ_DEPEND_MEMORY_BARRIER for all architechtures
+ * Define ETHR_READ_DEPEND_MEMORY_BARRIER for all architectures
* not known to order data dependent loads
*
* This is a bit too conservative, but better safe than sorry...
diff --git a/erts/include/internal/i386/ethr_dw_atomic.h b/erts/include/internal/i386/ethr_dw_atomic.h
index e3dbc82518..94e9bad2ff 100644
--- a/erts/include/internal/i386/ethr_dw_atomic.h
+++ b/erts/include/internal/i386/ethr_dw_atomic.h
@@ -67,7 +67,7 @@ typedef volatile ethr_native_sint128_t__ * ethr_native_dw_ptr_t;
* This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
* aligned memory in 32-bit mode. A malloc implementation that does
* not adhere to these alignment requirements is seriously broken,
- * and we wont bother trying to work around it.
+ * and we won't bother trying to work around it.
*
* Since memory alignment may be off by one word we need to align at
* runtime. We, therefore, need an extra word allocated.
@@ -159,7 +159,7 @@ ethr_native_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
#if ETHR_NO_CLOBBER_EBX__ && ETHR_CMPXCHG8B_REGISTER_SHORTAGE
/*
- * gcc wont let us use ebx as input and we
+ * gcc won't let us use ebx as input and we
* get a register shortage
*/
@@ -176,7 +176,7 @@ ethr_native_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
#elif ETHR_NO_CLOBBER_EBX__
/*
- * gcc wont let us use ebx as input
+ * gcc won't let us use ebx as input
*/
__asm__ __volatile__(
diff --git a/erts/include/internal/win/ethr_dw_atomic.h b/erts/include/internal/win/ethr_dw_atomic.h
index 03311f6025..d04d2e28ca 100644
--- a/erts/include/internal/win/ethr_dw_atomic.h
+++ b/erts/include/internal/win/ethr_dw_atomic.h
@@ -74,7 +74,7 @@ typedef volatile __int64 * ethr_native_dw_ptr_t;
* This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
* aligned memory in 32-bit mode. A malloc implementation that does
* not adhere to these alignment requirements is seriously broken,
- * and we wont bother trying to work around it.
+ * and we won't bother trying to work around it.
*
* Since memory alignment may be off by one word we need to align at
* runtime. We, therefore, need an extra word allocated.