@@ -612,3 +612,56 @@ static inline void _Py_atomic_fence_release(void);
612612#else
613613# error "long must be 4 or 8 bytes in size"
614614#endif // SIZEOF_LONG
615+
616+
617+ // --- _Py_atomic_memcpy / _Py_atomic_memmove ------------
618+
// Copy `n` bytes of pointer-aligned data from `src` to `dest`, performing
// each pointer-sized store with relaxed atomic semantics so a concurrent
// reader can never observe a torn pointer value.  The regions must not
// overlap (use the memmove variant for overlapping copies).  Both pointers
// must be pointer-aligned and `n` must be a multiple of sizeof(void *).
// Returns `dest`, mirroring the memcpy() contract.
static inline void *
_Py_atomic_memcpy_ptr_store_relaxed(void *dest, void *src, size_t n)
{
    assert(_Py_IS_ALIGNED(dest, sizeof(void *)));
    assert(_Py_IS_ALIGNED(src, sizeof(void *)));
    assert(n % sizeof(void *) == 0);

    // A self-copy would be wasted work; skip it entirely.
    if (dest != src) {
        void **d = (void **)dest;
        void **s = (void **)src;
        size_t count = n / sizeof(void *);

        for (size_t i = 0; i < count; i++) {
            _Py_atomic_store_ptr_relaxed(&d[i], s[i]);
        }
    }

    return dest;
}
638+
// Copy `n` bytes of pointer-aligned data from `src` to `dest`, performing
// each pointer-sized store with relaxed atomic semantics so a concurrent
// reader can never observe a torn pointer value.  Unlike the memcpy
// variant, the regions may overlap: the copy direction is chosen so that
// source words are read before they are overwritten.  Both pointers must
// be pointer-aligned and `n` must be a multiple of sizeof(void *).
// Returns `dest`, mirroring the memmove() contract.
static inline void *
_Py_atomic_memmove_ptr_store_relaxed(void *dest, void *src, size_t n)
{
    assert(_Py_IS_ALIGNED(dest, sizeof(void *)));
    assert(_Py_IS_ALIGNED(src, sizeof(void *)));
    assert(n % sizeof(void *) == 0);

    void **d = (void **)dest;
    void **s = (void **)src;
    size_t count = n / sizeof(void *);

    // NOTE(review): relational comparison of pointers into distinct
    // objects is formally unspecified in ISO C, but this is the standard
    // memmove-implementation idiom and works on all supported platforms.
    if (dest < src || (char *)dest >= (char *)src + n) {
        // dest is below src (or past the end of it): a forward copy
        // cannot clobber unread source words.
        for (size_t i = 0; i < count; i++) {
            _Py_atomic_store_ptr_relaxed(&d[i], s[i]);
        }
    }
    else if (dest > src) {
        // Overlapping with dest above src: copy backward.  An index-based
        // countdown avoids forming a pointer one element *before* the
        // destination array (the original `end = (void **)dest - 1`
        // sentinel), which is undefined behavior per C11 6.5.6.
        for (size_t i = count; i > 0; i--) {
            _Py_atomic_store_ptr_relaxed(&d[i - 1], s[i - 1]);
        }
    }
    // dest == src: nothing to do.

    return dest;
}
0 commit comments