ThreadingUtil.cc

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "ThreadingUtil.h"
#include "LibCSymTable.h"

#ifdef THREADED

// ****************************************************************************
// Mutex wrapper

// Hidden implementation (pimpl) that owns the raw pthread mutex; the pthread
// calls are routed through the LIBC_SYMBOLS table rather than invoked directly.
struct Mutex::Impl {
    Impl() {
        LIBC_SYMBOLS.pthread_mutex_init(&mut_, 0);
    }
    ~Impl() {
        LIBC_SYMBOLS.pthread_mutex_destroy(&mut_);
    }
    pthread_mutex_t mut_;
};

Mutex::Mutex() : impl_(new Impl) {}
Mutex::~Mutex() { delete impl_; }

void Mutex::acquire() {
    LIBC_SYMBOLS.pthread_mutex_lock(&impl_->mut_);
}

void Mutex::release() {
    LIBC_SYMBOLS.pthread_mutex_unlock(&impl_->mut_);
}
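
// Illustrative usage sketch (not part of this file): assuming ThreadingUtil.h
// declares Mutex with the acquire()/release() interface defined above, a
// caller brackets a critical section roughly like this. counterMutex,
// counter, and incrementCounter are hypothetical names used only for
// illustration.
//
//   static Mutex counterMutex;
//   static int32_t counter = 0;
//
//   void incrementCounter() {
//       counterMutex.acquire();
//       ++counter;               // only one thread mutates counter at a time
//       counterMutex.release();
//   }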

// ****************************************************************************
// Atomics

// Atomically adds incr to *operand and returns the value *operand held before
// the addition (fetch-and-add).
int32_t atomic_post_incr(volatile int32_t* operand, int32_t incr)
{
#if defined(__GNUC__)
    return __sync_fetch_and_add(operand, incr);
#else
    int32_t result;
    __asm__ __volatile__(
        "lock xaddl %0,%1\n"
        : "=r"(result), "=m"(*operand)
        : "0"(incr)
        : "memory");
    return result;
#endif
}

// Atomically stores value into *ptr and returns the value *ptr held before
// the store (atomic exchange).
int32_t atomic_fetch_store(volatile int32_t *ptr, int32_t value)
{
#if defined(__GNUC__)
    return __sync_lock_test_and_set(ptr, value);
#else
    int32_t result;
    __asm__ __volatile__(
        "lock xchgl %0,%1\n"
        : "=r"(result), "=m"(*ptr)
        : "0"(value)
        : "memory");
    return result;
#endif
}

#else

// Non-THREADED build: no concurrent callers, so plain (non-atomic) versions
// of the same operations are sufficient.
int32_t atomic_post_incr(volatile int32_t* operand, int32_t incr)
{
    int32_t v = *operand;
    *operand += incr;
    return v;
}

int32_t atomic_fetch_store(volatile int32_t *ptr, int32_t value)
{
    int32_t result = *ptr;
    *ptr = value;
    return result;
}

#endif // THREADED
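
// Illustrative usage sketch (not part of this file): both helpers return the
// value held *before* the update, so atomic_post_incr can hand out unique ids
// and atomic_fetch_store can flip a flag while reporting its old state. A
// minimal sketch assuming a THREADED build; nextId, shutdownFlag, allocateId,
// and requestShutdown are hypothetical names.
//
//   static volatile int32_t nextId = 0;
//   static volatile int32_t shutdownFlag = 0;
//
//   int32_t allocateId() {
//       return atomic_post_incr(&nextId, 1);               // yields 0, 1, 2, ...
//   }
//
//   bool requestShutdown() {
//       return atomic_fetch_store(&shutdownFlag, 1) == 0;  // true only for the first caller
//   }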