gwnavruntime/kernel/SF_Atomic.h Source File

SF_Atomic.h
1 /*
2 * Copyright 2015 Autodesk, Inc. All rights reserved.
3 * Use of this software is subject to the terms of the Autodesk license agreement and any attachments or Appendices thereto provided at the time of installation or download,
4 * or which otherwise accompanies this software in either electronic or hard copy form, or which is signed by you and accepted by Autodesk.
5 */
6 
7 /**************************************************************************
8 
9 PublicHeader: None
10 Filename : KY_Atomic.h
11 Content : Contains atomic operations and inline fast locking
12  functionality. Will contain #ifdefs for OS efficiency.
13  Falls back to a non-thread-safe implementation if thread support is not available.
14 Created : May 5, 2003
15 Authors : Michael Antonov, Andrew Reisse
16 
17 **************************************************************************/
18 
19 #ifndef INC_KY_Kernel_Atomic_H
20 #define INC_KY_Kernel_Atomic_H
21 
23 
24 // Include System thread functionality.
25 #if defined(KY_OS_WIN32)
26 #include <windows.h>
27 #elif defined(KY_OS_XBOX360)
28 #include <xtl.h>
29 
30 #elif defined(KY_OS_PS3)
31 #include <sys/synchronization.h>
32 #include <pthread.h>
33 
34 #elif defined(KY_OS_WII)
35 #include <revolution/os.h>
36 
37 #elif defined(KY_OS_PSVITA)
38 #include <kernel.h>
39 #include <string.h>
40 
41 #elif defined(KY_OS_3DS)
42 #include <nn/os/os_CriticalSection.h>
43 
44 #elif defined(KY_OS_WIIU)
45 
46 #else
47 #include <pthread.h>
48 #endif
49 
50 
51 namespace Kaim {
52 
53 // ****** Declared classes
54 
55 // If there is NO thread support we implement AtomicOps and
56 // Lock objects as no-ops. The other classes are not defined.
57 template<class C> class AtomicOps;
58 template<class T> class AtomicInt;
59 template<class T> class AtomicPtr;
60 
61 class Lock;
62 
63 
64 // ***** AtomicOps
65 
66 // Atomic operations are provided by the AtomicOps template class,
67 // implemented through system-specific AtomicOpsRaw specializations.
68 // It provides several fundamental operations such as Exchange, ExchangeAdd,
69 // CompareAndSet, and Store_Release. Each function includes several memory
70 // synchronization versions, important for multiprocessing CPUs with weak
71 // memory consistency. The following memory fencing strategies are supported:
72 //
73 // - NoSync. No memory synchronization is done for the atomic op.
74 // - Release. All other memory writes are completed before the atomic op
75 // writes its result.
76 // - Acquire. Further memory reads are forced to wait until the atomic op
77 // executes, guaranteeing that the right values will be seen.
78 // - Sync. A combination of Release and Acquire.
79 
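As a minimal usage sketch of the Release/Acquire pairing described above (not part of the original header; the include path, the UInt32 typedef, and the g_payload/g_ready names are assumptions for illustration), one thread can publish data with Store_Release and another can observe it with Load_Acquire:

    #include "gwnavruntime/kernel/SF_Atomic.h"   // assumed include path

    static int g_payload = 0;                         // plain data to publish
    static Kaim::AtomicInt<Kaim::UInt32> g_ready(0);  // publication flag

    void Producer()
    {
        g_payload = 42;             // ordinary write
        g_ready.Store_Release(1);   // Release: the write above completes first
    }

    void Consumer()
    {
        if (g_ready.Load_Acquire() == 1)  // Acquire: later reads wait for the op
        {
            int observed = g_payload;     // sees the published value
            (void)observed;
        }
    }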
80 
81 // *** AtomicOpsRaw
82 
83 // AtomicOpsRaw is a specialized template that provides atomic operations
84 // used by AtomicOps. This class has two fundamental qualities: (1) it
85 // defines a type T of the correct size, and (2) it provides operations that work
86 // atomically, such as Exchange_Sync and CompareAndSet_Release.
87 
88 // AtomicOpsRawBase class contains shared constants/classes for AtomicOpsRaw.
89 // The primary thing it does is define sync class objects, whose destructor and
90 // constructor provide places to insert appropriate synchronization calls, on
91 // systems where such calls are necessary. So far, the breakdown is as follows:
92 //
93 // - X86 systems don't need custom syncs, since their exchange/atomic
94 // instructions are implicitly synchronized.
95 // - PowerPC requires lwsync/isync instructions that can use this mechanism.
96 // - If some other systems require a mechanism where syncing type is associated
97 // with a particular instruction, the default implementation (which implements
98 // all Sync, Acquire, and Release modes in terms of NoSync and fence) may not
99 // work. In that case it will need to be #ifdef-ed conditionally.
100 
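The following standalone sketch (illustrative only; ExampleAcquireSync and ExampleExchangeAcquire are not part of this header) shows the construction/destruction pattern the comment above describes: a scoped sync object brackets a raw, unfenced operation, and its constructor or destructor is where a platform fence would be emitted.

    struct ExampleAcquireSync
    {
        ExampleAcquireSync() { }                       // nothing needed before the op
        ~ExampleAcquireSync() { /* platform fence, e.g. isync or dmb */ }
    };

    template<class RawOp>
    int ExampleExchangeAcquire(volatile int* p, int val, RawOp rawExchangeNoSync)
    {
        ExampleAcquireSync sync;           // destructor runs after the atomic op,
        return rawExchangeNoSync(p, val);  // giving the exchange acquire semantics
    }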
101 struct AtomicOpsRawBase
102 {
103 #if !defined(KY_ENABLE_THREADS) || defined(KY_CPU_X86) || defined(KY_OS_WIN32) || defined(KY_OS_XBOX) || defined(KY_OS_IPHONE) || defined(KY_OS_3DS)
104  // Need to have empty constructor to avoid class 'unused' variable warning.
105  struct FullSync { inline FullSync() { } };
106  struct AcquireSync { inline AcquireSync() { } };
107  struct ReleaseSync { inline ReleaseSync() { } };
108 
109 #elif defined(KY_OS_XBOX360)
110  // The XBox360 CPU implements weak memory ordering and thus needs to be synced. Unlike Win32,
111  // where synchronizing is implied with interlocked operations, on 360 it needs to be done explicitly.
112  // For some reason the 360 documentation recommends the use of '__lwsync()' for both acquire and
113  // release semantics, although technically PowerPC should use 'isync' for acquire. The XBox360
114  // docs never mention 'isync'. This should be investigated in more detail.
115  struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { __lwsync(); } };
116  struct ReleaseSync { inline ReleaseSync() { __lwsync(); } ~ReleaseSync() { } };
117  // TBD: Can we not have two sides?
118  struct FullSync { inline FullSync() { __lwsync(); } ~FullSync() { __lwsync(); } };
119 
120 #elif defined(KY_CPU_PPC64) || defined(KY_CPU_PPC)
121  struct FullSync { inline FullSync() { asm volatile("sync\n"); } ~FullSync() { asm volatile("isync\n"); } };
122  struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("isync\n"); } };
123  struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };
124 
125 #elif defined(KY_CPU_MIPS)
126  struct FullSync { inline FullSync() { asm volatile("sync\n"); } ~FullSync() { asm volatile("sync\n"); } };
127  struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("sync\n"); } };
128  struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };
129 
130 #elif defined(KY_CPU_ARM)
131  struct FullSync { inline FullSync() { asm volatile("dmb\n"); } ~FullSync() { asm volatile("dmb\n"); } };
132  struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("dmb\n"); } };
133  struct ReleaseSync { inline ReleaseSync() { asm volatile("dmb\n"); } };
134 
135 
136 #elif (defined(KY_CC_GNU) && (__GNUC__ >= 4)) || defined(KY_CC_CLANG)
137  // __sync functions are already full sync
138  struct FullSync { inline FullSync() { } };
139  struct AcquireSync { inline AcquireSync() { } };
140  struct ReleaseSync { inline ReleaseSync() { } };
141 #endif
142 };
143 
144 
145 // 4-Byte raw data atomic op implementation class.
146 struct AtomicOpsRaw_4ByteImpl : public AtomicOpsRawBase
147 {
148 #if !defined(KY_ENABLE_THREADS)
149 
150  // Provide a type for no-thread-support cases. Used by AtomicOpsRaw_DefImpl.
151  typedef UInt32 T;
152 
153  // *** Thread-Safe Atomic Versions.
154 
155 #elif defined(KY_OS_WIN32) || defined(KY_OS_WINCE) || defined(KY_OS_XBOX) || defined(KY_OS_XBOX360)
156 
157  // Use special defines for VC6, where volatile is not used and
158  // InterlockedCompareExchange is declared incorrectly.
159  typedef LONG T;
160 #if defined(KY_CC_MSVC) && (KY_CC_MSVC < 1300)
161  typedef T* InterlockTPtr;
162  typedef LPVOID ET;
163  typedef ET* InterlockETPtr;
164 #else
165  typedef volatile T* InterlockTPtr;
166  typedef T ET;
167  typedef InterlockTPtr InterlockETPtr;
168 #endif
169  inline static T Exchange_NoSync(volatile T* p, T val) { return InterlockedExchange((InterlockTPtr)p, val); }
170  inline static T ExchangeAdd_NoSync(volatile T* p, T val) { return InterlockedExchangeAdd((InterlockTPtr)p, val); }
171  inline static bool CompareAndSet_NoSync(volatile T* p, T c, T val) { return InterlockedCompareExchange((InterlockETPtr)p, (ET)val, (ET)c) == (ET)c; }
172 
173 #elif defined(KY_CPU_PPC64) || defined(KY_CPU_PPC)
174  typedef UInt32 T;
175 #ifdef KY_CC_MWERKS
176  static inline UInt32 Exchange_NoSync(volatile register UInt32 *i, register UInt32 j)
177  {
178  register UInt32 ret;
179  asm {
180  @L1: lwarx ret,0,i
181  stwcx. j,0,i
182  bne- @L1
183  };
184 
185  return ret;
186  }
187 
188  static inline UInt32 ExchangeAdd_NoSync(volatile register UInt32 *i, register UInt32 j)
189  {
190  register UInt32 tmp, ret;
191 
192  asm {
193  @L1: lwarx ret,0,i
194  add tmp,ret,j
195  stwcx. tmp,0,i
196  bne- @L1
197  };
198 
199  return ret;
200  }
201 
202  static inline bool CompareAndSet_NoSync(volatile register UInt32 *i, register UInt32 c, register UInt32 value)
203  {
204  register UInt32 ret;
205 
206  asm {
207  @L1: lwarx ret,0,i
208  cmpw 0,ret,c
209  mfcr ret
210  bne- @L2
211  stwcx. value,0,i
212  bne- @L1
213  @L2:
214  };
215 
216  return (ret & 0x20000000) ? 1 : 0;
217  }
218 #elif defined(KY_CC_SNC)
219  static inline UInt32 Exchange_NoSync(volatile register UInt32 *i, register UInt32 j)
220  {
221  return __builtin_cellAtomicStore32((unsigned int *) i, j);
222  }
223 
224  static inline UInt32 ExchangeAdd_NoSync(volatile register UInt32 *i, register UInt32 j)
225  {
226  return __builtin_cellAtomicAdd32((unsigned int *) i, j);
227  }
228 
229  static inline bool CompareAndSet_NoSync(volatile register UInt32 *i, register UInt32 c, register UInt32 value)
230  {
231  return (c == __builtin_cellAtomicCompareAndSwap32((unsigned int *) i, c, value));
232  }
233 
234 #else
235  static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
236  {
237  UInt32 ret;
238 
239  asm volatile("1:\n\t"
240  "lwarx %[r],0,%[i]\n\t"
241  "stwcx. %[j],0,%[i]\n\t"
242  "bne- 1b\n"
243  : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [j] "b" (j) : "cc", "memory");
244 
245  return ret;
246  }
247 
248  static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
249  {
250  UInt32 dummy, ret;
251 
252  asm volatile("1:\n\t"
253  "lwarx %[r],0,%[i]\n\t"
254  "add %[o],%[r],%[j]\n\t"
255  "stwcx. %[o],0,%[i]\n\t"
256  "bne- 1b\n"
257  : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc", "memory");
258 
259  return ret;
260  }
261 
262  static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
263  {
264  UInt32 ret;
265 
266  asm volatile("1:\n\t"
267  "lwarx %[r],0,%[i]\n\t"
268  "cmpw 0,%[r],%[cmp]\n\t"
269  "mfcr %[r]\n\t"
270  "bne- 2f\n\t"
271  "stwcx. %[val],0,%[i]\n\t"
272  "bne- 1b\n\t"
273  "2:\n"
274  : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc", "memory");
275 
276  return (ret & 0x20000000) ? 1 : 0;
277  }
278 
279 #endif
280 #elif defined(KY_CPU_MIPS)
281  typedef UInt32 T;
282 
283 #ifdef KY_CC_SNC
284  static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
285  {
286  UInt32 ret;
287 
288  asm volatile("1:\n\t"
289  "ll ret,0(i)\n\t"
290  "move $t4,j\n\t"
291  "sc $t4,0(i)\n\t"
292  "beq $t4,$0,1b\n\t"
293  "nop \n\t");
294 
295  return ret;
296  }
297 
298  static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
299  {
300  UInt32 dummy, ret;
301 
302  asm volatile("1:\n\t"
303  "ll ret,0(i)\n\t"
304  "addu $t4,ret,j\n\t"
305  "sc $t4,0(i)\n\t"
306  "beq $t4,$0,1b\n\t"
307  "nop \n\t");
308 
309  return ret;
310  }
311 
312  static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
313  {
314  UInt32 ret, dummy;
315 
316  asm volatile("1:\n\t"
317  "move ret,$0\n\t"
318  "ll $t4,0(i)\n\t"
319  "bne $t4,c,2f\n\t"
320  "move $t4,value\n\t"
321  "sc $t4,0(i)\n\t"
322  "beq $t4,$0,1b\n\t"
323  "nop \n\t"
324  "2:\n"
325  "move ret,$t4\n\t");
326 
327  return ret;
328  }
329 
330 #else
331  static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
332  {
333  UInt32 ret;
334 
335  asm volatile("1:\n\t"
336  "ll %[r],0(%[i])\n\t"
337  "sc %[j],0(%[i])\n\t"
338  "beq %[j],$0,1b\n\t"
339  "nop \n"
340  : "+m" (*i), [r] "=&d" (ret) : [i] "d" (i), [j] "d" (j) : "cc", "memory");
341 
342  return ret;
343  }
344 
345  static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
346  {
347  UInt32 ret;
348 
349  asm volatile("1:\n\t"
350  "ll %[r],0(%[i])\n\t"
351  "addu %[j],%[r],%[j]\n\t"
352  "sc %[j],0(%[i])\n\t"
353  "beq %[j],$0,1b\n\t"
354  "nop \n"
355  : "+m" (*i), [r] "=&d" (ret) : [i] "d" (i), [j] "d" (j) : "cc", "memory");
356 
357  return ret;
358  }
359 
360  static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
361  {
362  UInt32 ret, dummy;
363 
364  asm volatile("1:\n\t"
365  "move %[r],$0\n\t"
366  "ll %[o],0(%[i])\n\t"
367  "bne %[o],%[c],2f\n\t"
368  "move %[r],%[v]\n\t"
369  "sc %[r],0(%[i])\n\t"
370  "beq %[r],$0,1b\n\t"
371  "nop \n\t"
372  "2:\n"
373  : "+m" (*i),[r] "=&d" (ret), [o] "=&d" (dummy) : [i] "d" (i), [c] "d" (c), [v] "d" (value)
374  : "cc", "memory");
375 
376  return ret;
377  }
378 #endif
379 
380 #elif defined(KY_CPU_ARM) && (defined(KY_CC_ARM) || defined(KY_CC_SNC))
381  typedef UInt32 T;
382 
383  static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
384  {
385  for(;;)
386  {
387  T r = __ldrex(i);
388  if (__strex(j, i) == 0)
389  return r;
390  }
391  }
392  static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
393  {
394  for(;;)
395  {
396  T r = __ldrex(i);
397  if (__strex(r + j, i) == 0)
398  return r;
399  }
400  }
401 
402  static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
403  {
404  for(;;)
405  {
406  T r = __ldrex(i);
407  if (r != c)
408  return 0;
409  if (__strex(value, i) == 0)
410  return 1;
411  }
412  }
413 
414 #elif defined(KY_CPU_ARM)
415  typedef UInt32 T;
416 
417  static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
418  {
419  UInt32 ret, dummy;
420 
421  asm volatile("1:\n\t"
422  "ldrex %[r],[%[i]]\n\t"
423  "strex %[t],%[j],[%[i]]\n\t"
424  "cmp %[t],#0\n\t"
425  "bne 1b\n\t"
426  : "+m" (*i), [r] "=&r" (ret), [t] "=&r" (dummy) : [i] "r" (i), [j] "r" (j) : "cc", "memory");
427 
428  return ret;
429  }
430 
431  static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
432  {
433  UInt32 ret, dummy, test;
434 
435  asm volatile("1:\n\t"
436  "ldrex %[r],[%[i]]\n\t"
437  "add %[o],%[r],%[j]\n\t"
438  "strex %[t],%[o],[%[i]]\n\t"
439  "cmp %[t],#0\n\t"
440  "bne 1b\n\t"
441  : "+m" (*i), [r] "=&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [j] "r" (j) : "cc", "memory");
442 
443  return ret;
444  }
445 
446  static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
447  {
448  UInt32 ret = 1, dummy, test;
449 
450  asm volatile("1:\n\t"
451  "ldrex %[o],[%[i]]\n\t"
452  "cmp %[o],%[c]\n\t"
453  "bne 2f\n\t"
454  "strex %[r],%[v],[%[i]]\n\t"
455  "cmp %[r],#0\n\t"
456  "bne 1b\n\t"
457  "2:\n"
458  : "+m" (*i),[r] "=&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [c] "r" (c), [v] "r" (value)
459  : "cc", "memory");
460 
461  return !ret;
462  }
463 
464 #elif defined(KY_CPU_X86)
465  typedef UInt32 T;
466 
467  static inline UInt32 Exchange_NoSync(volatile UInt32 *i, UInt32 j)
468  {
469  asm volatile("xchgl %1,%[i]\n"
470  : "+m" (*i), "=q" (j) : [i] "m" (*i), "1" (j) : "cc", "memory");
471 
472  return j;
473  }
474 
475  static inline UInt32 ExchangeAdd_NoSync(volatile UInt32 *i, UInt32 j)
476  {
477  asm volatile("lock; xaddl %1,%[i]\n"
478  : "+m" (*i), "+q" (j) : [i] "m" (*i) : "cc", "memory");
479 
480  return j;
481  }
482 
483  static inline bool CompareAndSet_NoSync(volatile UInt32 *i, UInt32 c, UInt32 value)
484  {
485  UInt32 ret;
486 
487  asm volatile("lock; cmpxchgl %[v],%[i]\n"
488  : "+m" (*i), "=a" (ret) : [i] "m" (*i), "1" (c), [v] "q" (value) : "cc", "memory");
489 
490  return (ret == c);
491  }
492 
493 #elif (defined(KY_CC_GNU) && (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1)) || defined(KY_CC_CLANG)
494 
495  typedef UInt32 T;
496 
497  static inline T Exchange_NoSync(volatile T *i, T j)
498  {
499  T v;
500  do {
501  v = *i;
502  } while (!__sync_bool_compare_and_swap(i, v, j));
503  return v;
504  }
505 
506  static inline T ExchangeAdd_NoSync(volatile T *i, T j)
507  {
508  return __sync_fetch_and_add(i, j);
509  }
510 
511  static inline bool CompareAndSet_NoSync(volatile T *i, T c, T value)
512  {
513  return __sync_bool_compare_and_swap(i, c, value);
514  }
515 
516 #endif // OS
517 };
518 
519 
520 // 8-Byte raw data atomic op implementation class.
521 // Currently an implementation is provided only on systems with 64-bit pointers.
522 struct AtomicOpsRaw_8ByteImpl : public AtomicOpsRawBase
523 {
524 #if !defined(KY_64BIT_POINTERS) || !defined(KY_ENABLE_THREADS)
525 
526  // Provide a type for no-thread-support cases. Used by AtomicOpsRaw_DefImpl.
527  typedef UInt64 T;
528 
529  // *** Thread-Safe OS-specific versions.
530 #elif defined(KY_OS_WIN32)
531 
532  // This is only for 64-bit systems.
533  typedef LONG64 T;
534  typedef volatile T* InterlockTPtr;
535  inline static T Exchange_NoSync(volatile T* p, T val) { return InterlockedExchange64((InterlockTPtr)p, val); }
536  inline static T ExchangeAdd_NoSync(volatile T* p, T val) { return InterlockedExchangeAdd64((InterlockTPtr)p, val); }
537  inline static bool CompareAndSet_NoSync(volatile T* p, T c, T val) { return InterlockedCompareExchange64((InterlockTPtr)p, val, c) == c; }
538 
539 #elif defined(KY_CPU_PPC64)
540 
541  typedef UInt64 T;
542 
543  static inline UInt64 Exchange_NoSync(volatile UInt64 *i, UInt64 j)
544  {
545  UInt64 dummy, ret;
546 
547  asm volatile("1:\n\t"
548  "ldarx %[r],0,%[i]\n\t"
549  "mr %[o],%[j]\n\t"
550  "stdcx. %[o],0,%[i]\n\t"
551  "bne- 1b\n"
552  : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");
553 
554  return ret;
555  }
556 
557  static inline UInt64 ExchangeAdd_NoSync(volatile UInt64 *i, UInt64 j)
558  {
559  UInt64 dummy, ret;
560 
561  asm volatile("1:\n\t"
562  "ldarx %[r],0,%[i]\n\t"
563  "add %[o],%[r],%[j]\n\t"
564  "stdcx. %[o],0,%[i]\n\t"
565  "bne- 1b\n"
566  : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");
567 
568  return ret;
569  }
570 
571  static inline bool CompareAndSet_NoSync(volatile UInt64 *i, UInt64 c, UInt64 value)
572  {
573  UInt64 ret, dummy;
574 
575  asm volatile("1:\n\t"
576  "ldarx %[r],0,%[i]\n\t"
577  "cmpw 0,%[r],%[cmp]\n\t"
578  "mfcr %[r]\n\t"
579  "bne- 2f\n\t"
580  "stdcx. %[val],0,%[i]\n\t"
581  "bne- 1b\n\t"
582  "2:\n"
583  : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc");
584 
585  return (ret & 0x20000000) ? 1 : 0;
586  }
587 
588 #elif (defined(KY_CC_GNU) && (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1)) || defined(KY_CC_CLANG)
589 
590  typedef UInt64 T;
591 
592  static inline T Exchange_NoSync(volatile T *i, T j)
593  {
594  T v;
595  do {
596  v = *i;
597  } while (!__sync_bool_compare_and_swap(i, v, j));
598  return v;
599  }
600 
601  static inline T ExchangeAdd_NoSync(volatile T *i, T j)
602  {
603  return __sync_fetch_and_add(i, j);
604  }
605 
606  static inline bool CompareAndSet_NoSync(volatile T *i, T c, T value)
607  {
608  return __sync_bool_compare_and_swap(i, c, value);
609  }
610 
611 #endif // OS
612 };
613 
614 
615 // Default implementation for AtomicOpsRaw; provides implementation of mem-fenced
616 // atomic operations where fencing is done with a sync object wrapped around a NoSync
617 // operation implemented in the base class. If such an implementation is not possible
618 // on a given platform, #ifdefs can be used to disable it and then op functions can be
619 // implemented individually in the appropriate AtomicOpsRaw<size> class.
620 
621 template<class O>
622 struct AtomicOpsRaw_DefImpl : public O
623 {
624  typedef typename O::T O_T;
625  typedef typename O::FullSync O_FullSync;
626  typedef typename O::AcquireSync O_AcquireSync;
627  typedef typename O::ReleaseSync O_ReleaseSync;
628 
629  // If there is no thread support, provide the default implementation. In this case,
630  // the base class (O) must still provide the T declaration.
631 #ifndef KY_ENABLE_THREADS
632 
633  // Atomic exchange of val with argument. Returns old val.
634  inline static O_T Exchange_NoSync(volatile O_T* p, O_T val) { O_T old = *p; *p = val; return old; }
635  // Adds a new val to argument; returns its old val.
636  inline static O_T ExchangeAdd_NoSync(volatile O_T* p, O_T val) { O_T old = *p; *p += val; return old; }
637  // Compares the argument data with 'c' val.
638  // If they match, stores val into '*p' and returns true; otherwise returns false.
639  inline static bool CompareAndSet_NoSync(volatile O_T* p, O_T c, O_T val) { if (*p==c) { *p = val; return 1; } return 0; }
640 
641 #endif
642 
643  // If a NoSync wrapped implementation is not possible, this block should be
644  // replaced with per-function implementations in O.
645  // !AB: if the Wii compiler (CW 4.3 145) is used with the option "-iso_templates on", then we need to use
646  // the "AtomicOpsRaw_DefImpl<O>::" prefix in the calls below.
647  inline static O_T Exchange_Sync(volatile O_T* p, O_T val) { O_FullSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
648  inline static O_T Exchange_Release(volatile O_T* p, O_T val) { O_ReleaseSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
649  inline static O_T Exchange_Acquire(volatile O_T* p, O_T val) { O_AcquireSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::Exchange_NoSync(p, val); }
650  inline static O_T ExchangeAdd_Sync(volatile O_T* p, O_T val) { O_FullSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
651  inline static O_T ExchangeAdd_Release(volatile O_T* p, O_T val) { O_ReleaseSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
652  inline static O_T ExchangeAdd_Acquire(volatile O_T* p, O_T val) { O_AcquireSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::ExchangeAdd_NoSync(p, val); }
653  inline static bool CompareAndSet_Sync(volatile O_T* p, O_T c, O_T val) { O_FullSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p,c,val); }
654  inline static bool CompareAndSet_Release(volatile O_T* p, O_T c, O_T val) { O_ReleaseSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p,c,val); }
655  inline static bool CompareAndSet_Acquire(volatile O_T* p, O_T c, O_T val) { O_AcquireSync sync; KY_UNUSED(sync); return AtomicOpsRaw_DefImpl<O>::CompareAndSet_NoSync(p,c,val); }
656 
657  // Loads and stores with memory fence. These have only the relevant versions.
658 #ifdef KY_CPU_X86
659  // On X86, Store_Release is implemented as exchange. Note that we can also
660  // consider 'sfence' in the future, although it is not as compatible with older CPUs.
661  inline static void Store_Release(volatile O_T* p, O_T val) { Exchange_Release(p, val); }
662 #else
663  inline static void Store_Release(volatile O_T* p, O_T val) { O_ReleaseSync sync; KY_UNUSED(sync); *p = val; }
664 #endif
665  inline static O_T Load_Acquire(const volatile O_T* p) { O_AcquireSync sync; KY_UNUSED(sync); return *p; }
666 };
667 
668 
669 template<int size>
670 struct AtomicOpsRaw : public AtomicOpsRawBase { };
671 
672 template<>
673 struct AtomicOpsRaw<4> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_4ByteImpl>
674 {
675  // Ensure that assigned type size is correct.
676  AtomicOpsRaw()
677  { KY_COMPILER_ASSERT(sizeof(AtomicOpsRaw_DefImpl<AtomicOpsRaw_4ByteImpl>::T) == 4); }
678 };
679 template<>
680 struct AtomicOpsRaw<8> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_8ByteImpl>
681 {
682  AtomicOpsRaw()
683  { KY_COMPILER_ASSERT(sizeof(AtomicOpsRaw_DefImpl<AtomicOpsRaw_8ByteImpl>::T) == 8); }
684 };
685 
686 
687 // *** AtomicOps - implementation of atomic Ops for specified class
688 
689 // Implements atomic ops on a class, provided that the object is either
690 // 4 or 8 bytes in size (depending on the AtomicOpsRaw specializations
691 // available). Relies on AtomicOpsRaw for much of the implementation.
692 
693 template<class C>
694 class AtomicOps
695 {
696  typedef AtomicOpsRaw<sizeof(C)> Ops;
697  typedef typename Ops::T T;
698  typedef volatile typename Ops::T* PT;
699  // We cast through unions to (1) avoid pointer size compiler warnings
700  // and (2) ensure that there are no problems with strict pointer aliasing.
701  union C2T_union { C c; T t; };
702 
703 public:
704  // General purpose implementation for standard syncs.
705  inline static C Exchange_Sync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_Sync((PT)p, u.t); return u.c; }
706  inline static C Exchange_Release(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_Release((PT)p, u.t); return u.c; }
707  inline static C Exchange_Acquire(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_Acquire((PT)p, u.t); return u.c; }
708  inline static C Exchange_NoSync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::Exchange_NoSync((PT)p, u.t); return u.c; }
709  inline static C ExchangeAdd_Sync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Sync((PT)p, u.t); return u.c; }
710  inline static C ExchangeAdd_Release(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Release((PT)p, u.t); return u.c; }
711  inline static C ExchangeAdd_Acquire(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_Acquire((PT)p, u.t); return u.c; }
712  inline static C ExchangeAdd_NoSync(volatile C* p, C val) { C2T_union u; u.c = val; u.t = Ops::ExchangeAdd_NoSync((PT)p, u.t); return u.c; }
713  inline static bool CompareAndSet_Sync(volatile C* p, C c, C val) { C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Sync((PT)p, cu.t, u.t); }
714  inline static bool CompareAndSet_Release(volatile C* p, C c, C val){ C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Release((PT)p, cu.t, u.t); }
715  inline static bool CompareAndSet_Acquire(volatile C* p, C c, C val){ C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_Acquire((PT)p, cu.t, u.t); }
716  inline static bool CompareAndSet_NoSync(volatile C* p, C c, C val) { C2T_union u,cu; u.c = val; cu.c = c; return Ops::CompareAndSet_NoSync((PT)p, cu.t, u.t); }
717  // Loads and stores with memory fence. These have only the relevant versions.
718  inline static void Store_Release(volatile C* p, C val) { C2T_union u; u.c = val; Ops::Store_Release((PT)p, u.t); }
719  inline static C Load_Acquire(const volatile C* p) { C2T_union u; u.t = Ops::Load_Acquire((PT)p); return u.c; }
720 };
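A brief usage sketch of AtomicOps on a user-defined 4-byte type (illustrative; ExampleState and the functions below are not part of this header, and the enum is assumed to compile to 4 bytes on the target):

    enum ExampleState { State_Idle = 0, State_Busy = 1 };

    volatile ExampleState g_state = State_Idle;

    bool TryEnterBusy()
    {
        // Atomically move Idle -> Busy; fails if another thread won the race.
        return Kaim::AtomicOps<ExampleState>::CompareAndSet_Acquire(&g_state, State_Idle, State_Busy);
    }

    void LeaveBusy()
    {
        Kaim::AtomicOps<ExampleState>::Store_Release(&g_state, State_Idle);
    }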
721 
722 
723 
724 // Atomic value base class - implements operations shared for integers and pointers.
725 template<class T>
726 class AtomicValueBase
727 {
728 protected:
729  typedef AtomicOps<T> Ops;
730 public:
731 
732  volatile T Value;
733 
734  inline AtomicValueBase() { }
735  explicit inline AtomicValueBase(T val) { Ops::Store_Release(&Value, val); }
736 
737  // Most libraries (TBB and Joshua Scholar's) do not do a Load_Acquire
738  // here, since most algorithms do not require atomic loads. Needs some research.
739  inline operator T() const { return Value; }
740 
741  // *** Standard Atomic inlines
742  inline T Exchange_Sync(T val) { return Ops::Exchange_Sync(&Value, val); }
743  inline T Exchange_Release(T val) { return Ops::Exchange_Release(&Value, val); }
744  inline T Exchange_Acquire(T val) { return Ops::Exchange_Acquire(&Value, val); }
745  inline T Exchange_NoSync(T val) { return Ops::Exchange_NoSync(&Value, val); }
746  inline bool CompareAndSet_Sync(T c, T val) { return Ops::CompareAndSet_Sync(&Value, c, val); }
747  inline bool CompareAndSet_Release(T c, T val) { return Ops::CompareAndSet_Release(&Value, c, val); }
748  inline bool CompareAndSet_Acquire(T c, T val) { return Ops::CompareAndSet_Acquire(&Value, c, val); }
749  inline bool CompareAndSet_NoSync(T c, T val) { return Ops::CompareAndSet_NoSync(&Value, c, val); }
750  // Load & Store.
751  inline void Store_Release(T val) { Ops::Store_Release(&Value, val); }
752  inline T Load_Acquire() const { return Ops::Load_Acquire(&Value); }
753 };
754 
755 
756 // ***** AtomicPtr - Atomic pointer template
757 
758 // This pointer class supports atomic assignments with release,
759 // increment / decrement operations, and conditional compare + set.
760 
761 template<class T>
762 class AtomicPtr : public AtomicValueBase<T*>
763 {
764  typedef typename AtomicValueBase<T*>::Ops Ops;
765 
766 public:
767  // Initialize pointer value to 0 by default; use Store_Release only with explicit constructor.
768  inline AtomicPtr() : AtomicValueBase<T*>() { this->Value = 0; }
769  explicit inline AtomicPtr(T* val) : AtomicValueBase<T*>(val) { }
770 
771  // Pointer access.
772  inline T* operator -> () const { return this->Load_Acquire(); }
773 
774  // It looks like it is convenient to have Load_Acquire characteristics
775  // for this, since that helps algorithms such as linked
776  // list traversals where nodes can be added by another thread.
777  inline operator T* () const { return this->Load_Acquire(); }
778 
779 
780  // *** Standard Atomic inlines (applicable to pointers)
781 
782  // ExchangeAdd considers pointer size for pointers.
783  template<class I>
784  inline T* ExchangeAdd_Sync(I incr) { return Ops::ExchangeAdd_Sync(&this->Value, ((T*)0) + incr); }
785  template<class I>
786  inline T* ExchangeAdd_Release(I incr) { return Ops::ExchangeAdd_Release(&this->Value, ((T*)0) + incr); }
787  template<class I>
788  inline T* ExchangeAdd_Acquire(I incr) { return Ops::ExchangeAdd_Acquire(&this->Value, ((T*)0) + incr); }
789  template<class I>
790  inline T* ExchangeAdd_NoSync(I incr) { return Ops::ExchangeAdd_NoSync(&this->Value, ((T*)0) + incr); }
791 
792  // *** Atomic Operators
793 
794  inline T* operator = (T* val) { this->Store_Release(val); return val; }
795 
796  template<class I>
797  inline T* operator += (I val) { return ExchangeAdd_Sync(val) + val; }
798  template<class I>
799  inline T* operator -= (I val) { return operator += (-val); }
800 
801  inline T* operator ++ () { return ExchangeAdd_Sync(1) + 1; }
802  inline T* operator -- () { return ExchangeAdd_Sync(-1) - 1; }
803  inline T* operator ++ (int) { return ExchangeAdd_Sync(1); }
804  inline T* operator -- (int) { return ExchangeAdd_Sync(-1); }
805 };
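A hedged sketch of AtomicPtr in the linked-list scenario mentioned above (ExampleNode, g_head, and PushFront are illustrative names, not part of this header):

    struct ExampleNode { int Data; ExampleNode* pNext; };

    Kaim::AtomicPtr<ExampleNode> g_head;   // default constructor initializes to 0

    void PushFront(ExampleNode* n)
    {
        // Standard CAS loop: link the new node in front of the current head.
        ExampleNode* oldHead;
        do {
            oldHead = g_head;              // operator T*() does a Load_Acquire
            n->pNext = oldHead;
        } while (!g_head.CompareAndSet_Sync(oldHead, n));
    }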
806 
807 
808 // ***** AtomicInt - Atomic integer template
809 
810 // Implements an atomic integer type; the exact type to use is provided
811 // as an argument. Supports atomic Acquire / Release semantics, atomic
812 // arithmetic operations, and atomic conditional compare + set.
813 
814 template<class T>
815 class AtomicInt : public AtomicValueBase<T>
816 {
817  typedef typename AtomicValueBase<T>::Ops Ops;
818 
819 public:
820  inline AtomicInt() : AtomicValueBase<T>() { }
821  explicit inline AtomicInt(T val) : AtomicValueBase<T>(val) { }
822 
823 
824  // *** Standard Atomic inlines (applicable to int)
825  inline T ExchangeAdd_Sync(T val) { return Ops::ExchangeAdd_Sync(&this->Value, val); }
826  inline T ExchangeAdd_Release(T val) { return Ops::ExchangeAdd_Release(&this->Value, val); }
827  inline T ExchangeAdd_Acquire(T val) { return Ops::ExchangeAdd_Acquire(&this->Value, val); }
828  inline T ExchangeAdd_NoSync(T val) { return Ops::ExchangeAdd_NoSync(&this->Value, val); }
829  // These increments could be more efficient because they don't return a value.
830  inline void Increment_Sync() { ExchangeAdd_Sync((T)1); }
831  inline void Increment_Release() { ExchangeAdd_Release((T)1); }
832  inline void Increment_Acquire() { ExchangeAdd_Acquire((T)1); }
833  inline void Increment_NoSync() { ExchangeAdd_NoSync((T)1); }
834 
835  // *** Atomic Operators
836 
837  inline T operator = (T val) { this->Store_Release(val); return val; }
838  inline T operator += (T val) { return ExchangeAdd_Sync(val) + val; }
839  inline T operator -= (T val) { return ExchangeAdd_Sync(0 - val) - val; }
840 
841  inline T operator ++ () { return ExchangeAdd_Sync((T)1) + 1; }
842  inline T operator -- () { return ExchangeAdd_Sync(((T)0)-1) - 1; }
843  inline T operator ++ (int) { return ExchangeAdd_Sync((T)1); }
844  inline T operator -- (int) { return ExchangeAdd_Sync(((T)0)-1); }
845 
846  // More complex atomic operations. Leave it to the compiler whether to optimize them or not.
847  T operator &= (T arg)
848  {
849  T comp, newVal;
850  do {
851  comp = this->Value;
852  newVal = comp & arg;
853  } while(!this->CompareAndSet_Sync(comp, newVal));
854  return newVal;
855  }
856 
857  T operator |= (T arg)
858  {
859  T comp, newVal;
860  do {
861  comp = this->Value;
862  newVal = comp | arg;
863  } while(!this->CompareAndSet_Sync(comp, newVal));
864  return newVal;
865  }
866 
867  T operator ^= (T arg)
868  {
869  T comp, newVal;
870  do {
871  comp = this->Value;
872  newVal = comp ^ arg;
873  } while(!this->CompareAndSet_Sync(comp, newVal));
874  return newVal;
875  }
876 
877  T operator *= (T arg)
878  {
879  T comp, newVal;
880  do {
881  comp = this->Value;
882  newVal = comp * arg;
883  } while(!this->CompareAndSet_Sync(comp, newVal));
884  return newVal;
885  }
886 
887  T operator /= (T arg)
888  {
889  T comp, newVal;
890  do {
891  comp = this->Value;
892  newVal = comp / arg;
893  } while(!this->CompareAndSet_Sync(comp, newVal));
894  return newVal;
895  }
896 
897  T operator >>= (unsigned bits)
898  {
899  T comp, newVal;
900  do {
901  comp = this->Value;
902  newVal = comp >> bits;
903  } while(!this->CompareAndSet_Sync(comp, newVal));
904  return newVal;
905  }
906 
907  T operator <<= (unsigned bits)
908  {
909  T comp, newVal;
910  do {
911  comp = this->Value;
912  newVal = comp << bits;
913  } while(!this->CompareAndSet_Sync(comp, newVal));
914  return newVal;
915  }
916 };
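A minimal sketch of the common reference-counting use of AtomicInt (ExampleRefCounted and its members are illustrative; this is not the library's actual ref-count implementation):

    class ExampleRefCounted
    {
        Kaim::AtomicInt<int> RefCount;
    public:
        ExampleRefCounted() : RefCount(1) { }
        virtual ~ExampleRefCounted() { }

        void AddRef() { RefCount.Increment_NoSync(); }   // plain increment, no fence needed
        void Release()
        {
            // ExchangeAdd returns the previous value; the last owner deletes.
            if (RefCount.ExchangeAdd_Release(-1) == 1)
                delete this;
        }
    };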
917 
918 
919 
920 
921 // ***** Lock
922 
923 // Lock is the simplest and most efficient mutual-exclusion lock class.
924 // Unlike Mutex, it cannot be waited on.
925 
926 class Lock
927 {
928  // NOTE: Locks are not allocatable and they themselves should not allocate
929  // memory by standard means. This is the case because StandardAllocator
930  // relies on this class.
931  // Make 'delete' private. Don't do this for 'new' since it can be redefined.
932  void operator delete(void*) {}
933 
934 
935  // *** Lock implementation for various platforms.
936 
937 #if !defined(KY_ENABLE_THREADS)
938 
939 public:
940  // With no thread support, lock does nothing.
941  inline Lock() { }
942  inline Lock(unsigned) { }
943  inline ~Lock() { }
944  inline void DoLock() { }
945  inline void Unlock() { }
946 
947  // Windows.
948 #elif defined(KY_OS_WIN32)
949 
950  // Optimized Win32 CriticalSection similar to code provided by
951  // "Fast critical sections with timeout" by Vladislav Gelfer on CodeProject.
952  // Promises 2x lock/unlock performance improvement.
953  //#define KY_FAST_LOCK
954 
955 #if defined(KY_FAST_LOCK)
956  AtomicInt<DWORD> LockedThreadId;
957  AtomicInt<int> WaiterCount;
958  volatile HANDLE hSemaphore;
959 
960  unsigned SpinMax;
961  unsigned RecursiveLockCount;
962 
963  inline bool PerfLockImmediate(DWORD threadId) { return LockedThreadId.CompareAndSet_Acquire(0, threadId); }
964  inline void WaiterPlus() { WaiterCount.ExchangeAdd_NoSync(1); }
965  inline void WaiterMinus() { WaiterCount.ExchangeAdd_NoSync(-1); }
966 
967  void PerfLock(DWORD threadId);
968  void PerfUnlock();
969  void AllocateKernelSemaphore();
970  void SetSpinMax(unsigned maxCount);
971 #else
972  CRITICAL_SECTION cs;
973 #endif
974 
975 public:
976  Lock(unsigned spinCount = 0);
977  ~Lock();
978  // Locking functions.
979 #if !defined(KY_FAST_LOCK)
980  inline void DoLock() { ::EnterCriticalSection(&cs); }
981  inline void Unlock() { ::LeaveCriticalSection(&cs); }
982 #else
983  void DoLock();
984  void Unlock();
985 #endif
986 
987 #elif defined(KY_OS_WINCE) || defined(KY_OS_XBOX) || defined(KY_OS_XBOX360)
988 
989  CRITICAL_SECTION cs;
990 public:
991  KY_EXPORT Lock(unsigned spinCount = 0);
992  KY_EXPORT ~Lock();
993  // Locking functions.
994  inline void DoLock() { ::EnterCriticalSection(&cs); }
995  inline void Unlock() { ::LeaveCriticalSection(&cs); }
996 
997 #elif defined(KY_OS_PS3)
998 
999  UByte mutex[sizeof(sys_lwmutex_t) + 4] __attribute__((aligned(4)));
1000  sys_lwmutex_t* pmutex;
1001  static sys_lwmutex_attribute_t LockAttr;
1002 
1003 public:
1004 // static pthread_mutexattr_t RecursiveAttr;
1005 // static bool RecursiveAttrInit;
1006 
1007  Lock (unsigned dummy = 0)
1008  {
1009  pmutex = (sys_lwmutex_t *) (UPInt(&mutex) & 4 ? mutex+4 : mutex);
1010  sys_lwmutex_create(pmutex,&LockAttr);
1011  KY_UNUSED(dummy);
1012  }
1013  ~Lock () { sys_lwmutex_destroy(pmutex); }
1014  inline void DoLock() { sys_lwmutex_lock(pmutex,SYS_NO_TIMEOUT); }
1015  inline void Unlock() { sys_lwmutex_unlock(pmutex); }
1016 
1017 #elif defined(KY_OS_WII) || defined(KY_OS_WIIU)
1018  OSMutex mutex;
1019 public:
1020  Lock (unsigned dummy = 0) { OSInitMutex(&mutex); KY_UNUSED(dummy); }
1021  inline void DoLock() { OSLockMutex(&mutex); }
1022  inline void Unlock() { OSUnlockMutex(&mutex); }
1023 
1024 #elif defined(KY_OS_PSVITA)
1025  UByte mutex[sizeof(SceKernelLwMutexWork) + 4] __attribute__((aligned(4)));
1026  SceKernelLwMutexWork* pmutex;
1027 
1028 public:
1029  Lock (unsigned dummy = 0)
1030  {
1031  pmutex = (SceKernelLwMutexWork *) (UPInt(&mutex) & 4 ? mutex+4 : mutex);
1032  int result = sceKernelCreateLwMutex(pmutex, "SF::Lock", SCE_KERNEL_LW_MUTEX_ATTR_RECURSIVE | SCE_KERNEL_ATTR_TH_FIFO, 0, NULL);
1033  KY_ASSERT(result == SCE_OK);
1034  KY_UNUSED(dummy);
1035  }
1036  ~Lock() { sceKernelDeleteLwMutex(pmutex); memset(mutex, 0xfe, sizeof(mutex)); }
1037  inline void DoLock() { int result = sceKernelLockLwMutex(pmutex, 1, NULL); KY_ASSERT(result == SCE_OK); }
1038  inline void Unlock() { int result = sceKernelUnlockLwMutex(pmutex, 1); KY_ASSERT(result == SCE_OK); }
1039 
1040 #elif defined(KY_OS_3DS)
1041  nn::os::CriticalSection cs;
1042 
1043 public:
1044  Lock (unsigned dummy = 0) { KY_UNUSED(dummy); cs.Initialize(); }
1045  inline void DoLock() { cs.Enter(); }
1046  inline void Unlock() { cs.Leave(); }
1047 
1048 #else
1049  pthread_mutex_t mutex;
1050 
1051 public:
1052  static pthread_mutexattr_t RecursiveAttr;
1053  static bool RecursiveAttrInit;
1054 
1055  Lock (unsigned dummy = 0)
1056  {
1057  if (!RecursiveAttrInit)
1058  {
1059  pthread_mutexattr_init(&RecursiveAttr);
1060  pthread_mutexattr_settype(&RecursiveAttr, PTHREAD_MUTEX_RECURSIVE);
1061  RecursiveAttrInit = 1;
1062  KY_UNUSED(dummy);
1063  }
1064  pthread_mutex_init(&mutex,&RecursiveAttr);
1065  }
1066  ~Lock () { pthread_mutex_destroy(&mutex); }
1067  inline void DoLock() { pthread_mutex_lock(&mutex); }
1068  inline void Unlock() { pthread_mutex_unlock(&mutex); }
1069 
1070 #endif // KY_ENABLE_THREADS
1071 
1072 
1073 public:
1074  // Locker class, used for automatic locking
1075  class Locker
1076  {
1077  public:
1078  Lock *pLock;
1079  inline Locker(Lock *plock)
1080  { pLock = plock; pLock->DoLock(); }
1081  inline ~Locker()
1082  { pLock->Unlock(); }
1083  };
1084 };
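A short usage sketch of Lock and its scoped Locker (ExampleCounter is an illustrative name, not part of this header):

    class ExampleCounter
    {
        Kaim::Lock CounterLock;
        int        Count;
    public:
        ExampleCounter() : Count(0) { }

        void Add(int n)
        {
            Kaim::Lock::Locker guard(&CounterLock);  // DoLock() now, Unlock() at scope exit
            Count += n;
        }

        int Get()
        {
            Kaim::Lock::Locker guard(&CounterLock);
            return Count;
        }
    };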
1085 
1086 
1087 
1088 // Safe lock for defensive design with assertion. Also contains
1089 // temporary unlocker.
1090 //-------------------------------------------
1091 class LockSafe
1092 {
1093 public:
1094  LockSafe(unsigned spinCount = 0) : mLock(spinCount)
1095 #ifdef KY_BUILD_DEBUG
1096  , LockCount(0)
1097 #endif
1098  {}
1099 
1100  void DoLock()
1101  {
1102 #ifdef KY_BUILD_DEBUG
1103  LockCount++;
1104 #endif
1105  mLock.DoLock();
1106  }
1107 
1108  void Unlock()
1109  {
1110  mLock.Unlock();
1111 #ifdef KY_BUILD_DEBUG
1112  KY_ASSERT(LockCount.ExchangeAdd_NoSync(-1) > 0);
1113 #endif
1114  }
1115 
1116 #ifdef KY_BUILD_DEBUG
1117  bool IsLocked() const
1118  {
1119  return LockCount != 0;
1120  }
1121 #endif
1122 
1123  class Locker
1124  {
1125  public:
1126  LockSafe *pLock;
1127  Locker(LockSafe *lock)
1128  {
1129  pLock = lock;
1130  pLock->DoLock();
1131  }
1132  ~Locker()
1133  {
1134  pLock->Unlock();
1135  }
1136  };
1137 
1138  class TmpUnlocker
1139  {
1140  public:
1141  LockSafe *pLock;
1142  TmpUnlocker(LockSafe *lock)
1143  {
1144  pLock = lock;
1145  pLock->Unlock();
1146  }
1147  ~TmpUnlocker()
1148  {
1149  pLock->DoLock();
1150  }
1151  };
1152 
1153 
1154 private:
1155  Lock mLock;
1156 #ifdef KY_BUILD_DEBUG
1157  AtomicInt<int> LockCount;
1158 #endif
1159 };
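A hedged sketch of LockSafe's Locker and TmpUnlocker working together (the function below and its contents are illustrative only):

    void ExampleUpdate(Kaim::LockSafe& lock)
    {
        Kaim::LockSafe::Locker guard(&lock);             // locked for the whole function
        // ... work on state protected by 'lock' ...
        {
            Kaim::LockSafe::TmpUnlocker release(&lock);  // temporarily drop the lock
            // ... blocking call that must not hold the lock ...
        }                                                // TmpUnlocker re-acquires here
        // ... more protected work ...
    }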
1160 
1161 
1162 
1163 } // namespace Kaim
1164 
1165 #endif