// Excerpt from ThreadClock::ReleaseStore() in compiler-rt/lib/tsan/rtl/tsan_clock.cpp.
// Fast path 1: the target sync clock is empty; install the cached, shared clock block.
if (dst->size_ == 0 && cached_idx_ != 0) {
  dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
  dst->tab_idx_ = cached_idx_;
  dst->size_ = cached_size_;
  dst->blocks_ = cached_blocks_;
  CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
  // The cached block is shared (immutable), so the current thread's epoch
  // is stored in a dirty entry instead of modifying the block itself.
  dst->dirty_[0].tid = tid_;
  dst->dirty_[0].epoch = clk_[tid_];
  dst->release_store_tid_ = tid_;
  dst->release_store_reused_ = reused_;
  // Remember that we don't need to acquire it in future.
  dst->elem(tid_).reused = reused_;
  // Grab a reference to the shared clock block.
  atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
  return;
}

// Resize dst if it is smaller than this thread's clock.
if (dst->size_ < nclk_)
  dst->Resize(c, nclk_);

// Fast path 2: this thread did the previous release-store to dst and has
// not acquired anything since, so only its own element needs updating.
if (dst->release_store_tid_ == tid_ &&
    dst->release_store_reused_ == reused_ &&
    dst->elem(tid_).epoch > last_acquire_) {
  UpdateCurrentThread(c, dst);
  return;
}

// Slow path: O(N) release-store that overwrites dst with this thread's clock.
dst->Unshare(c);
uptr i = 0;
for (ClockElem &ce : *dst) {
  ce.epoch = i < nclk_ ? clk_[i] : 0;
  ce.reused = 0;
  i++;
}
for (uptr i = 0; i < kDirtyTids; i++)
  dst->dirty_[i].tid = kInvalidTid;
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
// Remember that we don't need to acquire it in future.
dst->elem(tid_).reused = reused_;

// If the resulting clock is cachable, cache it for future release operations.
if (cached_idx_ == 0 && dst->Cachable()) {
  // Grab a reference to the clock block we are about to cache.
  atomic_uint32_t *ref = ref_ptr(dst->tab_);
  if (atomic_load(ref, memory_order_acquire) == 1)
    atomic_store_relaxed(ref, 2);
  else
    atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
  cached_idx_ = dst->tab_idx_;
  cached_size_ = dst->size_;
  cached_blocks_ = dst->blocks_;
}
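For context, the effect that all of this machinery implements can be sketched without TSan's caching, dirty entries, or reference counting. The snippet below is an illustrative model only; the names VectorClock, release, and releaseStore are made up for this sketch and are not TSan APIs. A plain release joins the thread's clock into the sync object's clock element-wise, while a release-store overwrites it, which is what ReleaseStore() above does together with its fast paths and block caching.

#include <algorithm>
#include <cstdint>
#include <vector>

using Epoch = uint64_t;

struct VectorClock {
  std::vector<Epoch> epochs;  // epochs[t] = last epoch observed for thread t
};

// Plain release: dst becomes the element-wise maximum of dst and src.
void release(const VectorClock &src, VectorClock &dst) {
  if (dst.epochs.size() < src.epochs.size())
    dst.epochs.resize(src.epochs.size(), 0);
  for (size_t t = 0; t < src.epochs.size(); ++t)
    dst.epochs[t] = std::max(dst.epochs[t], src.epochs[t]);
}

// Release-store: dst is simply replaced by the releasing thread's clock.
void releaseStore(const VectorClock &src, VectorClock &dst) {
  dst.epochs = src.epochs;
}

int main() {
  VectorClock thread{{3, 1, 0}};  // releasing thread's current clock
  VectorClock sync{{1, 5, 2}};    // sync object's previous clock
  releaseStore(thread, sync);     // sync.epochs is now {3, 1, 0}, not the join {3, 5, 2}
  return 0;
}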