1#include "producer_channel.h"
2
3#include <log/log.h>
4#include <sync/sync.h>
5#include <sys/epoll.h>
6#include <sys/eventfd.h>
7#include <sys/poll.h>
8#include <utils/Trace.h>
9
10#include <algorithm>
11#include <atomic>
12#include <thread>
13
14#include <private/dvr/bufferhub_rpc.h>
15#include "consumer_channel.h"
16#include "detached_buffer_channel.h"
17
18using android::pdx::BorrowedHandle;
19using android::pdx::ErrorStatus;
20using android::pdx::Message;
21using android::pdx::RemoteChannelHandle;
22using android::pdx::Status;
23using android::pdx::rpc::BufferWrapper;
24using android::pdx::rpc::DispatchRemoteMethod;
25using android::pdx::rpc::WrapBuffer;
26
27namespace android {
28namespace dvr {
29
30namespace {
31
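// Returns the lowest-order bit of |bits| that is still cleared (i.e. the
// lowest set bit of ~bits), or 0 if all 64 bits are already set.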
static inline uint64_t FindNextClearedBit(uint64_t bits) {
  return ~bits - (~bits & (~bits - 1));
}

}  // namespace

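// Constructs a producer channel from buffers that already exist, e.g. when a
// detached buffer is promoted back into a producer. The channel takes
// ownership of the graphic buffer and metadata buffer and keeps |buffer_id|.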
ProducerChannel::ProducerChannel(BufferHubService* service, int buffer_id,
                                 int channel_id, IonBuffer buffer,
                                 IonBuffer metadata_buffer,
                                 size_t user_metadata_size, int* error)
    : BufferHubChannel(service, buffer_id, channel_id, kProducerType),
      buffer_(std::move(buffer)),
      metadata_buffer_(std::move(metadata_buffer)),
      user_metadata_size_(user_metadata_size),
      metadata_buf_size_(BufferHubDefs::kMetadataHeaderSize +
                         user_metadata_size) {
  if (!buffer_.IsValid()) {
    ALOGE("ProducerChannel::ProducerChannel: Invalid buffer.");
    *error = -EINVAL;
    return;
  }
  if (!metadata_buffer_.IsValid()) {
    ALOGE("ProducerChannel::ProducerChannel: Invalid metadata buffer.");
    *error = -EINVAL;
    return;
  }

  *error = InitializeBuffer();
}

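// Constructs a producer channel that allocates a new graphic buffer and
// metadata buffer. The buffer id is the same as the channel id, and the
// producer starts out owning the buffer (gained state).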
ProducerChannel::ProducerChannel(BufferHubService* service, int channel_id,
                                 uint32_t width, uint32_t height,
                                 uint32_t layer_count, uint32_t format,
                                 uint64_t usage, size_t user_metadata_size,
                                 int* error)
    : BufferHubChannel(service, channel_id, channel_id, kProducerType),
      pending_consumers_(0),
      producer_owns_(true),
      user_metadata_size_(user_metadata_size),
      metadata_buf_size_(BufferHubDefs::kMetadataHeaderSize +
                         user_metadata_size) {
  if (int ret = buffer_.Alloc(width, height, layer_count, format, usage)) {
    ALOGE("ProducerChannel::ProducerChannel: Failed to allocate buffer: %s",
          strerror(-ret));
    *error = ret;
    return;
  }

  if (int ret = metadata_buffer_.Alloc(metadata_buf_size_, /*height=*/1,
                                       /*layer_count=*/1,
                                       BufferHubDefs::kMetadataFormat,
                                       BufferHubDefs::kMetadataUsage)) {
    ALOGE("ProducerChannel::ProducerChannel: Failed to allocate metadata: %s",
          strerror(-ret));
    *error = ret;
    return;
  }

  *error = InitializeBuffer();
}

int ProducerChannel::InitializeBuffer() {
  void* metadata_ptr = nullptr;
  if (int ret = metadata_buffer_.Lock(BufferHubDefs::kMetadataUsage, /*x=*/0,
                                      /*y=*/0, metadata_buf_size_,
                                      /*height=*/1, &metadata_ptr)) {
    ALOGE("ProducerChannel::InitializeBuffer: Failed to lock metadata.");
    return ret;
  }
  metadata_header_ =
      reinterpret_cast<BufferHubDefs::MetadataHeader*>(metadata_ptr);

  // Using placement new here to reuse shared memory instead of new allocation
  // and also initialize the value to zero.
  buffer_state_ =
      new (&metadata_header_->buffer_state) std::atomic<uint64_t>(0);
  fence_state_ =
      new (&metadata_header_->fence_state) std::atomic<uint64_t>(0);

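  // The "shared fences" handed out to clients are epoll fds rather than sync
  // fences: they aggregate the per-cycle acquire/release sync fds. A dummy
  // eventfd is kept registered in the release fence set so the service can
  // signal a release on behalf of a consumer that disappears without
  // releasing (see RemoveConsumer).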
  acquire_fence_fd_.Reset(epoll_create1(EPOLL_CLOEXEC));
  release_fence_fd_.Reset(epoll_create1(EPOLL_CLOEXEC));
  if (!acquire_fence_fd_ || !release_fence_fd_) {
    ALOGE("ProducerChannel::InitializeBuffer: Failed to create shared fences.");
    return -EIO;
  }

  dummy_fence_fd_.Reset(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
  if (!dummy_fence_fd_) {
    ALOGE("ProducerChannel::InitializeBuffer: Failed to create dummy fence.");
    return -EIO;
  }

  epoll_event event;
  event.events = 0;
  event.data.u64 = 0ULL;
  if (epoll_ctl(release_fence_fd_.Get(), EPOLL_CTL_ADD, dummy_fence_fd_.Get(),
                &event) < 0) {
    ALOGE(
        "ProducerChannel::InitializeBuffer: Failed to add the dummy fence to "
        "the shared release fence: %s",
        strerror(errno));
    return -EIO;
  }

  // Success.
  return 0;
}

std::unique_ptr<ProducerChannel> ProducerChannel::Create(
    BufferHubService* service, int buffer_id, int channel_id, IonBuffer buffer,
    IonBuffer metadata_buffer, size_t user_metadata_size) {
  int error = 0;
  std::unique_ptr<ProducerChannel> producer(new ProducerChannel(
      service, buffer_id, channel_id, std::move(buffer),
      std::move(metadata_buffer), user_metadata_size, &error));

  if (error < 0)
    return nullptr;
  else
    return producer;
}

Status<std::shared_ptr<ProducerChannel>> ProducerChannel::Create(
    BufferHubService* service, int channel_id, uint32_t width, uint32_t height,
    uint32_t layer_count, uint32_t format, uint64_t usage,
    size_t user_metadata_size) {
  int error = 0;
  std::shared_ptr<ProducerChannel> producer(
      new ProducerChannel(service, channel_id, width, height, layer_count,
                          format, usage, user_metadata_size, &error));
  if (error < 0)
    return ErrorStatus(-error);
  else
    return {std::move(producer)};
}

ProducerChannel::~ProducerChannel() {
  ALOGD_IF(TRACE,
           "ProducerChannel::~ProducerChannel: channel_id=%d buffer_id=%d "
           "state=%" PRIx64 ".",
           channel_id(), buffer_id(), buffer_state_->load());
  for (auto consumer : consumer_channels_) {
    consumer->OnProducerClosed();
  }
  Hangup();
}

BufferHubChannel::BufferInfo ProducerChannel::GetBufferInfo() const {
  // Derive the mask of signaled buffers in this producer / consumer set.
  uint64_t signaled_mask = signaled() ? BufferHubDefs::kProducerStateBit : 0;
  for (const ConsumerChannel* consumer : consumer_channels_) {
    signaled_mask |= consumer->signaled() ? consumer->consumer_state_bit() : 0;
  }

  return BufferInfo(buffer_id(), consumer_channels_.size(), buffer_.width(),
                    buffer_.height(), buffer_.layer_count(), buffer_.format(),
                    buffer_.usage(), pending_consumers_, buffer_state_->load(),
                    signaled_mask, metadata_header_->queue_index);
}

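// Impulses are one-way messages with no reply payload; errors from the
// handlers below are logged by the handlers themselves and otherwise dropped.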
void ProducerChannel::HandleImpulse(Message& message) {
  ATRACE_NAME("ProducerChannel::HandleImpulse");
  switch (message.GetOp()) {
    case BufferHubRPC::ProducerGain::Opcode:
      OnProducerGain(message);
      break;
    case BufferHubRPC::ProducerPost::Opcode:
      OnProducerPost(message, {});
      break;
  }
}

bool ProducerChannel::HandleMessage(Message& message) {
  ATRACE_NAME("ProducerChannel::HandleMessage");
  switch (message.GetOp()) {
    case BufferHubRPC::GetBuffer::Opcode:
      DispatchRemoteMethod<BufferHubRPC::GetBuffer>(
          *this, &ProducerChannel::OnGetBuffer, message);
      return true;

    case BufferHubRPC::NewConsumer::Opcode:
      DispatchRemoteMethod<BufferHubRPC::NewConsumer>(
          *this, &ProducerChannel::OnNewConsumer, message);
      return true;

    case BufferHubRPC::ProducerPost::Opcode:
      DispatchRemoteMethod<BufferHubRPC::ProducerPost>(
          *this, &ProducerChannel::OnProducerPost, message);
      return true;

    case BufferHubRPC::ProducerGain::Opcode:
      DispatchRemoteMethod<BufferHubRPC::ProducerGain>(
          *this, &ProducerChannel::OnProducerGain, message);
      return true;

    case BufferHubRPC::ProducerBufferDetach::Opcode:
      DispatchRemoteMethod<BufferHubRPC::ProducerBufferDetach>(
          *this, &ProducerChannel::OnProducerDetach, message);
      return true;

    default:
      return false;
  }
}

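// Builds the buffer description sent to a client. The handles are borrowed
// rather than duplicated, and |buffer_state_bit| identifies which bit of the
// shared buffer state the receiving client owns.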
BufferDescription<BorrowedHandle> ProducerChannel::GetBuffer(
    uint64_t buffer_state_bit) {
  return {
      buffer_,          metadata_buffer_,           buffer_id(),
      buffer_state_bit, acquire_fence_fd_.Borrow(), release_fence_fd_.Borrow()};
}

Status<BufferDescription<BorrowedHandle>> ProducerChannel::OnGetBuffer(
    Message& /*message*/) {
  ATRACE_NAME("ProducerChannel::OnGetBuffer");
  ALOGD_IF(TRACE, "ProducerChannel::OnGetBuffer: buffer=%d, state=%" PRIx64 ".",
           buffer_id(), buffer_state_->load());
  return {GetBuffer(BufferHubDefs::kProducerStateBit)};
}

Status<RemoteChannelHandle> ProducerChannel::CreateConsumer(Message& message) {
  ATRACE_NAME("ProducerChannel::CreateConsumer");
  ALOGD_IF(TRACE,
           "ProducerChannel::CreateConsumer: buffer_id=%d, producer_owns=%d",
           buffer_id(), producer_owns_);

  int channel_id;
  auto status = message.PushChannel(0, nullptr, &channel_id);
  if (!status) {
    ALOGE(
        "ProducerChannel::CreateConsumer: Failed to push consumer channel: %s",
        status.GetErrorMessage().c_str());
    return ErrorStatus(ENOMEM);
  }

  // Try to find the next consumer state bit that has not yet been claimed by
  // any consumer.
  uint64_t consumer_state_bit = FindNextClearedBit(
      active_consumer_bit_mask_ | orphaned_consumer_bit_mask_ |
      BufferHubDefs::kProducerStateBit);
  if (consumer_state_bit == 0ULL) {
    ALOGE(
        "ProducerChannel::CreateConsumer: reached the maximum number of "
        "consumers per producer: 63.");
    return ErrorStatus(E2BIG);
  }

  auto consumer =
      std::make_shared<ConsumerChannel>(service(), buffer_id(), channel_id,
                                        consumer_state_bit, shared_from_this());
  const auto channel_status = service()->SetChannel(channel_id, consumer);
  if (!channel_status) {
    ALOGE(
        "ProducerChannel::CreateConsumer: failed to set new consumer channel: "
        "%s",
        channel_status.GetErrorMessage().c_str());
    return ErrorStatus(ENOMEM);
  }

  if (!producer_owns_ &&
      !BufferHubDefs::IsBufferReleased(buffer_state_->load())) {
    // Signal the new consumer when adding it to a posted producer.
    if (consumer->OnProducerPosted())
      pending_consumers_++;
  }

  active_consumer_bit_mask_ |= consumer_state_bit;
  return {status.take()};
}

Status<RemoteChannelHandle> ProducerChannel::OnNewConsumer(Message& message) {
  ATRACE_NAME("ProducerChannel::OnNewConsumer");
  ALOGD_IF(TRACE, "ProducerChannel::OnNewConsumer: buffer_id=%d", buffer_id());
  return CreateConsumer(message);
}

Status<void> ProducerChannel::OnProducerPost(
    Message&, LocalFence acquire_fence) {
  ATRACE_NAME("ProducerChannel::OnProducerPost");
  ALOGD_IF(TRACE, "ProducerChannel::OnProducerPost: buffer_id=%d", buffer_id());
  if (!producer_owns_) {
    ALOGE("ProducerChannel::OnProducerPost: Not in gained state!");
    return ErrorStatus(EBUSY);
  }

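  // Disarm the dummy fence in the shared release fence's epoll set and drain
  // any count left over from the previous release/gain cycle so that a stale
  // dummy signal is not mistaken for a real consumer release.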
  epoll_event event;
  event.events = 0;
  event.data.u64 = 0ULL;
  int ret = epoll_ctl(release_fence_fd_.Get(), EPOLL_CTL_MOD,
                      dummy_fence_fd_.Get(), &event);
  ALOGE_IF(ret < 0,
           "ProducerChannel::OnProducerPost: Failed to modify the shared "
           "release fence to exclude the dummy fence: %s",
           strerror(errno));

  eventfd_t dummy_fence_count = 0ULL;
  if (eventfd_read(dummy_fence_fd_.Get(), &dummy_fence_count) < 0) {
    const int error = errno;
    if (error != EAGAIN) {
      ALOGE(
          "ProducerChannel::OnProducerPost: Failed to read dummy fence, "
          "error: %s",
          strerror(error));
      return ErrorStatus(error);
    }
  }

  ALOGW_IF(dummy_fence_count > 0,
           "ProducerChannel::OnProducerPost: %" PRIu64
           " dummy fence(s) were signaled during the last release/gain cycle "
           "buffer_id=%d.",
           dummy_fence_count, buffer_id());

  post_fence_ = std::move(acquire_fence);
  producer_owns_ = false;

  // Signal any interested consumers. If there are none, the buffer will stay
  // in posted state until a consumer comes online. This behavior guarantees
  // that no frame is silently dropped.
  pending_consumers_ = 0;
  for (auto consumer : consumer_channels_) {
    if (consumer->OnProducerPosted())
      pending_consumers_++;
  }
  ALOGD_IF(TRACE, "ProducerChannel::OnProducerPost: %d pending consumers",
           pending_consumers_);

  return {};
}

Status<LocalFence> ProducerChannel::OnProducerGain(Message& /*message*/) {
  ATRACE_NAME("ProducerChannel::OnGain");
  ALOGD_IF(TRACE, "ProducerChannel::OnGain: buffer_id=%d", buffer_id());
  if (producer_owns_) {
    ALOGE("ProducerChannel::OnGain: Already in gained state: channel=%d",
          channel_id());
    return ErrorStatus(EALREADY);
  }

  // There are still pending consumers, return busy.
  if (pending_consumers_ > 0) {
    ALOGE(
        "ProducerChannel::OnGain: Producer (id=%d) is gaining a buffer that "
        "still has %d pending consumer(s).",
        buffer_id(), pending_consumers_);
    return ErrorStatus(EBUSY);
  }

  ClearAvailable();
  producer_owns_ = true;
  post_fence_.close();
  return {std::move(returned_fence_)};
}

Status<RemoteChannelHandle> ProducerChannel::OnProducerDetach(
    Message& message) {
  ATRACE_NAME("ProducerChannel::OnProducerDetach");
  ALOGD_IF(TRACE, "ProducerChannel::OnProducerDetach: buffer_id=%d",
           buffer_id());

  uint64_t buffer_state = buffer_state_->load();
  if (!BufferHubDefs::IsBufferGained(buffer_state)) {
    // Can only detach a BufferProducer when it's in gained state.
    ALOGW(
        "ProducerChannel::OnProducerDetach: The buffer (id=%d, state=0x%" PRIx64
        ") is not in gained state.",
        buffer_id(), buffer_state);
    return {};
  }

  int channel_id;
  auto status = message.PushChannel(0, nullptr, &channel_id);
  if (!status) {
    ALOGE(
        "ProducerChannel::OnProducerDetach: Failed to push detached buffer "
        "channel: %s",
        status.GetErrorMessage().c_str());
    return ErrorStatus(ENOMEM);
  }

  // Make sure we unlock the buffer.
  if (int ret = metadata_buffer_.Unlock()) {
    ALOGE("ProducerChannel::OnProducerDetach: Failed to unlock metadata.");
    return ErrorStatus(-ret);
  }

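  // Move the graphic buffer and metadata buffer into a new
  // DetachedBufferChannel; after this point this ProducerChannel no longer
  // owns either buffer.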
  std::unique_ptr<DetachedBufferChannel> channel =
      DetachedBufferChannel::Create(
          service(), buffer_id(), channel_id, std::move(buffer_),
          std::move(metadata_buffer_), user_metadata_size_);
  if (!channel) {
    ALOGE("ProducerChannel::OnProducerDetach: Invalid buffer.");
    return ErrorStatus(EINVAL);
  }

  const auto channel_status =
      service()->SetChannel(channel_id, std::move(channel));
  if (!channel_status) {
    // Technically, this should never fail, as we just pushed the channel. Note
    // that LOG_FATAL will be stripped out in a non-debug build.
    LOG_FATAL(
        "ProducerChannel::OnProducerDetach: Failed to set new detached buffer "
        "channel: %s.",
        channel_status.GetErrorMessage().c_str());
  }

  return status;
}

Status<LocalFence> ProducerChannel::OnConsumerAcquire(Message& /*message*/) {
  ATRACE_NAME("ProducerChannel::OnConsumerAcquire");
  ALOGD_IF(TRACE, "ProducerChannel::OnConsumerAcquire: buffer_id=%d",
           buffer_id());
  if (producer_owns_) {
    ALOGE("ProducerChannel::OnConsumerAcquire: Not in posted state!");
    return ErrorStatus(EBUSY);
  }

  // Return a borrowed fd to avoid unnecessary duplication of the underlying
  // fd. Serialization just needs to read the handle.
  return {std::move(post_fence_)};
}

Status<void> ProducerChannel::OnConsumerRelease(Message&,
                                                LocalFence release_fence) {
  ATRACE_NAME("ProducerChannel::OnConsumerRelease");
  ALOGD_IF(TRACE, "ProducerChannel::OnConsumerRelease: buffer_id=%d",
           buffer_id());
  if (producer_owns_) {
    ALOGE("ProducerChannel::OnConsumerRelease: Not in acquired state!");
    return ErrorStatus(EBUSY);
  }

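  // Each consumer may hand back its own release fence. Fold incoming fences
  // into returned_fence_ with sync_merge() so the producer only has to wait on
  // a single fence when it gains the buffer again.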
  // Attempt to merge the fences if necessary.
  if (release_fence) {
    if (returned_fence_) {
      LocalFence merged_fence(sync_merge("bufferhub_merged",
                                         returned_fence_.get_fd(),
                                         release_fence.get_fd()));
      const int error = errno;
      if (!merged_fence) {
        ALOGE("ProducerChannel::OnConsumerRelease: Failed to merge fences: %s",
              strerror(error));
        return ErrorStatus(error);
      }
      returned_fence_ = std::move(merged_fence);
    } else {
      returned_fence_ = std::move(release_fence);
    }
  }

  OnConsumerIgnored();
  if (pending_consumers_ == 0) {
    // Clear the producer bit atomically to transition into released state.
    // This has to be done by BufferHub as it requires synchronization among
    // all consumers.
    BufferHubDefs::ModifyBufferState(buffer_state_,
                                     BufferHubDefs::kProducerStateBit, 0ULL);
    ALOGD_IF(TRACE,
             "ProducerChannel::OnConsumerRelease: releasing last consumer: "
             "buffer_id=%d state=%" PRIx64 ".",
             buffer_id(), buffer_state_->load());

    if (orphaned_consumer_bit_mask_) {
      ALOGW(
          "ProducerChannel::OnConsumerRelease: orphaned buffer detected "
          "during this acquire/release cycle: id=%d orphaned=0x%" PRIx64
          " queue_index=%" PRIu64 ".",
          buffer_id(), orphaned_consumer_bit_mask_,
          metadata_header_->queue_index);
      orphaned_consumer_bit_mask_ = 0;
    }

    SignalAvailable();
  }

  ALOGE_IF(pending_consumers_ &&
               BufferHubDefs::IsBufferReleased(buffer_state_->load()),
           "ProducerChannel::OnConsumerRelease: buffer state inconsistent: "
           "pending_consumers=%d, but the buffer is in released state.",
           pending_consumers_);
  return {};
}

void ProducerChannel::OnConsumerIgnored() {
  if (pending_consumers_ == 0) {
    ALOGE("ProducerChannel::OnConsumerIgnored: no pending consumer.");
    return;
  }

  --pending_consumers_;
  ALOGD_IF(TRACE,
           "ProducerChannel::OnConsumerIgnored: buffer_id=%d %d consumers left",
           buffer_id(), pending_consumers_);
}

void ProducerChannel::OnConsumerOrphaned(ConsumerChannel* channel) {
  // Ignore the orphaned consumer.
  OnConsumerIgnored();

  const uint64_t consumer_state_bit = channel->consumer_state_bit();
  ALOGE_IF(orphaned_consumer_bit_mask_ & consumer_state_bit,
           "ProducerChannel::OnConsumerOrphaned: Consumer "
           "(consumer_state_bit=%" PRIx64 ") is already orphaned.",
           consumer_state_bit);
  orphaned_consumer_bit_mask_ |= consumer_state_bit;

  // Atomically clear the fence state bit, as an orphaned consumer will never
  // signal a release fence. Also clear its buffer state bit, since the
  // consumer will never release the buffer either.
  fence_state_->fetch_and(~consumer_state_bit);
  BufferHubDefs::ModifyBufferState(buffer_state_, consumer_state_bit, 0ULL);

  ALOGW(
      "ProducerChannel::OnConsumerOrphaned: detected new orphaned consumer "
      "buffer_id=%d consumer_state_bit=%" PRIx64 " queue_index=%" PRIu64
      " buffer_state=%" PRIx64 " fence_state=%" PRIx64 ".",
      buffer_id(), consumer_state_bit, metadata_header_->queue_index,
      buffer_state_->load(), fence_state_->load());
}

void ProducerChannel::AddConsumer(ConsumerChannel* channel) {
  consumer_channels_.push_back(channel);
}

void ProducerChannel::RemoveConsumer(ConsumerChannel* channel) {
  consumer_channels_.erase(
      std::find(consumer_channels_.begin(), consumer_channels_.end(), channel));
  active_consumer_bit_mask_ &= ~channel->consumer_state_bit();

  const uint64_t buffer_state = buffer_state_->load();
  if (BufferHubDefs::IsBufferPosted(buffer_state) ||
      BufferHubDefs::IsBufferAcquired(buffer_state)) {
    // The consumer client is being destroyed without releasing. This could
    // happen in corner cases when the consumer crashes. Here we mark it
    // orphaned before removing it from the producer.
    OnConsumerOrphaned(channel);
  }

  if (BufferHubDefs::IsBufferReleased(buffer_state) ||
      BufferHubDefs::IsBufferGained(buffer_state)) {
    // The consumer is being closed while it is supposed to signal a release
    // fence. Signal the dummy fence here.
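    // Re-arming the dummy eventfd with EPOLLIN and the consumer's state bit as
    // the event data makes the shared release fence appear signaled for that
    // consumer once the eventfd is written below.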
    if (fence_state_->load() & channel->consumer_state_bit()) {
      epoll_event event;
      event.events = EPOLLIN;
      event.data.u64 = channel->consumer_state_bit();
      if (epoll_ctl(release_fence_fd_.Get(), EPOLL_CTL_MOD,
                    dummy_fence_fd_.Get(), &event) < 0) {
        ALOGE(
            "ProducerChannel::RemoveConsumer: Failed to modify the shared "
            "release fence to include the dummy fence: %s",
            strerror(errno));
        return;
      }
      ALOGW(
          "ProducerChannel::RemoveConsumer: signal dummy release fence "
          "buffer_id=%d",
          buffer_id());
      eventfd_write(dummy_fence_fd_.Get(), 1);
    }
  }
}

// Returns true if the given parameters match the underlying buffer parameters.
bool ProducerChannel::CheckParameters(uint32_t width, uint32_t height,
                                      uint32_t layer_count, uint32_t format,
                                      uint64_t usage,
                                      size_t user_metadata_size) {
  return user_metadata_size == user_metadata_size_ &&
         buffer_.width() == width && buffer_.height() == height &&
         buffer_.layer_count() == layer_count && buffer_.format() == format &&
         buffer_.usage() == usage;
}

}  // namespace dvr
}  // namespace android