idx | func_before | Vulnerability Classification | vul | func_after | patch | CWE ID | lines_before | lines_after
---|---|---|---|---|---|---|---|---|
int64 | string | string | int64 | string | string | string | string | string
idx: 150,800

func_before:
Tracks::~Tracks()
{
Track** i = m_trackEntries;
Track** const j = m_trackEntriesEnd;
while (i != j)
{
Track* const pTrack = *i++;
delete pTrack;
}
delete[] m_trackEntries;
}
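The destructor above hand-rolls ownership: `m_trackEntries` is a heap array of heap-allocated `Track` pointers, so every element must be deleted before the array itself is released. A minimal sketch of the same two-level cleanup pattern (all names hypothetical, not from libwebm):

```cpp
#include <cstddef>

struct Item {};  // stands in for Track

class Owner {
 public:
  explicit Owner(std::size_t n)
      : m_entries(new Item*[n]), m_end(m_entries + n) {
    for (Item** p = m_entries; p != m_end; ++p)
      *p = new Item();  // each slot owns one heap object
  }

  ~Owner() {
    // Same shape as Tracks::~Tracks(): destroy the elements first,
    // then release the slot array that held them. Deleting only the
    // array would leak every Item.
    for (Item** p = m_entries; p != m_end; ++p)
      delete *p;
    delete[] m_entries;
  }

 private:
  // Copying a raw-owning container like this would double-delete on
  // destruction; real code must forbid or correctly implement copies.
  Owner(const Owner&);
  Owner& operator=(const Owner&);

  Item** m_entries;
  Item** m_end;
};

int main() {
  Owner owner(16);  // allocates 16 Items; all freed on scope exit
  return 0;
}
```

Pre-C++11 code like mkvparser manages this by hand; modern code would reach for `std::vector<std::unique_ptr<Track>>` to make the same cleanup automatic.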
Vulnerability Classification: DoS Exec Code Overflow Mem. Corr.

vul: 1

func_after: Tracks::~Tracks()
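The patch below is dominated by mkvparser's EBML helpers. The central algorithm, implemented by `ReadUInt` and `GetUIntLength` in the diff, is EBML variable-length integer decoding: the position of the first set bit in the leading byte gives the total encoded length, that marker bit is masked off, and the remaining bytes are shifted in big-endian order. A self-contained sketch of that decode over an in-memory buffer (a hypothetical helper, not part of the libwebm API, which reads through `IMkvReader` instead):

```cpp
#include <cassert>
#include <cstddef>

// Decode one EBML variable-length unsigned integer from an in-memory
// buffer. The number of leading zero bits in buf[0] determines how many
// bytes follow; the marker bit itself is not part of the value.
// Returns the decoded value, or -1 on malformed or truncated input;
// on success, *len receives the encoded length in bytes (1..8).
long long DecodeEbmlUInt(const unsigned char* buf, std::size_t avail,
                         int* len) {
  assert(buf && len);

  if (avail == 0)
    return -1;

  const unsigned char first = buf[0];

  if (first == 0)  // would encode a length > 8 bytes; not handled
    return -1;

  int length = 1;
  unsigned char m = 0x80;

  while (!(first & m)) {  // each leading zero adds one payload byte
    m >>= 1;
    ++length;
  }

  if (static_cast<std::size_t>(length) > avail)
    return -1;  // truncated input

  long long result = first & ~m;  // strip the length-marker bit

  for (int i = 1; i < length; ++i) {
    result <<= 8;
    result |= buf[i];
  }

  *len = length;
  return result;
}
```

For example, the byte sequence `0x42 0x86` decodes to length 2 and value 0x0286, the "version" element ID that the parser matches later in this patch.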
patch:
@@ -12,1380 +12,1208 @@
#include <new>
#include <climits>
-mkvparser::IMkvReader::~IMkvReader()
-{
+#ifdef _MSC_VER
+// Disable MSVC warnings that suggest making code non-portable.
+#pragma warning(disable : 4996)
+#endif
+
+mkvparser::IMkvReader::~IMkvReader() {}
+
+void mkvparser::GetVersion(int& major, int& minor, int& build, int& revision) {
+ major = 1;
+ minor = 0;
+ build = 0;
+ revision = 28;
}
-void mkvparser::GetVersion(int& major, int& minor, int& build, int& revision)
-{
- major = 1;
- minor = 0;
- build = 0;
- revision = 27;
-}
+long long mkvparser::ReadUInt(IMkvReader* pReader, long long pos, long& len) {
+ assert(pReader);
+ assert(pos >= 0);
-long long mkvparser::ReadUInt(IMkvReader* pReader, long long pos, long& len)
-{
- assert(pReader);
- assert(pos >= 0);
+ int status;
- int status;
+ //#ifdef _DEBUG
+ // long long total, available;
+ // status = pReader->Length(&total, &available);
+ // assert(status >= 0);
+ // assert((total < 0) || (available <= total));
+ // assert(pos < available);
+ // assert((available - pos) >= 1); //assume here max u-int len is 8
+ //#endif
-//#ifdef _DEBUG
-// long long total, available;
-// status = pReader->Length(&total, &available);
-// assert(status >= 0);
-// assert((total < 0) || (available <= total));
-// assert(pos < available);
-// assert((available - pos) >= 1); //assume here max u-int len is 8
-//#endif
+ len = 1;
- len = 1;
+ unsigned char b;
- unsigned char b;
+ status = pReader->Read(pos, 1, &b);
+ if (status < 0) // error or underflow
+ return status;
+
+ if (status > 0) // interpreted as "underflow"
+ return E_BUFFER_NOT_FULL;
+
+ if (b == 0) // we can't handle u-int values larger than 8 bytes
+ return E_FILE_FORMAT_INVALID;
+
+ unsigned char m = 0x80;
+
+ while (!(b & m)) {
+ m >>= 1;
+ ++len;
+ }
+
+ //#ifdef _DEBUG
+ // assert((available - pos) >= len);
+ //#endif
+
+ long long result = b & (~m);
+ ++pos;
+
+ for (int i = 1; i < len; ++i) {
status = pReader->Read(pos, 1, &b);
- if (status < 0) //error or underflow
- return status;
-
- if (status > 0) //interpreted as "underflow"
- return E_BUFFER_NOT_FULL;
-
- if (b == 0) //we can't handle u-int values larger than 8 bytes
- return E_FILE_FORMAT_INVALID;
-
- unsigned char m = 0x80;
-
- while (!(b & m))
- {
- m >>= 1;
- ++len;
+ if (status < 0) {
+ len = 1;
+ return status;
}
-//#ifdef _DEBUG
-// assert((available - pos) >= len);
-//#endif
+ if (status > 0) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- long long result = b & (~m);
+ result <<= 8;
+ result |= b;
+
++pos;
+ }
- for (int i = 1; i < len; ++i)
- {
- status = pReader->Read(pos, 1, &b);
-
- if (status < 0)
- {
- len = 1;
- return status;
- }
-
- if (status > 0)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result <<= 8;
- result |= b;
-
- ++pos;
- }
-
- return result;
+ return result;
}
-long long mkvparser::GetUIntLength(
- IMkvReader* pReader,
- long long pos,
- long& len)
-{
- assert(pReader);
- assert(pos >= 0);
+long long mkvparser::GetUIntLength(IMkvReader* pReader, long long pos,
+ long& len) {
+ assert(pReader);
+ assert(pos >= 0);
- long long total, available;
+ long long total, available;
- int status = pReader->Length(&total, &available);
- assert(status >= 0);
- assert((total < 0) || (available <= total));
+ int status = pReader->Length(&total, &available);
+ assert(status >= 0);
+ assert((total < 0) || (available <= total));
- len = 1;
+ len = 1;
- if (pos >= available)
- return pos; //too few bytes available
+ if (pos >= available)
+ return pos; // too few bytes available
+ unsigned char b;
+
+ status = pReader->Read(pos, 1, &b);
+
+ if (status < 0)
+ return status;
+
+ assert(status == 0);
+
+ if (b == 0) // we can't handle u-int values larger than 8 bytes
+ return E_FILE_FORMAT_INVALID;
+
+ unsigned char m = 0x80;
+
+ while (!(b & m)) {
+ m >>= 1;
+ ++len;
+ }
+
+ return 0; // success
+}
+
+// TODO(vigneshv): This function assumes that unsigned values never have their
+// high bit set.
+long long mkvparser::UnserializeUInt(IMkvReader* pReader, long long pos,
+ long long size) {
+ assert(pReader);
+ assert(pos >= 0);
+
+ if ((size <= 0) || (size > 8))
+ return E_FILE_FORMAT_INVALID;
+
+ long long result = 0;
+
+ for (long long i = 0; i < size; ++i) {
unsigned char b;
- status = pReader->Read(pos, 1, &b);
+ const long status = pReader->Read(pos, 1, &b);
if (status < 0)
- return status;
+ return status;
- assert(status == 0);
+ result <<= 8;
+ result |= b;
- if (b == 0) //we can't handle u-int values larger than 8 bytes
- return E_FILE_FORMAT_INVALID;
+ ++pos;
+ }
- unsigned char m = 0x80;
-
- while (!(b & m))
- {
- m >>= 1;
- ++len;
- }
-
- return 0; //success
+ return result;
}
+long mkvparser::UnserializeFloat(IMkvReader* pReader, long long pos,
+ long long size_, double& result) {
+ assert(pReader);
+ assert(pos >= 0);
-long long mkvparser::UnserializeUInt(
- IMkvReader* pReader,
- long long pos,
- long long size)
-{
- assert(pReader);
- assert(pos >= 0);
+ if ((size_ != 4) && (size_ != 8))
+ return E_FILE_FORMAT_INVALID;
- if ((size <= 0) || (size > 8))
- return E_FILE_FORMAT_INVALID;
+ const long size = static_cast<long>(size_);
- long long result = 0;
+ unsigned char buf[8];
- for (long long i = 0; i < size; ++i)
- {
- unsigned char b;
+ const int status = pReader->Read(pos, size, buf);
- const long status = pReader->Read(pos, 1, &b);
+ if (status < 0) // error
+ return status;
- if (status < 0)
- return status;
+ if (size == 4) {
+ union {
+ float f;
+ unsigned long ff;
+ };
- result <<= 8;
- result |= b;
+ ff = 0;
- ++pos;
+ for (int i = 0;;) {
+ ff |= buf[i];
+
+ if (++i >= 4)
+ break;
+
+ ff <<= 8;
}
- return result;
+ result = f;
+ } else {
+ assert(size == 8);
+
+ union {
+ double d;
+ unsigned long long dd;
+ };
+
+ dd = 0;
+
+ for (int i = 0;;) {
+ dd |= buf[i];
+
+ if (++i >= 8)
+ break;
+
+ dd <<= 8;
+ }
+
+ result = d;
+ }
+
+ return 0;
}
+long mkvparser::UnserializeInt(IMkvReader* pReader, long long pos, long size,
+ long long& result) {
+ assert(pReader);
+ assert(pos >= 0);
+ assert(size > 0);
+ assert(size <= 8);
-long mkvparser::UnserializeFloat(
- IMkvReader* pReader,
- long long pos,
- long long size_,
- double& result)
-{
- assert(pReader);
- assert(pos >= 0);
+ {
+ signed char b;
- if ((size_ != 4) && (size_ != 8))
- return E_FILE_FORMAT_INVALID;
+ const long status = pReader->Read(pos, 1, (unsigned char*)&b);
- const long size = static_cast<long>(size_);
+ if (status < 0)
+ return status;
- unsigned char buf[8];
+ result = b;
- const int status = pReader->Read(pos, size, buf);
+ ++pos;
+ }
- if (status < 0) //error
- return status;
+ for (long i = 1; i < size; ++i) {
+ unsigned char b;
- if (size == 4)
- {
- union
- {
- float f;
- unsigned long ff;
- };
+ const long status = pReader->Read(pos, 1, &b);
- ff = 0;
+ if (status < 0)
+ return status;
- for (int i = 0;;)
- {
- ff |= buf[i];
+ result <<= 8;
+ result |= b;
- if (++i >= 4)
- break;
+ ++pos;
+ }
- ff <<= 8;
- }
-
- result = f;
- }
- else
- {
- assert(size == 8);
-
- union
- {
- double d;
- unsigned long long dd;
- };
-
- dd = 0;
-
- for (int i = 0;;)
- {
- dd |= buf[i];
-
- if (++i >= 8)
- break;
-
- dd <<= 8;
- }
-
- result = d;
- }
-
- return 0;
+ return 0; // success
}
+long mkvparser::UnserializeString(IMkvReader* pReader, long long pos,
+ long long size_, char*& str) {
+ delete[] str;
+ str = NULL;
-long mkvparser::UnserializeInt(
- IMkvReader* pReader,
- long long pos,
- long size,
- long long& result)
-{
- assert(pReader);
- assert(pos >= 0);
- assert(size > 0);
- assert(size <= 8);
+ if (size_ >= LONG_MAX) // we need (size+1) chars
+ return E_FILE_FORMAT_INVALID;
- {
- signed char b;
+ const long size = static_cast<long>(size_);
- const long status = pReader->Read(pos, 1, (unsigned char*)&b);
+ str = new (std::nothrow) char[size + 1];
- if (status < 0)
- return status;
+ if (str == NULL)
+ return -1;
- result = b;
+ unsigned char* const buf = reinterpret_cast<unsigned char*>(str);
- ++pos;
- }
+ const long status = pReader->Read(pos, size, buf);
- for (long i = 1; i < size; ++i)
- {
- unsigned char b;
-
- const long status = pReader->Read(pos, 1, &b);
-
- if (status < 0)
- return status;
-
- result <<= 8;
- result |= b;
-
- ++pos;
- }
-
- return 0; //success
-}
-
-
-long mkvparser::UnserializeString(
- IMkvReader* pReader,
- long long pos,
- long long size_,
- char*& str)
-{
+ if (status) {
delete[] str;
str = NULL;
- if (size_ >= LONG_MAX) //we need (size+1) chars
- return E_FILE_FORMAT_INVALID;
+ return status;
+ }
- const long size = static_cast<long>(size_);
+ str[size] = '\0';
- str = new (std::nothrow) char[size+1];
-
- if (str == NULL)
- return -1;
-
- unsigned char* const buf = reinterpret_cast<unsigned char*>(str);
-
- const long status = pReader->Read(pos, size, buf);
-
- if (status)
- {
- delete[] str;
- str = NULL;
-
- return status;
- }
-
- str[size] = '\0';
-
- return 0; //success
+ return 0; // success
}
+long mkvparser::ParseElementHeader(IMkvReader* pReader, long long& pos,
+ long long stop, long long& id,
+ long long& size) {
+ if ((stop >= 0) && (pos >= stop))
+ return E_FILE_FORMAT_INVALID;
-long mkvparser::ParseElementHeader(
- IMkvReader* pReader,
- long long& pos,
- long long stop,
- long long& id,
- long long& size)
-{
- if ((stop >= 0) && (pos >= stop))
- return E_FILE_FORMAT_INVALID;
+ long len;
- long len;
+ id = ReadUInt(pReader, pos, len);
- id = ReadUInt(pReader, pos, len);
+ if (id < 0)
+ return E_FILE_FORMAT_INVALID;
- if (id < 0)
- return E_FILE_FORMAT_INVALID;
+ pos += len; // consume id
- pos += len; //consume id
+ if ((stop >= 0) && (pos >= stop))
+ return E_FILE_FORMAT_INVALID;
- if ((stop >= 0) && (pos >= stop))
- return E_FILE_FORMAT_INVALID;
+ size = ReadUInt(pReader, pos, len);
- size = ReadUInt(pReader, pos, len);
+ if (size < 0)
+ return E_FILE_FORMAT_INVALID;
- if (size < 0)
- return E_FILE_FORMAT_INVALID;
+ pos += len; // consume length of size
- pos += len; //consume length of size
+ // pos now designates payload
- //pos now designates payload
+ if ((stop >= 0) && ((pos + size) > stop))
+ return E_FILE_FORMAT_INVALID;
- if ((stop >= 0) && ((pos + size) > stop))
- return E_FILE_FORMAT_INVALID;
-
- return 0; //success
+ return 0; // success
}
+bool mkvparser::Match(IMkvReader* pReader, long long& pos, unsigned long id_,
+ long long& val) {
+ assert(pReader);
+ assert(pos >= 0);
-bool mkvparser::Match(
- IMkvReader* pReader,
- long long& pos,
- unsigned long id_,
- long long& val)
-{
- assert(pReader);
- assert(pos >= 0);
+ long long total, available;
- long long total, available;
+ const long status = pReader->Length(&total, &available);
+ assert(status >= 0);
+ assert((total < 0) || (available <= total));
+ if (status < 0)
+ return false;
- const long status = pReader->Length(&total, &available);
- assert(status >= 0);
- assert((total < 0) || (available <= total));
- if (status < 0)
- return false;
+ long len;
- long len;
+ const long long id = ReadUInt(pReader, pos, len);
+ assert(id >= 0);
+ assert(len > 0);
+ assert(len <= 8);
+ assert((pos + len) <= available);
- const long long id = ReadUInt(pReader, pos, len);
- assert(id >= 0);
- assert(len > 0);
- assert(len <= 8);
- assert((pos + len) <= available);
+ if ((unsigned long)id != id_)
+ return false;
- if ((unsigned long)id != id_)
- return false;
+ pos += len; // consume id
- pos += len; //consume id
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0);
+ assert(size <= 8);
+ assert(len > 0);
+ assert(len <= 8);
+ assert((pos + len) <= available);
- const long long size = ReadUInt(pReader, pos, len);
- assert(size >= 0);
- assert(size <= 8);
- assert(len > 0);
- assert(len <= 8);
- assert((pos + len) <= available);
+ pos += len; // consume length of size of payload
- pos += len; //consume length of size of payload
+ val = UnserializeUInt(pReader, pos, size);
+ assert(val >= 0);
- val = UnserializeUInt(pReader, pos, size);
- assert(val >= 0);
+ pos += size; // consume size of payload
- pos += size; //consume size of payload
-
- return true;
+ return true;
}
-bool mkvparser::Match(
- IMkvReader* pReader,
- long long& pos,
- unsigned long id_,
- unsigned char*& buf,
- size_t& buflen)
-{
- assert(pReader);
- assert(pos >= 0);
+bool mkvparser::Match(IMkvReader* pReader, long long& pos, unsigned long id_,
+ unsigned char*& buf, size_t& buflen) {
+ assert(pReader);
+ assert(pos >= 0);
- long long total, available;
+ long long total, available;
- long status = pReader->Length(&total, &available);
- assert(status >= 0);
- assert((total < 0) || (available <= total));
- if (status < 0)
- return false;
+ long status = pReader->Length(&total, &available);
+ assert(status >= 0);
+ assert((total < 0) || (available <= total));
+ if (status < 0)
+ return false;
- long len;
- const long long id = ReadUInt(pReader, pos, len);
- assert(id >= 0);
- assert(len > 0);
- assert(len <= 8);
- assert((pos + len) <= available);
+ long len;
+ const long long id = ReadUInt(pReader, pos, len);
+ assert(id >= 0);
+ assert(len > 0);
+ assert(len <= 8);
+ assert((pos + len) <= available);
- if ((unsigned long)id != id_)
- return false;
+ if ((unsigned long)id != id_)
+ return false;
- pos += len; //consume id
+ pos += len; // consume id
- const long long size_ = ReadUInt(pReader, pos, len);
- assert(size_ >= 0);
- assert(len > 0);
- assert(len <= 8);
- assert((pos + len) <= available);
+ const long long size_ = ReadUInt(pReader, pos, len);
+ assert(size_ >= 0);
+ assert(len > 0);
+ assert(len <= 8);
+ assert((pos + len) <= available);
- pos += len; //consume length of size of payload
- assert((pos + size_) <= available);
+ pos += len; // consume length of size of payload
+ assert((pos + size_) <= available);
- const long buflen_ = static_cast<long>(size_);
+ const long buflen_ = static_cast<long>(size_);
- buf = new (std::nothrow) unsigned char[buflen_];
- assert(buf); //TODO
+ buf = new (std::nothrow) unsigned char[buflen_];
+ assert(buf); // TODO
- status = pReader->Read(pos, buflen_, buf);
- assert(status == 0); //TODO
+ status = pReader->Read(pos, buflen_, buf);
+ assert(status == 0); // TODO
- buflen = buflen_;
+ buflen = buflen_;
- pos += size_; //consume size of payload
- return true;
+ pos += size_; // consume size of payload
+ return true;
}
+namespace mkvparser {
-namespace mkvparser
-{
+EBMLHeader::EBMLHeader() : m_docType(NULL) { Init(); }
-EBMLHeader::EBMLHeader() :
- m_docType(NULL)
-{
- Init();
-}
+EBMLHeader::~EBMLHeader() { delete[] m_docType; }
-EBMLHeader::~EBMLHeader()
-{
+void EBMLHeader::Init() {
+ m_version = 1;
+ m_readVersion = 1;
+ m_maxIdLength = 4;
+ m_maxSizeLength = 8;
+
+ if (m_docType) {
delete[] m_docType;
+ m_docType = NULL;
+ }
+
+ m_docTypeVersion = 1;
+ m_docTypeReadVersion = 1;
}
-void EBMLHeader::Init()
-{
- m_version = 1;
- m_readVersion = 1;
- m_maxIdLength = 4;
- m_maxSizeLength = 8;
+long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) {
+ assert(pReader);
- if (m_docType)
- {
- delete[] m_docType;
- m_docType = NULL;
- }
+ long long total, available;
- m_docTypeVersion = 1;
- m_docTypeReadVersion = 1;
-}
+ long status = pReader->Length(&total, &available);
-long long EBMLHeader::Parse(
- IMkvReader* pReader,
- long long& pos)
-{
- assert(pReader);
+ if (status < 0) // error
+ return status;
- long long total, available;
+ pos = 0;
+ long long end = (available >= 1024) ? 1024 : available;
- long status = pReader->Length(&total, &available);
+ for (;;) {
+ unsigned char b = 0;
- if (status < 0) //error
+ while (pos < end) {
+ status = pReader->Read(pos, 1, &b);
+
+ if (status < 0) // error
return status;
- pos = 0;
- long long end = (available >= 1024) ? 1024 : available;
+ if (b == 0x1A)
+ break;
- for (;;)
- {
- unsigned char b = 0;
-
- while (pos < end)
- {
- status = pReader->Read(pos, 1, &b);
-
- if (status < 0) //error
- return status;
-
- if (b == 0x1A)
- break;
-
- ++pos;
- }
-
- if (b != 0x1A)
- {
- if (pos >= 1024)
- return E_FILE_FORMAT_INVALID; //don't bother looking anymore
-
- if ((total >= 0) && ((total - available) < 5))
- return E_FILE_FORMAT_INVALID;
-
- return available + 5; //5 = 4-byte ID + 1st byte of size
- }
-
- if ((total >= 0) && ((total - pos) < 5))
- return E_FILE_FORMAT_INVALID;
-
- if ((available - pos) < 5)
- return pos + 5; //try again later
-
- long len;
-
- const long long result = ReadUInt(pReader, pos, len);
-
- if (result < 0) //error
- return result;
-
- if (result == 0x0A45DFA3) //EBML Header ID
- {
- pos += len; //consume ID
- break;
- }
-
- ++pos; //throw away just the 0x1A byte, and try again
+ ++pos;
}
- //pos designates start of size field
+ if (b != 0x1A) {
+ if (pos >= 1024)
+ return E_FILE_FORMAT_INVALID; // don't bother looking anymore
- //get length of size field
+ if ((total >= 0) && ((total - available) < 5))
+ return E_FILE_FORMAT_INVALID;
+ return available + 5; // 5 = 4-byte ID + 1st byte of size
+ }
+
+ if ((total >= 0) && ((total - pos) < 5))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((available - pos) < 5)
+ return pos + 5; // try again later
+
+ long len;
+
+ const long long result = ReadUInt(pReader, pos, len);
+
+ if (result < 0) // error
+ return result;
+
+ if (result == 0x0A45DFA3) { // EBML Header ID
+ pos += len; // consume ID
+ break;
+ }
+
+ ++pos; // throw away just the 0x1A byte, and try again
+ }
+
+ // pos designates start of size field
+
+ // get length of size field
+
+ long len;
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return result;
+
+ if (result > 0) // need more data
+ return result;
+
+ assert(len > 0);
+ assert(len <= 8);
+
+ if ((total >= 0) && ((total - pos) < len))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((available - pos) < len)
+ return pos + len; // try again later
+
+ // get the EBML header size
+
+ result = ReadUInt(pReader, pos, len);
+
+ if (result < 0) // error
+ return result;
+
+ pos += len; // consume size field
+
+ // pos now designates start of payload
+
+ if ((total >= 0) && ((total - pos) < result))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((available - pos) < result)
+ return pos + result;
+
+ end = pos + result;
+
+ Init();
+
+ while (pos < end) {
+ long long id, size;
+
+ status = ParseElementHeader(pReader, pos, end, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // weird
+ return E_FILE_FORMAT_INVALID;
+
+ if (id == 0x0286) { // version
+ m_version = UnserializeUInt(pReader, pos, size);
+
+ if (m_version <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x02F7) { // read version
+ m_readVersion = UnserializeUInt(pReader, pos, size);
+
+ if (m_readVersion <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x02F2) { // max id length
+ m_maxIdLength = UnserializeUInt(pReader, pos, size);
+
+ if (m_maxIdLength <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x02F3) { // max size length
+ m_maxSizeLength = UnserializeUInt(pReader, pos, size);
+
+ if (m_maxSizeLength <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x0282) { // doctype
+ if (m_docType)
+ return E_FILE_FORMAT_INVALID;
+
+ status = UnserializeString(pReader, pos, size, m_docType);
+
+ if (status) // error
+ return status;
+ } else if (id == 0x0287) { // doctype version
+ m_docTypeVersion = UnserializeUInt(pReader, pos, size);
+
+ if (m_docTypeVersion <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x0285) { // doctype read version
+ m_docTypeReadVersion = UnserializeUInt(pReader, pos, size);
+
+ if (m_docTypeReadVersion <= 0)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ pos += size;
+ }
+
+ assert(pos == end);
+ return 0;
+}
+
+Segment::Segment(IMkvReader* pReader, long long elem_start,
+ // long long elem_size,
+ long long start, long long size)
+ : m_pReader(pReader),
+ m_element_start(elem_start),
+ // m_element_size(elem_size),
+ m_start(start),
+ m_size(size),
+ m_pos(start),
+ m_pUnknownSize(0),
+ m_pSeekHead(NULL),
+ m_pInfo(NULL),
+ m_pTracks(NULL),
+ m_pCues(NULL),
+ m_pChapters(NULL),
+ m_clusters(NULL),
+ m_clusterCount(0),
+ m_clusterPreloadCount(0),
+ m_clusterSize(0) {}
+
+Segment::~Segment() {
+ const long count = m_clusterCount + m_clusterPreloadCount;
+
+ Cluster** i = m_clusters;
+ Cluster** j = m_clusters + count;
+
+ while (i != j) {
+ Cluster* const p = *i++;
+ assert(p);
+
+ delete p;
+ }
+
+ delete[] m_clusters;
+
+ delete m_pTracks;
+ delete m_pInfo;
+ delete m_pCues;
+ delete m_pChapters;
+ delete m_pSeekHead;
+}
+
+long long Segment::CreateInstance(IMkvReader* pReader, long long pos,
+ Segment*& pSegment) {
+ assert(pReader);
+ assert(pos >= 0);
+
+ pSegment = NULL;
+
+ long long total, available;
+
+ const long status = pReader->Length(&total, &available);
+
+ if (status < 0) // error
+ return status;
+
+ if (available < 0)
+ return -1;
+
+ if ((total >= 0) && (available > total))
+ return -1;
+
+ // I would assume that in practice this loop would execute
+ // exactly once, but we allow for other elements (e.g. Void)
+ // to immediately follow the EBML header. This is fine for
+ // the source filter case (since the entire file is available),
+ // but in the splitter case over a network we should probably
+ // just give up early. We could for example decide only to
+ // execute this loop a maximum of, say, 10 times.
+ // TODO:
+ // There is an implied "give up early" by only parsing up
+ // to the available limit. We do do that, but only if the
+ // total file size is unknown. We could decide to always
+ // use what's available as our limit (irrespective of whether
+ // we happen to know the total file length). This would have
+ // as its sense "parse this much of the file before giving up",
+ // which a slightly different sense from "try to parse up to
+ // 10 EMBL elements before giving up".
+
+ for (;;) {
+ if ((total >= 0) && (pos >= total))
+ return E_FILE_FORMAT_INVALID;
+
+ // Read ID
long len;
long long result = GetUIntLength(pReader, pos, len);
- if (result < 0) //error
- return result;
+ if (result) // error, or too few available bytes
+ return result;
- if (result > 0) //need more data
- return result;
+ if ((total >= 0) && ((pos + len) > total))
+ return E_FILE_FORMAT_INVALID;
- assert(len > 0);
- assert(len <= 8);
+ if ((pos + len) > available)
+ return pos + len;
- if ((total >= 0) && ((total - pos) < len))
- return E_FILE_FORMAT_INVALID;
+ const long long idpos = pos;
+ const long long id = ReadUInt(pReader, pos, len);
- if ((available - pos) < len)
- return pos + len; //try again later
+ if (id < 0) // error
+ return id;
- //get the EBML header size
+ pos += len; // consume ID
- result = ReadUInt(pReader, pos, len);
+ // Read Size
- if (result < 0) //error
- return result;
+ result = GetUIntLength(pReader, pos, len);
- pos += len; //consume size field
+ if (result) // error, or too few available bytes
+ return result;
- //pos now designates start of payload
+ if ((total >= 0) && ((pos + len) > total))
+ return E_FILE_FORMAT_INVALID;
- if ((total >= 0) && ((total - pos) < result))
- return E_FILE_FORMAT_INVALID;
+ if ((pos + len) > available)
+ return pos + len;
- if ((available - pos) < result)
- return pos + result;
+ long long size = ReadUInt(pReader, pos, len);
- end = pos + result;
+ if (size < 0) // error
+ return size;
- Init();
+ pos += len; // consume length of size of element
- while (pos < end)
- {
- long long id, size;
+ // Pos now points to start of payload
- status = ParseElementHeader(
- pReader,
- pos,
- end,
- id,
- size);
+ // Handle "unknown size" for live streaming of webm files.
+ const long long unknown_size = (1LL << (7 * len)) - 1;
- if (status < 0) //error
- return status;
+ if (id == 0x08538067) { // Segment ID
+ if (size == unknown_size)
+ size = -1;
- if (size == 0) //weird
- return E_FILE_FORMAT_INVALID;
+ else if (total < 0)
+ size = -1;
- if (id == 0x0286) //version
- {
- m_version = UnserializeUInt(pReader, pos, size);
+ else if ((pos + size) > total)
+ size = -1;
- if (m_version <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x02F7) //read version
- {
- m_readVersion = UnserializeUInt(pReader, pos, size);
+ pSegment = new (std::nothrow) Segment(pReader, idpos,
+ // elem_size
+ pos, size);
- if (m_readVersion <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x02F2) //max id length
- {
- m_maxIdLength = UnserializeUInt(pReader, pos, size);
+ if (pSegment == 0)
+ return -1; // generic error
- if (m_maxIdLength <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x02F3) //max size length
- {
- m_maxSizeLength = UnserializeUInt(pReader, pos, size);
-
- if (m_maxSizeLength <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x0282) //doctype
- {
- if (m_docType)
- return E_FILE_FORMAT_INVALID;
-
- status = UnserializeString(pReader, pos, size, m_docType);
-
- if (status) //error
- return status;
- }
- else if (id == 0x0287) //doctype version
- {
- m_docTypeVersion = UnserializeUInt(pReader, pos, size);
-
- if (m_docTypeVersion <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x0285) //doctype read version
- {
- m_docTypeReadVersion = UnserializeUInt(pReader, pos, size);
-
- if (m_docTypeReadVersion <= 0)
- return E_FILE_FORMAT_INVALID;
- }
-
- pos += size;
+ return 0; // success
}
- assert(pos == end);
- return 0;
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && ((pos + size) > total))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + size) > available)
+ return pos + size;
+
+ pos += size; // consume payload
+ }
}
+long long Segment::ParseHeaders() {
+ // Outermost (level 0) segment object has been constructed,
+ // and pos designates start of payload. We need to find the
+ // inner (level 1) elements.
+ long long total, available;
-Segment::Segment(
- IMkvReader* pReader,
- long long elem_start,
- //long long elem_size,
- long long start,
- long long size) :
- m_pReader(pReader),
- m_element_start(elem_start),
- //m_element_size(elem_size),
- m_start(start),
- m_size(size),
- m_pos(start),
- m_pUnknownSize(0),
- m_pSeekHead(NULL),
- m_pInfo(NULL),
- m_pTracks(NULL),
- m_pCues(NULL),
- m_pChapters(NULL),
- m_clusters(NULL),
- m_clusterCount(0),
- m_clusterPreloadCount(0),
- m_clusterSize(0)
-{
-}
+ const int status = m_pReader->Length(&total, &available);
+ if (status < 0) // error
+ return status;
-Segment::~Segment()
-{
- const long count = m_clusterCount + m_clusterPreloadCount;
+ assert((total < 0) || (available <= total));
- Cluster** i = m_clusters;
- Cluster** j = m_clusters + count;
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+ assert((segment_stop < 0) || (total < 0) || (segment_stop <= total));
+ assert((segment_stop < 0) || (m_pos <= segment_stop));
- while (i != j)
- {
- Cluster* const p = *i++;
- assert(p);
+ for (;;) {
+ if ((total >= 0) && (m_pos >= total))
+ break;
- delete p;
- }
+ if ((segment_stop >= 0) && (m_pos >= segment_stop))
+ break;
- delete[] m_clusters;
+ long long pos = m_pos;
+ const long long element_start = pos;
- delete m_pTracks;
- delete m_pInfo;
- delete m_pCues;
- delete m_pChapters;
- delete m_pSeekHead;
-}
+ if ((pos + 1) > available)
+ return (pos + 1);
+ long len;
+ long long result = GetUIntLength(m_pReader, pos, len);
-long long Segment::CreateInstance(
- IMkvReader* pReader,
- long long pos,
- Segment*& pSegment)
-{
- assert(pReader);
- assert(pos >= 0);
+ if (result < 0) // error
+ return result;
- pSegment = NULL;
+ if (result > 0) // underflow (weird)
+ return (pos + 1);
- long long total, available;
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- const long status = pReader->Length(&total, &available);
+ if ((pos + len) > available)
+ return pos + len;
- if (status < 0) //error
- return status;
+ const long long idpos = pos;
+ const long long id = ReadUInt(m_pReader, idpos, len);
- if (available < 0)
+ if (id < 0) // error
+ return id;
+
+ if (id == 0x0F43B675) // Cluster ID
+ break;
+
+ pos += len; // consume ID
+
+ if ((pos + 1) > available)
+ return (pos + 1);
+
+ // Read Size
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return result;
+
+ if (result > 0) // underflow (weird)
+ return (pos + 1);
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > available)
+ return pos + len;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0) // error
+ return size;
+
+ pos += len; // consume length of size of element
+
+ const long long element_size = size + pos - element_start;
+
+ // Pos now points to start of payload
+
+ if ((segment_stop >= 0) && ((pos + size) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // We read EBML elements either in total or nothing at all.
+
+ if ((pos + size) > available)
+ return pos + size;
+
+ if (id == 0x0549A966) { // Segment Info ID
+ if (m_pInfo)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pInfo = new (std::nothrow)
+ SegmentInfo(this, pos, size, element_start, element_size);
+
+ if (m_pInfo == NULL)
return -1;
- if ((total >= 0) && (available > total))
+ const long status = m_pInfo->Parse();
+
+ if (status)
+ return status;
+ } else if (id == 0x0654AE6B) { // Tracks ID
+ if (m_pTracks)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pTracks = new (std::nothrow)
+ Tracks(this, pos, size, element_start, element_size);
+
+ if (m_pTracks == NULL)
return -1;
- //I would assume that in practice this loop would execute
- //exactly once, but we allow for other elements (e.g. Void)
- //to immediately follow the EBML header. This is fine for
- //the source filter case (since the entire file is available),
- //but in the splitter case over a network we should probably
- //just give up early. We could for example decide only to
- //execute this loop a maximum of, say, 10 times.
- //TODO:
- //There is an implied "give up early" by only parsing up
- //to the available limit. We do do that, but only if the
- //total file size is unknown. We could decide to always
- //use what's available as our limit (irrespective of whether
- //we happen to know the total file length). This would have
- //as its sense "parse this much of the file before giving up",
- //which a slightly different sense from "try to parse up to
- //10 EMBL elements before giving up".
+ const long status = m_pTracks->Parse();
- for (;;)
- {
- if ((total >= 0) && (pos >= total))
- return E_FILE_FORMAT_INVALID;
-
- //Read ID
- long len;
- long long result = GetUIntLength(pReader, pos, len);
-
- if (result) //error, or too few available bytes
- return result;
-
- if ((total >= 0) && ((pos + len) > total))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > available)
- return pos + len;
-
- const long long idpos = pos;
- const long long id = ReadUInt(pReader, pos, len);
-
- if (id < 0) //error
- return id;
-
- pos += len; //consume ID
-
- //Read Size
-
- result = GetUIntLength(pReader, pos, len);
-
- if (result) //error, or too few available bytes
- return result;
-
- if ((total >= 0) && ((pos + len) > total))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > available)
- return pos + len;
-
- long long size = ReadUInt(pReader, pos, len);
-
- if (size < 0) //error
- return size;
-
- pos += len; //consume length of size of element
-
- //Pos now points to start of payload
-
- //Handle "unknown size" for live streaming of webm files.
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (id == 0x08538067) //Segment ID
- {
- if (size == unknown_size)
- size = -1;
-
- else if (total < 0)
- size = -1;
-
- else if ((pos + size) > total)
- size = -1;
-
- pSegment = new (std::nothrow) Segment(
- pReader,
- idpos,
- //elem_size
- pos,
- size);
-
- if (pSegment == 0)
- return -1; //generic error
-
- return 0; //success
- }
-
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID;
-
- if ((total >= 0) && ((pos + size) > total))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + size) > available)
- return pos + size;
-
- pos += size; //consume payload
- }
-}
-
-
-long long Segment::ParseHeaders()
-{
- //Outermost (level 0) segment object has been constructed,
- //and pos designates start of payload. We need to find the
- //inner (level 1) elements.
- long long total, available;
-
- const int status = m_pReader->Length(&total, &available);
-
- if (status < 0) //error
+ if (status)
return status;
+ } else if (id == 0x0C53BB6B) { // Cues ID
+ if (m_pCues == NULL) {
+ m_pCues = new (std::nothrow)
+ Cues(this, pos, size, element_start, element_size);
- assert((total < 0) || (available <= total));
+ if (m_pCues == NULL)
+ return -1;
+ }
+ } else if (id == 0x014D9B74) { // SeekHead ID
+ if (m_pSeekHead == NULL) {
+ m_pSeekHead = new (std::nothrow)
+ SeekHead(this, pos, size, element_start, element_size);
- const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
- assert((segment_stop < 0) || (total < 0) || (segment_stop <= total));
- assert((segment_stop < 0) || (m_pos <= segment_stop));
+ if (m_pSeekHead == NULL)
+ return -1;
- for (;;)
- {
- if ((total >= 0) && (m_pos >= total))
- break;
+ const long status = m_pSeekHead->Parse();
- if ((segment_stop >= 0) && (m_pos >= segment_stop))
- break;
+ if (status)
+ return status;
+ }
+ } else if (id == 0x0043A770) { // Chapters ID
+ if (m_pChapters == NULL) {
+ m_pChapters = new (std::nothrow)
+ Chapters(this, pos, size, element_start, element_size);
- long long pos = m_pos;
- const long long element_start = pos;
+ if (m_pChapters == NULL)
+ return -1;
- if ((pos + 1) > available)
- return (pos + 1);
+ const long status = m_pChapters->Parse();
- long len;
- long long result = GetUIntLength(m_pReader, pos, len);
-
- if (result < 0) //error
- return result;
-
- if (result > 0) //underflow (weird)
- return (pos + 1);
-
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > available)
- return pos + len;
-
- const long long idpos = pos;
- const long long id = ReadUInt(m_pReader, idpos, len);
-
- if (id < 0) //error
- return id;
-
- if (id == 0x0F43B675) //Cluster ID
- break;
-
- pos += len; //consume ID
-
- if ((pos + 1) > available)
- return (pos + 1);
-
- //Read Size
- result = GetUIntLength(m_pReader, pos, len);
-
- if (result < 0) //error
- return result;
-
- if (result > 0) //underflow (weird)
- return (pos + 1);
-
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > available)
- return pos + len;
-
- const long long size = ReadUInt(m_pReader, pos, len);
-
- if (size < 0) //error
- return size;
-
- pos += len; //consume length of size of element
-
- const long long element_size = size + pos - element_start;
-
- //Pos now points to start of payload
-
- if ((segment_stop >= 0) && ((pos + size) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- //We read EBML elements either in total or nothing at all.
-
- if ((pos + size) > available)
- return pos + size;
-
- if (id == 0x0549A966) //Segment Info ID
- {
- if (m_pInfo)
- return E_FILE_FORMAT_INVALID;
-
- m_pInfo = new (std::nothrow) SegmentInfo(
- this,
- pos,
- size,
- element_start,
- element_size);
-
- if (m_pInfo == NULL)
- return -1;
-
- const long status = m_pInfo->Parse();
-
- if (status)
- return status;
- }
- else if (id == 0x0654AE6B) //Tracks ID
- {
- if (m_pTracks)
- return E_FILE_FORMAT_INVALID;
-
- m_pTracks = new (std::nothrow) Tracks(this,
- pos,
- size,
- element_start,
- element_size);
-
- if (m_pTracks == NULL)
- return -1;
-
- const long status = m_pTracks->Parse();
-
- if (status)
- return status;
- }
- else if (id == 0x0C53BB6B) //Cues ID
- {
- if (m_pCues == NULL)
- {
- m_pCues = new (std::nothrow) Cues(
- this,
- pos,
- size,
- element_start,
- element_size);
-
- if (m_pCues == NULL)
- return -1;
- }
- }
- else if (id == 0x014D9B74) //SeekHead ID
- {
- if (m_pSeekHead == NULL)
- {
- m_pSeekHead = new (std::nothrow) SeekHead(
- this,
- pos,
- size,
- element_start,
- element_size);
-
- if (m_pSeekHead == NULL)
- return -1;
-
- const long status = m_pSeekHead->Parse();
-
- if (status)
- return status;
- }
- }
- else if (id == 0x0043A770) //Chapters ID
- {
- if (m_pChapters == NULL)
- {
- m_pChapters = new (std::nothrow) Chapters(
- this,
- pos,
- size,
- element_start,
- element_size);
-
- if (m_pChapters == NULL)
- return -1;
-
- const long status = m_pChapters->Parse();
-
- if (status)
- return status;
- }
- }
-
- m_pos = pos + size; //consume payload
+ if (status)
+ return status;
+ }
}
- assert((segment_stop < 0) || (m_pos <= segment_stop));
+ m_pos = pos + size; // consume payload
+ }
- if (m_pInfo == NULL) //TODO: liberalize this behavior
- return E_FILE_FORMAT_INVALID;
+ assert((segment_stop < 0) || (m_pos <= segment_stop));
- if (m_pTracks == NULL)
- return E_FILE_FORMAT_INVALID;
+ if (m_pInfo == NULL) // TODO: liberalize this behavior
+ return E_FILE_FORMAT_INVALID;
- return 0; //success
+ if (m_pTracks == NULL)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0; // success
}
+long Segment::LoadCluster(long long& pos, long& len) {
+ for (;;) {
+ const long result = DoLoadCluster(pos, len);
-long Segment::LoadCluster(
- long long& pos,
- long& len)
-{
- for (;;)
- {
- const long result = DoLoadCluster(pos, len);
+ if (result <= 1)
+ return result;
+ }
+}
- if (result <= 1)
- return result;
+long Segment::DoLoadCluster(long long& pos, long& len) {
+ if (m_pos < 0)
+ return DoLoadClusterUnknownSize(pos, len);
+
+ long long total, avail;
+
+ long status = m_pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+
+ long long cluster_off = -1; // offset relative to start of segment
+ long long cluster_size = -1; // size of cluster payload
+
+ for (;;) {
+ if ((total >= 0) && (m_pos >= total))
+ return 1; // no more clusters
+
+ if ((segment_stop >= 0) && (m_pos >= segment_stop))
+ return 1; // no more clusters
+
+ pos = m_pos;
+
+ // Read ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
}
-}
+ long long result = GetUIntLength(m_pReader, pos, len);
-long Segment::DoLoadCluster(
- long long& pos,
- long& len)
-{
- if (m_pos < 0)
- return DoLoadClusterUnknownSize(pos, len);
+ if (result < 0) // error
+ return static_cast<long>(result);
- long long total, avail;
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- long status = m_pReader->Length(&total, &avail);
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- if (status < 0) //error
- return status;
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- assert((total < 0) || (avail <= total));
+ const long long idpos = pos;
+ const long long id = ReadUInt(m_pReader, idpos, len);
- const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+ if (id < 0) // error (or underflow)
+ return static_cast<long>(id);
- long long cluster_off = -1; //offset relative to start of segment
- long long cluster_size = -1; //size of cluster payload
+ pos += len; // consume ID
- for (;;)
- {
- if ((total >= 0) && (m_pos >= total))
- return 1; //no more clusters
+ // Read Size
- if ((segment_stop >= 0) && (m_pos >= segment_stop))
- return 1; //no more clusters
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- pos = m_pos;
+ result = GetUIntLength(m_pReader, pos, len);
- //Read ID
+ if (result < 0) // error
+ return static_cast<long>(result);
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- long long result = GetUIntLength(m_pReader, pos, len);
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- if (result < 0) //error
- return static_cast<long>(result);
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ const long long size = ReadUInt(m_pReader, pos, len);
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ if (size < 0) // error
+ return static_cast<long>(size);
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ pos += len; // consume length of size of element
- const long long idpos = pos;
- const long long id = ReadUInt(m_pReader, idpos, len);
+ // pos now points to start of payload
- if (id < 0) //error (or underflow)
- return static_cast<long>(id);
+ if (size == 0) { // weird
+ m_pos = pos;
+ continue;
+ }
- pos += len; //consume ID
+ const long long unknown_size = (1LL << (7 * len)) - 1;
- //Read Size
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(m_pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(m_pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(size);
-
- pos += len; //consume length of size of element
-
- //pos now points to start of payload
-
- if (size == 0) //weird
- {
- m_pos = pos;
- continue;
- }
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
-#if 0 //we must handle this to support live webm
+#if 0 // we must handle this to support live webm
if (size == unknown_size)
return E_FILE_FORMAT_INVALID; //TODO: allow this
#endif
- if ((segment_stop >= 0) &&
- (size != unknown_size) &&
- ((pos + size) > segment_stop))
- {
- return E_FILE_FORMAT_INVALID;
- }
+ if ((segment_stop >= 0) && (size != unknown_size) &&
+ ((pos + size) > segment_stop)) {
+ return E_FILE_FORMAT_INVALID;
+ }
-#if 0 //commented-out, to support incremental cluster parsing
+#if 0 // commented-out, to support incremental cluster parsing
len = static_cast<long>(size);
if ((pos + size) > avail)
return E_BUFFER_NOT_FULL;
#endif
- if (id == 0x0C53BB6B) //Cues ID
- {
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID; //TODO: liberalize
+ if (id == 0x0C53BB6B) { // Cues ID
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID; // TODO: liberalize
- if (m_pCues == NULL)
- {
- const long long element_size = (pos - idpos) + size;
+ if (m_pCues == NULL) {
+ const long long element_size = (pos - idpos) + size;
- m_pCues = new Cues(this,
- pos,
- size,
- idpos,
- element_size);
- assert(m_pCues); //TODO
- }
+ m_pCues = new Cues(this, pos, size, idpos, element_size);
+ assert(m_pCues); // TODO
+ }
- m_pos = pos + size; //consume payload
- continue;
- }
-
- if (id != 0x0F43B675) //Cluster ID
- {
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID; //TODO: liberalize
-
- m_pos = pos + size; //consume payload
- continue;
- }
-
- //We have a cluster.
-
- cluster_off = idpos - m_start; //relative pos
-
- if (size != unknown_size)
- cluster_size = size;
-
- break;
+ m_pos = pos + size; // consume payload
+ continue;
}
- assert(cluster_off >= 0); //have cluster
+ if (id != 0x0F43B675) { // Cluster ID
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID; // TODO: liberalize
- long long pos_;
- long len_;
-
- status = Cluster::HasBlockEntries(this, cluster_off, pos_, len_);
-
- if (status < 0) //error, or underflow
- {
- pos = pos_;
- len = len_;
-
- return status;
+ m_pos = pos + size; // consume payload
+ continue;
}
- //status == 0 means "no block entries found"
- //status > 0 means "found at least one block entry"
+ // We have a cluster.
- //TODO:
- //The issue here is that the segment increments its own
- //pos ptr past the most recent cluster parsed, and then
- //starts from there to parse the next cluster. If we
- //don't know the size of the current cluster, then we
- //must either parse its payload (as we do below), looking
- //for the cluster (or cues) ID to terminate the parse.
- //This isn't really what we want: rather, we really need
- //a way to create the curr cluster object immediately.
- //The pity is that cluster::parse can determine its own
- //boundary, and we largely duplicate that same logic here.
- //
- //Maybe we need to get rid of our look-ahead preloading
- //in source::parse???
- //
- //As we're parsing the blocks in the curr cluster
- //(in cluster::parse), we should have some way to signal
- //to the segment that we have determined the boundary,
- //so it can adjust its own segment::m_pos member.
- //
- //The problem is that we're asserting in asyncreadinit,
- //because we adjust the pos down to the curr seek pos,
- //and the resulting adjusted len is > 2GB. I'm suspicious
- //that this is even correct, but even if it is, we can't
- //be loading that much data in the cache anyway.
+ cluster_off = idpos - m_start; // relative pos
- const long idx = m_clusterCount;
+ if (size != unknown_size)
+ cluster_size = size;
- if (m_clusterPreloadCount > 0)
- {
- assert(idx < m_clusterSize);
+ break;
+ }
- Cluster* const pCluster = m_clusters[idx];
- assert(pCluster);
- assert(pCluster->m_index < 0);
+ assert(cluster_off >= 0); // have cluster
- const long long off = pCluster->GetPosition();
- assert(off >= 0);
+ long long pos_;
+ long len_;
- if (off == cluster_off) //preloaded already
- {
- if (status == 0) //no entries found
- return E_FILE_FORMAT_INVALID;
+ status = Cluster::HasBlockEntries(this, cluster_off, pos_, len_);
- if (cluster_size >= 0)
- pos += cluster_size;
- else
- {
- const long long element_size = pCluster->GetElementSize();
+ if (status < 0) { // error, or underflow
+ pos = pos_;
+ len = len_;
- if (element_size <= 0)
- return E_FILE_FORMAT_INVALID; //TODO: handle this case
+ return status;
+ }
- pos = pCluster->m_element_start + element_size;
- }
+ // status == 0 means "no block entries found"
+ // status > 0 means "found at least one block entry"
- pCluster->m_index = idx; //move from preloaded to loaded
- ++m_clusterCount;
- --m_clusterPreloadCount;
+ // TODO:
+ // The issue here is that the segment increments its own
+ // pos ptr past the most recent cluster parsed, and then
+ // starts from there to parse the next cluster. If we
+ // don't know the size of the current cluster, then we
+ // must either parse its payload (as we do below), looking
+ // for the cluster (or cues) ID to terminate the parse.
+ // This isn't really what we want: rather, we really need
+ // a way to create the curr cluster object immediately.
+ // The pity is that cluster::parse can determine its own
+ // boundary, and we largely duplicate that same logic here.
+ //
+ // Maybe we need to get rid of our look-ahead preloading
+ // in source::parse???
+ //
+ // As we're parsing the blocks in the curr cluster
+ //(in cluster::parse), we should have some way to signal
+ // to the segment that we have determined the boundary,
+ // so it can adjust its own segment::m_pos member.
+ //
+ // The problem is that we're asserting in asyncreadinit,
+ // because we adjust the pos down to the curr seek pos,
+ // and the resulting adjusted len is > 2GB. I'm suspicious
+ // that this is even correct, but even if it is, we can't
+ // be loading that much data in the cache anyway.
- m_pos = pos; //consume payload
- assert((segment_stop < 0) || (m_pos <= segment_stop));
+ const long idx = m_clusterCount;
- return 0; //success
- }
- }
-
- if (status == 0) //no entries found
- {
- if (cluster_size < 0)
- return E_FILE_FORMAT_INVALID; //TODO: handle this
-
- pos += cluster_size;
-
- if ((total >= 0) && (pos >= total))
- {
- m_pos = total;
- return 1; //no more clusters
- }
-
- if ((segment_stop >= 0) && (pos >= segment_stop))
- {
- m_pos = segment_stop;
- return 1; //no more clusters
- }
-
- m_pos = pos;
- return 2; //try again
- }
-
- //status > 0 means we have an entry
-
- Cluster* const pCluster = Cluster::Create(this,
- idx,
- cluster_off);
- //element_size);
- assert(pCluster);
-
- AppendCluster(pCluster);
- assert(m_clusters);
+ if (m_clusterPreloadCount > 0) {
assert(idx < m_clusterSize);
- assert(m_clusters[idx] == pCluster);
- if (cluster_size >= 0)
- {
+ Cluster* const pCluster = m_clusters[idx];
+ assert(pCluster);
+ assert(pCluster->m_index < 0);
+
+ const long long off = pCluster->GetPosition();
+ assert(off >= 0);
+
+ if (off == cluster_off) { // preloaded already
+ if (status == 0) // no entries found
+ return E_FILE_FORMAT_INVALID;
+
+ if (cluster_size >= 0)
pos += cluster_size;
+ else {
+ const long long element_size = pCluster->GetElementSize();
- m_pos = pos;
- assert((segment_stop < 0) || (m_pos <= segment_stop));
+ if (element_size <= 0)
+ return E_FILE_FORMAT_INVALID; // TODO: handle this case
- return 0;
+ pos = pCluster->m_element_start + element_size;
+ }
+
+ pCluster->m_index = idx; // move from preloaded to loaded
+ ++m_clusterCount;
+ --m_clusterPreloadCount;
+
+ m_pos = pos; // consume payload
+ assert((segment_stop < 0) || (m_pos <= segment_stop));
+
+ return 0; // success
+ }
+ }
+
+ if (status == 0) { // no entries found
+ if (cluster_size < 0)
+ return E_FILE_FORMAT_INVALID; // TODO: handle this
+
+ pos += cluster_size;
+
+ if ((total >= 0) && (pos >= total)) {
+ m_pos = total;
+ return 1; // no more clusters
}
- m_pUnknownSize = pCluster;
- m_pos = -pos;
+ if ((segment_stop >= 0) && (pos >= segment_stop)) {
+ m_pos = segment_stop;
+ return 1; // no more clusters
+ }
- return 0; //partial success, since we have a new cluster
+ m_pos = pos;
+ return 2; // try again
+ }
- //status == 0 means "no block entries found"
+ // status > 0 means we have an entry
- //pos designates start of payload
- //m_pos has NOT been adjusted yet (in case we need to come back here)
+ Cluster* const pCluster = Cluster::Create(this, idx, cluster_off);
+ // element_size);
+ assert(pCluster);
+
+ AppendCluster(pCluster);
+ assert(m_clusters);
+ assert(idx < m_clusterSize);
+ assert(m_clusters[idx] == pCluster);
+
+ if (cluster_size >= 0) {
+ pos += cluster_size;
+
+ m_pos = pos;
+ assert((segment_stop < 0) || (m_pos <= segment_stop));
+
+ return 0;
+ }
+
+ m_pUnknownSize = pCluster;
+ m_pos = -pos;
+
+ return 0; // partial success, since we have a new cluster
+
+// status == 0 means "no block entries found"
+
+// pos designates start of payload
+// m_pos has NOT been adjusted yet (in case we need to come back here)
#if 0
- if (cluster_size < 0) //unknown size
- {
+ if (cluster_size < 0) { //unknown size
const long long payload_pos = pos; //absolute pos of cluster payload
- for (;;) //determine cluster size
- {
+ for (;;) { //determine cluster size
if ((total >= 0) && (pos >= total))
break;
@@ -1518,16 +1346,11 @@
return 2; //try to find another cluster
#endif
-
}
-
-long Segment::DoLoadClusterUnknownSize(
- long long& pos,
- long& len)
-{
- assert(m_pos < 0);
- assert(m_pUnknownSize);
+long Segment::DoLoadClusterUnknownSize(long long& pos, long& len) {
+ assert(m_pos < 0);
+ assert(m_pUnknownSize);
#if 0
assert(m_pUnknownSize->GetElementSize() < 0); //TODO: verify this
@@ -1554,8 +1377,7 @@
long long element_size = -1;
- for (;;) //determine cluster size
- {
+ for (;;) { //determine cluster size
if ((total >= 0) && (pos >= total))
{
element_size = total - element_start;
@@ -1604,8 +1426,7 @@
//that we have exhausted the sub-element's inside the cluster
//whose ID we parsed earlier.
- if ((id == 0x0F43B675) || (id == 0x0C53BB6B)) //Cluster ID or Cues ID
- {
+ if ((id == 0x0F43B675) || (id == 0x0C53BB6B)) { //Cluster ID or Cues ID
element_size = pos - element_start;
assert(element_size > 0);
@@ -1682,348 +1503,299 @@
return 2; //continue parsing
#else
- const long status = m_pUnknownSize->Parse(pos, len);
+ const long status = m_pUnknownSize->Parse(pos, len);
- if (status < 0) //error or underflow
- return status;
+ if (status < 0) // error or underflow
+ return status;
- if (status == 0) //parsed a block
- return 2; //continue parsing
+ if (status == 0) // parsed a block
+ return 2; // continue parsing
- assert(status > 0); //nothing left to parse of this cluster
+ assert(status > 0); // nothing left to parse of this cluster
- const long long start = m_pUnknownSize->m_element_start;
+ const long long start = m_pUnknownSize->m_element_start;
- const long long size = m_pUnknownSize->GetElementSize();
- assert(size >= 0);
+ const long long size = m_pUnknownSize->GetElementSize();
+ assert(size >= 0);
- pos = start + size;
- m_pos = pos;
+ pos = start + size;
+ m_pos = pos;
- m_pUnknownSize = 0;
+ m_pUnknownSize = 0;
- return 2; //continue parsing
+ return 2; // continue parsing
#endif
}
+void Segment::AppendCluster(Cluster* pCluster) {
+ assert(pCluster);
+ assert(pCluster->m_index >= 0);
-void Segment::AppendCluster(Cluster* pCluster)
-{
- assert(pCluster);
- assert(pCluster->m_index >= 0);
+ const long count = m_clusterCount + m_clusterPreloadCount;
- const long count = m_clusterCount + m_clusterPreloadCount;
+ long& size = m_clusterSize;
+ assert(size >= count);
- long& size = m_clusterSize;
- assert(size >= count);
+ const long idx = pCluster->m_index;
+ assert(idx == m_clusterCount);
- const long idx = pCluster->m_index;
- assert(idx == m_clusterCount);
+ if (count >= size) {
+ const long n = (size <= 0) ? 2048 : 2 * size;
- if (count >= size)
- {
- const long n = (size <= 0) ? 2048 : 2*size;
+ Cluster** const qq = new Cluster* [n];
+ Cluster** q = qq;
- Cluster** const qq = new Cluster*[n];
- Cluster** q = qq;
+ Cluster** p = m_clusters;
+ Cluster** const pp = p + count;
- Cluster** p = m_clusters;
- Cluster** const pp = p + count;
+ while (p != pp)
+ *q++ = *p++;
- while (p != pp)
- *q++ = *p++;
+ delete[] m_clusters;
- delete[] m_clusters;
+ m_clusters = qq;
+ size = n;
+ }
- m_clusters = qq;
- size = n;
- }
-
- if (m_clusterPreloadCount > 0)
- {
- assert(m_clusters);
-
- Cluster** const p = m_clusters + m_clusterCount;
- assert(*p);
- assert((*p)->m_index < 0);
-
- Cluster** q = p + m_clusterPreloadCount;
- assert(q < (m_clusters + size));
-
- for (;;)
- {
- Cluster** const qq = q - 1;
- assert((*qq)->m_index < 0);
-
- *q = *qq;
- q = qq;
-
- if (q == p)
- break;
- }
- }
-
- m_clusters[idx] = pCluster;
- ++m_clusterCount;
-}
-
-
-void Segment::PreloadCluster(Cluster* pCluster, ptrdiff_t idx)
-{
- assert(pCluster);
- assert(pCluster->m_index < 0);
- assert(idx >= m_clusterCount);
-
- const long count = m_clusterCount + m_clusterPreloadCount;
-
- long& size = m_clusterSize;
- assert(size >= count);
-
- if (count >= size)
- {
- const long n = (size <= 0) ? 2048 : 2*size;
-
- Cluster** const qq = new Cluster*[n];
- Cluster** q = qq;
-
- Cluster** p = m_clusters;
- Cluster** const pp = p + count;
-
- while (p != pp)
- *q++ = *p++;
-
- delete[] m_clusters;
-
- m_clusters = qq;
- size = n;
- }
-
+ if (m_clusterPreloadCount > 0) {
assert(m_clusters);
- Cluster** const p = m_clusters + idx;
+ Cluster** const p = m_clusters + m_clusterCount;
+ assert(*p);
+ assert((*p)->m_index < 0);
- Cluster** q = m_clusters + count;
- assert(q >= p);
+ Cluster** q = p + m_clusterPreloadCount;
assert(q < (m_clusters + size));
- while (q > p)
- {
- Cluster** const qq = q - 1;
- assert((*qq)->m_index < 0);
+ for (;;) {
+ Cluster** const qq = q - 1;
+ assert((*qq)->m_index < 0);
- *q = *qq;
- q = qq;
+ *q = *qq;
+ q = qq;
+
+ if (q == p)
+ break;
}
+ }
- m_clusters[idx] = pCluster;
- ++m_clusterPreloadCount;
+ m_clusters[idx] = pCluster;
+ ++m_clusterCount;
}
+void Segment::PreloadCluster(Cluster* pCluster, ptrdiff_t idx) {
+ assert(pCluster);
+ assert(pCluster->m_index < 0);
+ assert(idx >= m_clusterCount);
-long Segment::Load()
-{
- assert(m_clusters == NULL);
- assert(m_clusterSize == 0);
- assert(m_clusterCount == 0);
- //assert(m_size >= 0);
+ const long count = m_clusterCount + m_clusterPreloadCount;
- //Outermost (level 0) segment object has been constructed,
- //and pos designates start of payload. We need to find the
- //inner (level 1) elements.
+ long& size = m_clusterSize;
+ assert(size >= count);
- const long long header_status = ParseHeaders();
+ if (count >= size) {
+ const long n = (size <= 0) ? 2048 : 2 * size;
- if (header_status < 0) //error
- return static_cast<long>(header_status);
+ Cluster** const qq = new Cluster* [n];
+ Cluster** q = qq;
- if (header_status > 0) //underflow
- return E_BUFFER_NOT_FULL;
+ Cluster** p = m_clusters;
+ Cluster** const pp = p + count;
- assert(m_pInfo);
- assert(m_pTracks);
+ while (p != pp)
+ *q++ = *p++;
- for (;;)
- {
- const int status = LoadCluster();
+ delete[] m_clusters;
- if (status < 0) //error
- return status;
+ m_clusters = qq;
+ size = n;
+ }
- if (status >= 1) //no more clusters
- return 0;
- }
+ assert(m_clusters);
+
+ Cluster** const p = m_clusters + idx;
+
+ Cluster** q = m_clusters + count;
+ assert(q >= p);
+ assert(q < (m_clusters + size));
+
+ while (q > p) {
+ Cluster** const qq = q - 1;
+ assert((*qq)->m_index < 0);
+
+ *q = *qq;
+ q = qq;
+ }
+
+ m_clusters[idx] = pCluster;
+ ++m_clusterPreloadCount;
}
+long Segment::Load() {
+ assert(m_clusters == NULL);
+ assert(m_clusterSize == 0);
+ assert(m_clusterCount == 0);
+ // assert(m_size >= 0);
-SeekHead::SeekHead(
- Segment* pSegment,
- long long start,
- long long size_,
- long long element_start,
- long long element_size) :
- m_pSegment(pSegment),
- m_start(start),
- m_size(size_),
- m_element_start(element_start),
- m_element_size(element_size),
- m_entries(0),
- m_entry_count(0),
- m_void_elements(0),
- m_void_element_count(0)
-{
+ // Outermost (level 0) segment object has been constructed,
+ // and pos designates start of payload. We need to find the
+ // inner (level 1) elements.
+
+ const long long header_status = ParseHeaders();
+
+ if (header_status < 0) // error
+ return static_cast<long>(header_status);
+
+ if (header_status > 0) // underflow
+ return E_BUFFER_NOT_FULL;
+
+ assert(m_pInfo);
+ assert(m_pTracks);
+
+ for (;;) {
+ const int status = LoadCluster();
+
+ if (status < 0) // error
+ return status;
+
+ if (status >= 1) // no more clusters
+ return 0;
+ }
}
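
Load drives ParseHeaders and then LoadCluster to completion, reporting E_BUFFER_NOT_FULL when the reader has not yet seen enough bytes. A hypothetical caller for a growing (streamed) input might retry on that code; "mkvparser.hpp" as the declaring header and WaitForMoreData are assumptions here, not part of this file:

#include "mkvparser.hpp"  // assumed name of the header declaring Segment

void WaitForMoreData();  // hypothetical application I/O hook

long LoadWhenReady(mkvparser::Segment* pSegment) {
  for (;;) {
    const long status = pSegment->Load();

    if (status != mkvparser::E_BUFFER_NOT_FULL)
      return status;  // 0 on success, negative on a hard parse error

    WaitForMoreData();  // block until the IMkvReader can serve more bytes
  }
}
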
+SeekHead::SeekHead(Segment* pSegment, long long start, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_entries(0),
+ m_entry_count(0),
+ m_void_elements(0),
+ m_void_element_count(0) {}
-SeekHead::~SeekHead()
-{
- delete[] m_entries;
- delete[] m_void_elements;
+SeekHead::~SeekHead() {
+ delete[] m_entries;
+ delete[] m_void_elements;
}
+long SeekHead::Parse() {
+ IMkvReader* const pReader = m_pSegment->m_pReader;
-long SeekHead::Parse()
-{
- IMkvReader* const pReader = m_pSegment->m_pReader;
+ long long pos = m_start;
+ const long long stop = m_start + m_size;
- long long pos = m_start;
- const long long stop = m_start + m_size;
+ // first count the seek head entries
- //first count the seek head entries
+ int entry_count = 0;
+ int void_element_count = 0;
- int entry_count = 0;
- int void_element_count = 0;
+ while (pos < stop) {
+ long long id, size;
- while (pos < stop)
- {
- long long id, size;
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
- const long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
+ if (status < 0) // error
+ return status;
- if (status < 0) //error
- return status;
+ if (id == 0x0DBB) // SeekEntry ID
+ ++entry_count;
+ else if (id == 0x6C) // Void ID
+ ++void_element_count;
- if (id == 0x0DBB) //SeekEntry ID
- ++entry_count;
- else if (id == 0x6C) //Void ID
- ++void_element_count;
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
- pos += size; //consume payload
- assert(pos <= stop);
+ assert(pos == stop);
+
+ m_entries = new (std::nothrow) Entry[entry_count];
+
+ if (m_entries == NULL)
+ return -1;
+
+ m_void_elements = new (std::nothrow) VoidElement[void_element_count];
+
+ if (m_void_elements == NULL)
+ return -1;
+
+ // now parse the entries and void elements
+
+ Entry* pEntry = m_entries;
+ VoidElement* pVoidElement = m_void_elements;
+
+ pos = m_start;
+
+ while (pos < stop) {
+ const long long idpos = pos;
+
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == 0x0DBB) { // SeekEntry ID
+ if (ParseEntry(pReader, pos, size, pEntry)) {
+ Entry& e = *pEntry++;
+
+ e.element_start = idpos;
+ e.element_size = (pos + size) - idpos;
+ }
+ } else if (id == 0x6C) { // Void ID
+ VoidElement& e = *pVoidElement++;
+
+ e.element_start = idpos;
+ e.element_size = (pos + size) - idpos;
}
- assert(pos == stop);
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
- m_entries = new (std::nothrow) Entry[entry_count];
+ assert(pos == stop);
- if (m_entries == NULL)
- return -1;
+ ptrdiff_t count_ = ptrdiff_t(pEntry - m_entries);
+ assert(count_ >= 0);
+ assert(count_ <= entry_count);
- m_void_elements = new (std::nothrow) VoidElement[void_element_count];
+ m_entry_count = static_cast<int>(count_);
- if (m_void_elements == NULL)
- return -1;
+ count_ = ptrdiff_t(pVoidElement - m_void_elements);
+ assert(count_ >= 0);
+ assert(count_ <= void_element_count);
- //now parse the entries and void elements
+ m_void_element_count = static_cast<int>(count_);
- Entry* pEntry = m_entries;
- VoidElement* pVoidElement = m_void_elements;
+ return 0;
+}
- pos = m_start;
+int SeekHead::GetCount() const { return m_entry_count; }
- while (pos < stop)
- {
- const long long idpos = pos;
-
- long long id, size;
-
- const long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
-
- if (status < 0) //error
- return status;
-
- if (id == 0x0DBB) //SeekEntry ID
- {
- if (ParseEntry(pReader, pos, size, pEntry))
- {
- Entry& e = *pEntry++;
-
- e.element_start = idpos;
- e.element_size = (pos + size) - idpos;
- }
- }
- else if (id == 0x6C) //Void ID
- {
- VoidElement& e = *pVoidElement++;
-
- e.element_start = idpos;
- e.element_size = (pos + size) - idpos;
- }
-
- pos += size; //consume payload
- assert(pos <= stop);
- }
-
- assert(pos == stop);
-
- ptrdiff_t count_ = ptrdiff_t(pEntry - m_entries);
- assert(count_ >= 0);
- assert(count_ <= entry_count);
-
- m_entry_count = static_cast<int>(count_);
-
- count_ = ptrdiff_t(pVoidElement - m_void_elements);
- assert(count_ >= 0);
- assert(count_ <= void_element_count);
-
- m_void_element_count = static_cast<int>(count_);
-
+const SeekHead::Entry* SeekHead::GetEntry(int idx) const {
+ if (idx < 0)
return 0;
+
+ if (idx >= m_entry_count)
+ return 0;
+
+ return m_entries + idx;
}
+int SeekHead::GetVoidElementCount() const { return m_void_element_count; }
-int SeekHead::GetCount() const
-{
- return m_entry_count;
+const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const {
+ if (idx < 0)
+ return 0;
+
+ if (idx >= m_void_element_count)
+ return 0;
+
+ return m_void_elements + idx;
}
-const SeekHead::Entry* SeekHead::GetEntry(int idx) const
-{
- if (idx < 0)
- return 0;
-
- if (idx >= m_entry_count)
- return 0;
-
- return m_entries + idx;
-}
-
-int SeekHead::GetVoidElementCount() const
-{
- return m_void_element_count;
-}
-
-const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const
-{
- if (idx < 0)
- return 0;
-
- if (idx >= m_void_element_count)
- return 0;
-
- return m_void_elements + idx;
-}
-
-
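
SeekHead::Parse above uses a count-allocate-fill scheme: one pass counts the SeekEntry and Void elements, exact-size arrays are allocated with new (std::nothrow), and a second pass fills them. The same pattern in isolation, with hypothetical names (a sketch, not the parser's API):

#include <cstddef>
#include <new>

struct Span { long long start; long long size; };

// Pass 1 counts matching elements, pass 2 fills an exactly sized array.
// Returns NULL (leaving out_count at 0) on allocation failure, which
// mirrors the "return -1" paths above.
Span* CountThenFill(const long long* ids, std::size_t n, long long match,
                    int& out_count) {
  out_count = 0;

  int count = 0;
  for (std::size_t i = 0; i < n; ++i)  // pass 1: count only
    if (ids[i] == match)
      ++count;

  Span* const spans = new (std::nothrow) Span[count];
  if (spans == NULL)
    return NULL;

  Span* p = spans;  // pass 2: fill
  for (std::size_t i = 0; i < n; ++i) {
    if (ids[i] == match) {
      p->start = static_cast<long long>(i);
      p->size = 0;
      ++p;
    }
  }

  out_count = count;
  return spans;
}

Counting first avoids any reallocation during the fill pass, at the cost of scanning the payload twice.
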
#if 0
void Segment::ParseCues(long long off)
{
@@ -2073,133 +1845,122 @@
//os << "Segment::ParseCues (end)" << endl;
}
#else
-long Segment::ParseCues(
- long long off,
- long long& pos,
- long& len)
-{
- if (m_pCues)
- return 0; //success
+long Segment::ParseCues(long long off, long long& pos, long& len) {
+ if (m_pCues)
+ return 0; // success
- if (off < 0)
- return -1;
+ if (off < 0)
+ return -1;
- long long total, avail;
+ long long total, avail;
- const int status = m_pReader->Length(&total, &avail);
+ const int status = m_pReader->Length(&total, &avail);
- if (status < 0) //error
- return status;
+ if (status < 0) // error
+ return status;
- assert((total < 0) || (avail <= total));
+ assert((total < 0) || (avail <= total));
- pos = m_start + off;
+ pos = m_start + off;
- if ((total < 0) || (pos >= total))
- return 1; //don't bother parsing cues
+ if ((total < 0) || (pos >= total))
+ return 1; // don't bother parsing cues
- const long long element_start = pos;
- const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+ const long long element_start = pos;
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- long long result = GetUIntLength(m_pReader, pos, len);
+ long long result = GetUIntLength(m_pReader, pos, len);
- if (result < 0) //error
- return static_cast<long>(result);
+ if (result < 0) // error
+ return static_cast<long>(result);
- if (result > 0) //underflow (weird)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if (result > 0) // underflow (weird)
+ {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- const long long idpos = pos;
+ const long long idpos = pos;
- const long long id = ReadUInt(m_pReader, idpos, len);
+ const long long id = ReadUInt(m_pReader, idpos, len);
- if (id != 0x0C53BB6B) //Cues ID
- return E_FILE_FORMAT_INVALID;
+ if (id != 0x0C53BB6B) // Cues ID
+ return E_FILE_FORMAT_INVALID;
- pos += len; //consume ID
- assert((segment_stop < 0) || (pos <= segment_stop));
+ pos += len; // consume ID
+ assert((segment_stop < 0) || (pos <= segment_stop));
- //Read Size
+ // Read Size
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- result = GetUIntLength(m_pReader, pos, len);
+ result = GetUIntLength(m_pReader, pos, len);
- if (result < 0) //error
- return static_cast<long>(result);
+ if (result < 0) // error
+ return static_cast<long>(result);
- if (result > 0) //underflow (weird)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if (result > 0) // underflow (weird)
+ {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- const long long size = ReadUInt(m_pReader, pos, len);
+ const long long size = ReadUInt(m_pReader, pos, len);
- if (size < 0) //error
- return static_cast<long>(size);
+ if (size < 0) // error
+ return static_cast<long>(size);
- if (size == 0) //weird, although technically not illegal
- return 1; //done
+ if (size == 0) // weird, although technically not illegal
+ return 1; // done
- pos += len; //consume length of size of element
- assert((segment_stop < 0) || (pos <= segment_stop));
+ pos += len; // consume length of size of element
+ assert((segment_stop < 0) || (pos <= segment_stop));
- //Pos now points to start of payload
+ // Pos now points to start of payload
- const long long element_stop = pos + size;
+ const long long element_stop = pos + size;
- if ((segment_stop >= 0) && (element_stop > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ if ((segment_stop >= 0) && (element_stop > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- if ((total >= 0) && (element_stop > total))
- return 1; //don't bother parsing anymore
+ if ((total >= 0) && (element_stop > total))
+ return 1; // don't bother parsing anymore
- len = static_cast<long>(size);
+ len = static_cast<long>(size);
- if (element_stop > avail)
- return E_BUFFER_NOT_FULL;
+ if (element_stop > avail)
+ return E_BUFFER_NOT_FULL;
- const long long element_size = element_stop - element_start;
+ const long long element_size = element_stop - element_start;
- m_pCues = new (std::nothrow) Cues(
- this,
- pos,
- size,
- element_start,
- element_size);
- assert(m_pCues); //TODO
+ m_pCues =
+ new (std::nothrow) Cues(this, pos, size, element_start, element_size);
+ assert(m_pCues); // TODO
- return 0; //success
+ return 0; // success
}
#endif
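
ParseCues reports E_BUFFER_NOT_FULL with pos and len describing the bytes it still needs, so a caller can fetch that range and retry. A hypothetical pump (FillBuffer stands in for the application's I/O layer; "mkvparser.hpp" is an assumed header name):

#include "mkvparser.hpp"  // assumed header name

void FillBuffer(long long pos, long len);  // hypothetical I/O hook

long PumpParseCues(mkvparser::Segment* pSegment, long long cues_off) {
  long long pos;
  long len;

  for (;;) {
    const long status = pSegment->ParseCues(cues_off, pos, len);

    if (status != mkvparser::E_BUFFER_NOT_FULL)
      return status;  // 0 = parsed, 1 = not worth parsing, <0 = error

    FillBuffer(pos, len);  // fetch the byte range the parser asked for
  }
}
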
-
#if 0
void Segment::ParseSeekEntry(
long long start,
@@ -2259,304 +2020,269 @@
ParseCues(seekOff);
}
#else
-bool SeekHead::ParseEntry(
- IMkvReader* pReader,
- long long start,
- long long size_,
- Entry* pEntry)
-{
- if (size_ <= 0)
- return false;
+bool SeekHead::ParseEntry(IMkvReader* pReader, long long start, long long size_,
+ Entry* pEntry) {
+ if (size_ <= 0)
+ return false;
- long long pos = start;
- const long long stop = start + size_;
+ long long pos = start;
+ const long long stop = start + size_;
- long len;
+ long len;
- //parse the container for the level-1 element ID
+ // parse the container for the level-1 element ID
- const long long seekIdId = ReadUInt(pReader, pos, len);
- //seekIdId;
+ const long long seekIdId = ReadUInt(pReader, pos, len);
+ // seekIdId;
- if (seekIdId != 0x13AB) //SeekID ID
- return false;
+ if (seekIdId != 0x13AB) // SeekID ID
+ return false;
- if ((pos + len) > stop)
- return false;
+ if ((pos + len) > stop)
+ return false;
- pos += len; //consume SeekID id
+ pos += len; // consume SeekID id
- const long long seekIdSize = ReadUInt(pReader, pos, len);
+ const long long seekIdSize = ReadUInt(pReader, pos, len);
- if (seekIdSize <= 0)
- return false;
+ if (seekIdSize <= 0)
+ return false;
- if ((pos + len) > stop)
- return false;
+ if ((pos + len) > stop)
+ return false;
- pos += len; //consume size of field
+ pos += len; // consume size of field
- if ((pos + seekIdSize) > stop)
- return false;
+ if ((pos + seekIdSize) > stop)
+ return false;
- //Note that the SeekId payload really is serialized
- //as a "Matroska integer", not as a plain binary value.
- //In fact, Matroska requires that ID values in the
- //stream exactly match the binary representation as listed
- //in the Matroska specification.
- //
- //This parser is more liberal, and permits IDs to have
- //any width. (This could make the representation in the stream
- //different from what's in the spec, but it doesn't matter here,
- //since we always normalize "Matroska integer" values.)
+ // Note that the SeekId payload really is serialized
+ // as a "Matroska integer", not as a plain binary value.
+ // In fact, Matroska requires that ID values in the
+ // stream exactly match the binary representation as listed
+ // in the Matroska specification.
+ //
+ // This parser is more liberal, and permits IDs to have
+ // any width. (This could make the representation in the stream
+ // different from what's in the spec, but it doesn't matter here,
+ // since we always normalize "Matroska integer" values.)
- pEntry->id = ReadUInt(pReader, pos, len); //payload
+ pEntry->id = ReadUInt(pReader, pos, len); // payload
- if (pEntry->id <= 0)
- return false;
+ if (pEntry->id <= 0)
+ return false;
- if (len != seekIdSize)
- return false;
+ if (len != seekIdSize)
+ return false;
- pos += seekIdSize; //consume SeekID payload
+ pos += seekIdSize; // consume SeekID payload
- const long long seekPosId = ReadUInt(pReader, pos, len);
+ const long long seekPosId = ReadUInt(pReader, pos, len);
- if (seekPosId != 0x13AC) //SeekPos ID
- return false;
+ if (seekPosId != 0x13AC) // SeekPos ID
+ return false;
- if ((pos + len) > stop)
- return false;
+ if ((pos + len) > stop)
+ return false;
- pos += len; //consume id
+ pos += len; // consume id
- const long long seekPosSize = ReadUInt(pReader, pos, len);
+ const long long seekPosSize = ReadUInt(pReader, pos, len);
- if (seekPosSize <= 0)
- return false;
+ if (seekPosSize <= 0)
+ return false;
- if ((pos + len) > stop)
- return false;
+ if ((pos + len) > stop)
+ return false;
- pos += len; //consume size
+ pos += len; // consume size
- if ((pos + seekPosSize) > stop)
- return false;
+ if ((pos + seekPosSize) > stop)
+ return false;
- pEntry->pos = UnserializeUInt(pReader, pos, seekPosSize);
+ pEntry->pos = UnserializeUInt(pReader, pos, seekPosSize);
- if (pEntry->pos < 0)
- return false;
+ if (pEntry->pos < 0)
+ return false;
- pos += seekPosSize; //consume payload
+ pos += seekPosSize; // consume payload
- if (pos != stop)
- return false;
+ if (pos != stop)
+ return false;
- return true;
+ return true;
}
#endif
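
A worked example of the normalization described in the comment block above: on the wire the SeekID element ID is the two-byte EBML integer 0x53AB, whose first byte carries a width-marker bit (0x40 for two-byte values); stripping that bit yields the 0x13AB this function compares against, and the same stripping turns the four-byte Cluster ID 0x1F43B675 into the 0x0F43B675 used elsewhere in this file. A small sketch over an in-memory buffer:

#include <stdint.h>

// Normalize an EBML id of the given byte width by masking off the
// width-marker bit in the leading byte, as the parser does for values
// read through ReadUInt.
long long NormalizeEbmlId(const uint8_t* bytes, int width) {
  const uint8_t marker = static_cast<uint8_t>(0x80 >> (width - 1));

  long long v = bytes[0] & static_cast<uint8_t>(~marker);

  for (int i = 1; i < width; ++i)
    v = (v << 8) | bytes[i];

  return v;
}

// e.g. bytes {0x53, 0xAB}, width 2 -> 0x13AB (SeekID, as tested above)
//      bytes {0x53, 0xAC}, width 2 -> 0x13AC (SeekPosition)
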
+Cues::Cues(Segment* pSegment, long long start_, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start_),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_cue_points(NULL),
+ m_count(0),
+ m_preload_count(0),
+ m_pos(start_) {}
-Cues::Cues(
- Segment* pSegment,
- long long start_,
- long long size_,
- long long element_start,
- long long element_size) :
- m_pSegment(pSegment),
- m_start(start_),
- m_size(size_),
- m_element_start(element_start),
- m_element_size(element_size),
- m_cue_points(NULL),
- m_count(0),
- m_preload_count(0),
- m_pos(start_)
-{
+Cues::~Cues() {
+ const long n = m_count + m_preload_count;
+
+ CuePoint** p = m_cue_points;
+ CuePoint** const q = p + n;
+
+ while (p != q) {
+ CuePoint* const pCP = *p++;
+ assert(pCP);
+
+ delete pCP;
+ }
+
+ delete[] m_cue_points;
}
+long Cues::GetCount() const {
+ if (m_cue_points == NULL)
+ return -1;
-Cues::~Cues()
-{
- const long n = m_count + m_preload_count;
+ return m_count; // TODO: really ignore preload count?
+}
- CuePoint** p = m_cue_points;
- CuePoint** const q = p + n;
+bool Cues::DoneParsing() const {
+ const long long stop = m_start + m_size;
+ return (m_pos >= stop);
+}
- while (p != q)
- {
- CuePoint* const pCP = *p++;
- assert(pCP);
+void Cues::Init() const {
+ if (m_cue_points)
+ return;
- delete pCP;
- }
+ assert(m_count == 0);
+ assert(m_preload_count == 0);
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ const long long stop = m_start + m_size;
+ long long pos = m_start;
+
+ long cue_points_size = 0;
+
+ while (pos < stop) {
+ const long long idpos = pos;
+
+ long len;
+
+ const long long id = ReadUInt(pReader, pos, len);
+ assert(id >= 0); // TODO
+ assert((pos + len) <= stop);
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0);
+ assert((pos + len) <= stop);
+
+ pos += len; // consume Size field
+ assert((pos + size) <= stop);
+
+ if (id == 0x3B) // CuePoint ID
+ PreloadCuePoint(cue_points_size, idpos);
+
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
+}
+
+void Cues::PreloadCuePoint(long& cue_points_size, long long pos) const {
+ assert(m_count == 0);
+
+ if (m_preload_count >= cue_points_size) {
+ const long n = (cue_points_size <= 0) ? 2048 : 2 * cue_points_size;
+
+ CuePoint** const qq = new CuePoint* [n];
+ CuePoint** q = qq; // beginning of target
+
+ CuePoint** p = m_cue_points; // beginning of source
+ CuePoint** const pp = p + m_preload_count; // end of source
+
+ while (p != pp)
+ *q++ = *p++;
delete[] m_cue_points;
+
+ m_cue_points = qq;
+ cue_points_size = n;
+ }
+
+ CuePoint* const pCP = new CuePoint(m_preload_count, pos);
+ m_cue_points[m_preload_count++] = pCP;
}
+bool Cues::LoadCuePoint() const {
+ // odbgstream os;
+ // os << "Cues::LoadCuePoint" << endl;
-long Cues::GetCount() const
-{
- if (m_cue_points == NULL)
- return -1;
+ const long long stop = m_start + m_size;
- return m_count; //TODO: really ignore preload count?
-}
+ if (m_pos >= stop)
+ return false; // nothing else to do
+ Init();
-bool Cues::DoneParsing() const
-{
- const long long stop = m_start + m_size;
- return (m_pos >= stop);
-}
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+ while (m_pos < stop) {
+ const long long idpos = m_pos;
-void Cues::Init() const
-{
- if (m_cue_points)
- return;
+ long len;
- assert(m_count == 0);
- assert(m_preload_count == 0);
+ const long long id = ReadUInt(pReader, m_pos, len);
+ assert(id >= 0); // TODO
+ assert((m_pos + len) <= stop);
- IMkvReader* const pReader = m_pSegment->m_pReader;
+ m_pos += len; // consume ID
- const long long stop = m_start + m_size;
- long long pos = m_start;
+ const long long size = ReadUInt(pReader, m_pos, len);
+ assert(size >= 0);
+ assert((m_pos + len) <= stop);
- long cue_points_size = 0;
+ m_pos += len; // consume Size field
+ assert((m_pos + size) <= stop);
- while (pos < stop)
- {
- const long long idpos = pos;
+ if (id != 0x3B) { // CuePoint ID
+ m_pos += size; // consume payload
+ assert(m_pos <= stop);
- long len;
-
- const long long id = ReadUInt(pReader, pos, len);
- assert(id >= 0); //TODO
- assert((pos + len) <= stop);
-
- pos += len; //consume ID
-
- const long long size = ReadUInt(pReader, pos, len);
- assert(size >= 0);
- assert((pos + len) <= stop);
-
- pos += len; //consume Size field
- assert((pos + size) <= stop);
-
- if (id == 0x3B) //CuePoint ID
- PreloadCuePoint(cue_points_size, idpos);
-
- pos += size; //consume payload
- assert(pos <= stop);
- }
-}
-
-
-void Cues::PreloadCuePoint(
- long& cue_points_size,
- long long pos) const
-{
- assert(m_count == 0);
-
- if (m_preload_count >= cue_points_size)
- {
- const long n = (cue_points_size <= 0) ? 2048 : 2*cue_points_size;
-
- CuePoint** const qq = new CuePoint*[n];
- CuePoint** q = qq; //beginning of target
-
- CuePoint** p = m_cue_points; //beginning of source
- CuePoint** const pp = p + m_preload_count; //end of source
-
- while (p != pp)
- *q++ = *p++;
-
- delete[] m_cue_points;
-
- m_cue_points = qq;
- cue_points_size = n;
+ continue;
}
- CuePoint* const pCP = new CuePoint(m_preload_count, pos);
- m_cue_points[m_preload_count++] = pCP;
+ assert(m_preload_count > 0);
+
+ CuePoint* const pCP = m_cue_points[m_count];
+ assert(pCP);
+ assert((pCP->GetTimeCode() >= 0) || (-pCP->GetTimeCode() == idpos));
+ if (pCP->GetTimeCode() < 0 && (-pCP->GetTimeCode() != idpos))
+ return false;
+
+ pCP->Load(pReader);
+ ++m_count;
+ --m_preload_count;
+
+ m_pos += size; // consume payload
+ assert(m_pos <= stop);
+
+ return true; // yes, we loaded a cue point
+ }
+
+ // return (m_pos < stop);
+ return false; // no, we did not load a cue point
}
-
-bool Cues::LoadCuePoint() const
-{
- //odbgstream os;
- //os << "Cues::LoadCuePoint" << endl;
-
- const long long stop = m_start + m_size;
-
- if (m_pos >= stop)
- return false; //nothing else to do
-
- Init();
-
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- while (m_pos < stop)
- {
- const long long idpos = m_pos;
-
- long len;
-
- const long long id = ReadUInt(pReader, m_pos, len);
- assert(id >= 0); //TODO
- assert((m_pos + len) <= stop);
-
- m_pos += len; //consume ID
-
- const long long size = ReadUInt(pReader, m_pos, len);
- assert(size >= 0);
- assert((m_pos + len) <= stop);
-
- m_pos += len; //consume Size field
- assert((m_pos + size) <= stop);
-
- if (id != 0x3B) //CuePoint ID
- {
- m_pos += size; //consume payload
- assert(m_pos <= stop);
-
- continue;
- }
-
- assert(m_preload_count > 0);
-
- CuePoint* const pCP = m_cue_points[m_count];
- assert(pCP);
- assert((pCP->GetTimeCode() >= 0) || (-pCP->GetTimeCode() == idpos));
- if (pCP->GetTimeCode() < 0 && (-pCP->GetTimeCode() != idpos))
- return false;
-
- pCP->Load(pReader);
- ++m_count;
- --m_preload_count;
-
- m_pos += size; //consume payload
- assert(m_pos <= stop);
-
- return true; //yes, we loaded a cue point
- }
-
- //return (m_pos < stop);
- return false; //no, we did not load a cue point
-}
-
-
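
Cues::LoadCuePoint above converts exactly one preloaded cue point into a fully parsed one per call, and returns false once none remain, so forcing the whole index in early is just a loop. A hypothetical helper (normally the lazy, one-at-a-time behavior is the point):

#include "mkvparser.hpp"  // assumed header name

void LoadAllCuePoints(const mkvparser::Cues* pCues) {
  while (pCues->LoadCuePoint())
    ;  // each iteration parses one CuePoint payload
}
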
-bool Cues::Find(
- long long time_ns,
- const Track* pTrack,
- const CuePoint*& pCP,
- const CuePoint::TrackPosition*& pTP) const
-{
- assert(time_ns >= 0);
- assert(pTrack);
+bool Cues::Find(long long time_ns, const Track* pTrack, const CuePoint*& pCP,
+ const CuePoint::TrackPosition*& pTP) const {
+ assert(time_ns >= 0);
+ assert(pTrack);
#if 0
LoadCuePoint(); //establish invariant
@@ -2614,71 +2340,68 @@
assert(pCP);
assert(pCP->GetTime(m_pSegment) <= time_ns);
#else
- if (m_cue_points == NULL)
- return false;
+ if (m_cue_points == NULL)
+ return false;
- if (m_count == 0)
- return false;
+ if (m_count == 0)
+ return false;
- CuePoint** const ii = m_cue_points;
- CuePoint** i = ii;
+ CuePoint** const ii = m_cue_points;
+ CuePoint** i = ii;
- CuePoint** const jj = ii + m_count;
- CuePoint** j = jj;
+ CuePoint** const jj = ii + m_count;
+ CuePoint** j = jj;
- pCP = *i;
- assert(pCP);
+ pCP = *i;
+ assert(pCP);
- if (time_ns <= pCP->GetTime(m_pSegment))
- {
- pTP = pCP->Find(pTrack);
- return (pTP != NULL);
- }
-
- while (i < j)
- {
- //INVARIANT:
- //[ii, i) <= time_ns
- //[i, j) ?
- //[j, jj) > time_ns
-
- CuePoint** const k = i + (j - i) / 2;
- assert(k < jj);
-
- CuePoint* const pCP = *k;
- assert(pCP);
-
- const long long t = pCP->GetTime(m_pSegment);
-
- if (t <= time_ns)
- i = k + 1;
- else
- j = k;
-
- assert(i <= j);
- }
-
- assert(i == j);
- assert(i <= jj);
- assert(i > ii);
-
- pCP = *--i;
- assert(pCP);
- assert(pCP->GetTime(m_pSegment) <= time_ns);
-#endif
-
- //TODO: here and elsewhere, it's probably not correct to search
- //for the cue point with this time, and then search for a matching
- //track. In principle, the matching track could be on some earlier
- //cue point, and with our current algorithm, we'd miss it. To make
- //this bullet-proof, we'd need to create a secondary structure,
- //with a list of cue points that apply to a track, and then search
- //that track-based structure for a matching cue point.
-
+ if (time_ns <= pCP->GetTime(m_pSegment)) {
pTP = pCP->Find(pTrack);
return (pTP != NULL);
-}
+ }
+ while (i < j) {
+ // INVARIANT:
+ //[ii, i) <= time_ns
+ //[i, j) ?
+ //[j, jj) > time_ns
+
+ CuePoint** const k = i + (j - i) / 2;
+ assert(k < jj);
+
+ CuePoint* const pCP = *k;
+ assert(pCP);
+
+ const long long t = pCP->GetTime(m_pSegment);
+
+ if (t <= time_ns)
+ i = k + 1;
+ else
+ j = k;
+
+ assert(i <= j);
+ }
+
+ assert(i == j);
+ assert(i <= jj);
+ assert(i > ii);
+
+ pCP = *--i;
+ assert(pCP);
+ assert(pCP->GetTime(m_pSegment) <= time_ns);
+#endif
+
+ // TODO: here and elsewhere, it's probably not correct to search
+ // for the cue point with this time, and then search for a matching
+ // track. In principle, the matching track could be on some earlier
+ // cue point, and with our current algorithm, we'd miss it. To make
+ // this bullet-proof, we'd need to create a secondary structure,
+ // with a list of cue points that apply to a track, and then search
+ // that track-based structure for a matching cue point.
+
+ pTP = pCP->Find(pTrack);
+ return (pTP != NULL);
+}
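
The search in Cues::Find keeps the invariant annotated above -- everything in [ii, i) is <= time_ns and everything in [j, jj) is > time_ns -- so it terminates on the last cue point at or before the requested time. The same invariant in a generic, self-contained form:

// Find the index of the last element <= key in a sorted array, or -1
// if every element is greater. Same invariant as Cues::Find:
//   [0, i)  <= key
//   [i, j)  unknown
//   [j, n)  >  key
long FindLastNotGreater(const long long* a, long n, long long key) {
  long i = 0;
  long j = n;

  while (i < j) {
    const long k = i + (j - i) / 2;  // midpoint without (i + j) overflow

    if (a[k] <= key)
      i = k + 1;
    else
      j = k;
  }

  return i - 1;  // i == j: one past the last element <= key
}
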
#if 0
bool Cues::FindNext(
@@ -2739,14 +2462,12 @@
}
#endif
+const CuePoint* Cues::GetFirst() const {
+ if (m_cue_points == NULL)
+ return NULL;
-const CuePoint* Cues::GetFirst() const
-{
- if (m_cue_points == NULL)
- return NULL;
-
- if (m_count == 0)
- return NULL;
+ if (m_count == 0)
+ return NULL;
#if 0
LoadCuePoint(); //init cues
@@ -2757,24 +2478,22 @@
return NULL;
#endif
- CuePoint* const* const pp = m_cue_points;
- assert(pp);
+ CuePoint* const* const pp = m_cue_points;
+ assert(pp);
- CuePoint* const pCP = pp[0];
- assert(pCP);
- assert(pCP->GetTimeCode() >= 0);
+ CuePoint* const pCP = pp[0];
+ assert(pCP);
+ assert(pCP->GetTimeCode() >= 0);
- return pCP;
+ return pCP;
}
+const CuePoint* Cues::GetLast() const {
+ if (m_cue_points == NULL)
+ return NULL;
-const CuePoint* Cues::GetLast() const
-{
- if (m_cue_points == NULL)
- return NULL;
-
- if (m_count <= 0)
- return NULL;
+ if (m_count <= 0)
+ return NULL;
#if 0
LoadCuePoint(); //init cues
@@ -2795,28 +2514,26 @@
pCP->Load(m_pSegment->m_pReader);
assert(pCP->GetTimeCode() >= 0);
#else
- const long index = m_count - 1;
+ const long index = m_count - 1;
- CuePoint* const* const pp = m_cue_points;
- assert(pp);
+ CuePoint* const* const pp = m_cue_points;
+ assert(pp);
- CuePoint* const pCP = pp[index];
- assert(pCP);
- assert(pCP->GetTimeCode() >= 0);
+ CuePoint* const pCP = pp[index];
+ assert(pCP);
+ assert(pCP->GetTimeCode() >= 0);
#endif
- return pCP;
+ return pCP;
}
+const CuePoint* Cues::GetNext(const CuePoint* pCurr) const {
+ if (pCurr == NULL)
+ return NULL;
-const CuePoint* Cues::GetNext(const CuePoint* pCurr) const
-{
- if (pCurr == NULL)
- return NULL;
-
- assert(pCurr->GetTimeCode() >= 0);
- assert(m_cue_points);
- assert(m_count >= 1);
+ assert(pCurr->GetTimeCode() >= 0);
+ assert(m_cue_points);
+ assert(m_count >= 1);
#if 0
const size_t count = m_count + m_preload_count;
@@ -2838,386 +2555,347 @@
pNext->Load(m_pSegment->m_pReader);
#else
- long index = pCurr->m_index;
- assert(index < m_count);
+ long index = pCurr->m_index;
+ assert(index < m_count);
- CuePoint* const* const pp = m_cue_points;
- assert(pp);
- assert(pp[index] == pCurr);
+ CuePoint* const* const pp = m_cue_points;
+ assert(pp);
+ assert(pp[index] == pCurr);
- ++index;
+ ++index;
- if (index >= m_count)
- return NULL;
+ if (index >= m_count)
+ return NULL;
- CuePoint* const pNext = pp[index];
- assert(pNext);
- assert(pNext->GetTimeCode() >= 0);
+ CuePoint* const pNext = pp[index];
+ assert(pNext);
+ assert(pNext->GetTimeCode() >= 0);
#endif
- return pNext;
+ return pNext;
}
+const BlockEntry* Cues::GetBlock(const CuePoint* pCP,
+ const CuePoint::TrackPosition* pTP) const {
+ if (pCP == NULL)
+ return NULL;
-const BlockEntry* Cues::GetBlock(
- const CuePoint* pCP,
- const CuePoint::TrackPosition* pTP) const
-{
- if (pCP == NULL)
- return NULL;
+ if (pTP == NULL)
+ return NULL;
- if (pTP == NULL)
- return NULL;
-
- return m_pSegment->GetBlock(*pCP, *pTP);
+ return m_pSegment->GetBlock(*pCP, *pTP);
}
+const BlockEntry* Segment::GetBlock(const CuePoint& cp,
+ const CuePoint::TrackPosition& tp) {
+ Cluster** const ii = m_clusters;
+ Cluster** i = ii;
-const BlockEntry* Segment::GetBlock(
- const CuePoint& cp,
- const CuePoint::TrackPosition& tp)
-{
- Cluster** const ii = m_clusters;
- Cluster** i = ii;
+ const long count = m_clusterCount + m_clusterPreloadCount;
- const long count = m_clusterCount + m_clusterPreloadCount;
+ Cluster** const jj = ii + count;
+ Cluster** j = jj;
- Cluster** const jj = ii + count;
- Cluster** j = jj;
+ while (i < j) {
+ // INVARIANT:
+ //[ii, i) < pTP->m_pos
+ //[i, j) ?
+ //[j, jj) > pTP->m_pos
- while (i < j)
- {
- //INVARIANT:
- //[ii, i) < pTP->m_pos
- //[i, j) ?
- //[j, jj) > pTP->m_pos
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
- Cluster** const k = i + (j - i) / 2;
- assert(k < jj);
-
- Cluster* const pCluster = *k;
- assert(pCluster);
-
- //const long long pos_ = pCluster->m_pos;
- //assert(pos_);
- //const long long pos = pos_ * ((pos_ < 0) ? -1 : 1);
-
- const long long pos = pCluster->GetPosition();
- assert(pos >= 0);
-
- if (pos < tp.m_pos)
- i = k + 1;
- else if (pos > tp.m_pos)
- j = k;
- else
- return pCluster->GetEntry(cp, tp);
- }
-
- assert(i == j);
- //assert(Cluster::HasBlockEntries(this, tp.m_pos));
-
- Cluster* const pCluster = Cluster::Create(this, -1, tp.m_pos); //, -1);
+ Cluster* const pCluster = *k;
assert(pCluster);
- const ptrdiff_t idx = i - m_clusters;
+ // const long long pos_ = pCluster->m_pos;
+ // assert(pos_);
+ // const long long pos = pos_ * ((pos_ < 0) ? -1 : 1);
- PreloadCluster(pCluster, idx);
- assert(m_clusters);
- assert(m_clusterPreloadCount > 0);
- assert(m_clusters[idx] == pCluster);
+ const long long pos = pCluster->GetPosition();
+ assert(pos >= 0);
- return pCluster->GetEntry(cp, tp);
+ if (pos < tp.m_pos)
+ i = k + 1;
+ else if (pos > tp.m_pos)
+ j = k;
+ else
+ return pCluster->GetEntry(cp, tp);
+ }
+
+ assert(i == j);
+ // assert(Cluster::HasBlockEntries(this, tp.m_pos));
+
+ Cluster* const pCluster = Cluster::Create(this, -1, tp.m_pos); //, -1);
+ assert(pCluster);
+
+ const ptrdiff_t idx = i - m_clusters;
+
+ PreloadCluster(pCluster, idx);
+ assert(m_clusters);
+ assert(m_clusterPreloadCount > 0);
+ assert(m_clusters[idx] == pCluster);
+
+ return pCluster->GetEntry(cp, tp);
}
+const Cluster* Segment::FindOrPreloadCluster(long long requested_pos) {
+ if (requested_pos < 0)
+ return 0;
-const Cluster* Segment::FindOrPreloadCluster(long long requested_pos)
-{
- if (requested_pos < 0)
- return 0;
+ Cluster** const ii = m_clusters;
+ Cluster** i = ii;
- Cluster** const ii = m_clusters;
- Cluster** i = ii;
+ const long count = m_clusterCount + m_clusterPreloadCount;
- const long count = m_clusterCount + m_clusterPreloadCount;
+ Cluster** const jj = ii + count;
+ Cluster** j = jj;
- Cluster** const jj = ii + count;
- Cluster** j = jj;
+ while (i < j) {
+ // INVARIANT:
+ //[ii, i) < pTP->m_pos
+ //[i, j) ?
+ //[j, jj) > pTP->m_pos
- while (i < j)
- {
- //INVARIANT:
- //[ii, i) < pTP->m_pos
- //[i, j) ?
- //[j, jj) > pTP->m_pos
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
- Cluster** const k = i + (j - i) / 2;
- assert(k < jj);
-
- Cluster* const pCluster = *k;
- assert(pCluster);
-
- //const long long pos_ = pCluster->m_pos;
- //assert(pos_);
- //const long long pos = pos_ * ((pos_ < 0) ? -1 : 1);
-
- const long long pos = pCluster->GetPosition();
- assert(pos >= 0);
-
- if (pos < requested_pos)
- i = k + 1;
- else if (pos > requested_pos)
- j = k;
- else
- return pCluster;
- }
-
- assert(i == j);
- //assert(Cluster::HasBlockEntries(this, tp.m_pos));
-
- Cluster* const pCluster = Cluster::Create(
- this,
- -1,
- requested_pos);
- //-1);
+ Cluster* const pCluster = *k;
assert(pCluster);
- const ptrdiff_t idx = i - m_clusters;
+ // const long long pos_ = pCluster->m_pos;
+ // assert(pos_);
+ // const long long pos = pos_ * ((pos_ < 0) ? -1 : 1);
- PreloadCluster(pCluster, idx);
- assert(m_clusters);
- assert(m_clusterPreloadCount > 0);
- assert(m_clusters[idx] == pCluster);
+ const long long pos = pCluster->GetPosition();
+ assert(pos >= 0);
- return pCluster;
+ if (pos < requested_pos)
+ i = k + 1;
+ else if (pos > requested_pos)
+ j = k;
+ else
+ return pCluster;
+ }
+
+ assert(i == j);
+ // assert(Cluster::HasBlockEntries(this, tp.m_pos));
+
+ Cluster* const pCluster = Cluster::Create(this, -1, requested_pos);
+ //-1);
+ assert(pCluster);
+
+ const ptrdiff_t idx = i - m_clusters;
+
+ PreloadCluster(pCluster, idx);
+ assert(m_clusters);
+ assert(m_clusterPreloadCount > 0);
+ assert(m_clusters[idx] == pCluster);
+
+ return pCluster;
}
-
-CuePoint::CuePoint(long idx, long long pos) :
- m_element_start(0),
- m_element_size(0),
- m_index(idx),
- m_timecode(-1 * pos),
- m_track_positions(NULL),
- m_track_positions_count(0)
-{
- assert(pos > 0);
+CuePoint::CuePoint(long idx, long long pos)
+ : m_element_start(0),
+ m_element_size(0),
+ m_index(idx),
+ m_timecode(-1 * pos),
+ m_track_positions(NULL),
+ m_track_positions_count(0) {
+ assert(pos > 0);
}
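
The constructor above packs two states into m_timecode: a negative value means "not loaded yet, and the CuePoint's file position is -m_timecode", while a non-negative value is the real timecode (CuePoint::Load below checks m_timecode >= 0 and otherwise recovers the position). The same trick in miniature, with hypothetical names; note it relies on the position being strictly positive, matching the assert(pos > 0) above:

struct LazyTimecode {
  long long state;  // < 0: encodes -file_pos (unloaded); >= 0: timecode

  explicit LazyTimecode(long long file_pos) : state(-1 * file_pos) {}

  bool IsLoaded() const { return state >= 0; }
  long long FilePos() const { return -state; }    // only valid before loading
  void SetTimecode(long long tc) { state = tc; }  // tc must be >= 0
};
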
+CuePoint::~CuePoint() { delete[] m_track_positions; }
-CuePoint::~CuePoint()
-{
- delete[] m_track_positions;
-}
+void CuePoint::Load(IMkvReader* pReader) {
+ // odbgstream os;
+ // os << "CuePoint::Load(begin): timecode=" << m_timecode << endl;
+ if (m_timecode >= 0) // already loaded
+ return;
-void CuePoint::Load(IMkvReader* pReader)
-{
- //odbgstream os;
- //os << "CuePoint::Load(begin): timecode=" << m_timecode << endl;
+ assert(m_track_positions == NULL);
+ assert(m_track_positions_count == 0);
- if (m_timecode >= 0) //already loaded
- return;
+ long long pos_ = -m_timecode;
+ const long long element_start = pos_;
- assert(m_track_positions == NULL);
- assert(m_track_positions_count == 0);
+ long long stop;
- long long pos_ = -m_timecode;
- const long long element_start = pos_;
+ {
+ long len;
- long long stop;
+ const long long id = ReadUInt(pReader, pos_, len);
+ assert(id == 0x3B); // CuePoint ID
+ if (id != 0x3B)
+ return;
- {
- long len;
+ pos_ += len; // consume ID
- const long long id = ReadUInt(pReader, pos_, len);
- assert(id == 0x3B); //CuePoint ID
- if (id != 0x3B)
- return;
+ const long long size = ReadUInt(pReader, pos_, len);
+ assert(size >= 0);
- pos_ += len; //consume ID
+ pos_ += len; // consume Size field
+ // pos_ now points to start of payload
- const long long size = ReadUInt(pReader, pos_, len);
- assert(size >= 0);
+ stop = pos_ + size;
+ }
- pos_ += len; //consume Size field
- //pos_ now points to start of payload
+ const long long element_size = stop - element_start;
- stop = pos_ + size;
+ long long pos = pos_;
+
+ // First count number of track positions
+
+ while (pos < stop) {
+ long len;
+
+ const long long id = ReadUInt(pReader, pos, len);
+ assert(id >= 0); // TODO
+ assert((pos + len) <= stop);
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0);
+ assert((pos + len) <= stop);
+
+ pos += len; // consume Size field
+ assert((pos + size) <= stop);
+
+ if (id == 0x33) // CueTime ID
+ m_timecode = UnserializeUInt(pReader, pos, size);
+
+ else if (id == 0x37) // CueTrackPosition(s) ID
+ ++m_track_positions_count;
+
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
+
+ assert(m_timecode >= 0);
+ assert(m_track_positions_count > 0);
+
+ // os << "CuePoint::Load(cont'd): idpos=" << idpos
+ // << " timecode=" << m_timecode
+ // << endl;
+
+ m_track_positions = new TrackPosition[m_track_positions_count];
+
+ // Now parse track positions
+
+ TrackPosition* p = m_track_positions;
+ pos = pos_;
+
+ while (pos < stop) {
+ long len;
+
+ const long long id = ReadUInt(pReader, pos, len);
+ assert(id >= 0); // TODO
+ assert((pos + len) <= stop);
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0);
+ assert((pos + len) <= stop);
+
+ pos += len; // consume Size field
+ assert((pos + size) <= stop);
+
+ if (id == 0x37) { // CueTrackPosition(s) ID
+ TrackPosition& tp = *p++;
+ tp.Parse(pReader, pos, size);
}
- const long long element_size = stop - element_start;
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
- long long pos = pos_;
+ assert(size_t(p - m_track_positions) == m_track_positions_count);
- //First count number of track positions
-
- while (pos < stop)
- {
- long len;
-
- const long long id = ReadUInt(pReader, pos, len);
- assert(id >= 0); //TODO
- assert((pos + len) <= stop);
-
- pos += len; //consume ID
-
- const long long size = ReadUInt(pReader, pos, len);
- assert(size >= 0);
- assert((pos + len) <= stop);
-
- pos += len; //consume Size field
- assert((pos + size) <= stop);
-
- if (id == 0x33) //CueTime ID
- m_timecode = UnserializeUInt(pReader, pos, size);
-
- else if (id == 0x37) //CueTrackPosition(s) ID
- ++m_track_positions_count;
-
- pos += size; //consume payload
- assert(pos <= stop);
- }
-
- assert(m_timecode >= 0);
- assert(m_track_positions_count > 0);
-
- //os << "CuePoint::Load(cont'd): idpos=" << idpos
- // << " timecode=" << m_timecode
- // << endl;
-
- m_track_positions = new TrackPosition[m_track_positions_count];
-
- //Now parse track positions
-
- TrackPosition* p = m_track_positions;
- pos = pos_;
-
- while (pos < stop)
- {
- long len;
-
- const long long id = ReadUInt(pReader, pos, len);
- assert(id >= 0); //TODO
- assert((pos + len) <= stop);
-
- pos += len; //consume ID
-
- const long long size = ReadUInt(pReader, pos, len);
- assert(size >= 0);
- assert((pos + len) <= stop);
-
- pos += len; //consume Size field
- assert((pos + size) <= stop);
-
- if (id == 0x37) //CueTrackPosition(s) ID
- {
- TrackPosition& tp = *p++;
- tp.Parse(pReader, pos, size);
- }
-
- pos += size; //consume payload
- assert(pos <= stop);
- }
-
- assert(size_t(p - m_track_positions) == m_track_positions_count);
-
- m_element_start = element_start;
- m_element_size = element_size;
+ m_element_start = element_start;
+ m_element_size = element_size;
}
+void CuePoint::TrackPosition::Parse(IMkvReader* pReader, long long start_,
+ long long size_) {
+ const long long stop = start_ + size_;
+ long long pos = start_;
+ m_track = -1;
+ m_pos = -1;
+ m_block = 1; // default
-void CuePoint::TrackPosition::Parse(
- IMkvReader* pReader,
- long long start_,
- long long size_)
-{
- const long long stop = start_ + size_;
- long long pos = start_;
+ while (pos < stop) {
+ long len;
- m_track = -1;
- m_pos = -1;
- m_block = 1; //default
+ const long long id = ReadUInt(pReader, pos, len);
+ assert(id >= 0); // TODO
+ assert((pos + len) <= stop);
- while (pos < stop)
- {
- long len;
+ pos += len; // consume ID
- const long long id = ReadUInt(pReader, pos, len);
- assert(id >= 0); //TODO
- assert((pos + len) <= stop);
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0);
+ assert((pos + len) <= stop);
- pos += len; //consume ID
+ pos += len; // consume Size field
+ assert((pos + size) <= stop);
- const long long size = ReadUInt(pReader, pos, len);
- assert(size >= 0);
- assert((pos + len) <= stop);
+ if (id == 0x77) // CueTrack ID
+ m_track = UnserializeUInt(pReader, pos, size);
- pos += len; //consume Size field
- assert((pos + size) <= stop);
+ else if (id == 0x71) // CueClusterPos ID
+ m_pos = UnserializeUInt(pReader, pos, size);
- if (id == 0x77) //CueTrack ID
- m_track = UnserializeUInt(pReader, pos, size);
+ else if (id == 0x1378) // CueBlockNumber
+ m_block = UnserializeUInt(pReader, pos, size);
- else if (id == 0x71) //CueClusterPos ID
- m_pos = UnserializeUInt(pReader, pos, size);
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
- else if (id == 0x1378) //CueBlockNumber
- m_block = UnserializeUInt(pReader, pos, size);
-
- pos += size; //consume payload
- assert(pos <= stop);
- }
-
- assert(m_pos >= 0);
- assert(m_track > 0);
- //assert(m_block > 0);
+ assert(m_pos >= 0);
+ assert(m_track > 0);
+ // assert(m_block > 0);
}
+const CuePoint::TrackPosition* CuePoint::Find(const Track* pTrack) const {
+ assert(pTrack);
-const CuePoint::TrackPosition* CuePoint::Find(const Track* pTrack) const
-{
- assert(pTrack);
+ const long long n = pTrack->GetNumber();
- const long long n = pTrack->GetNumber();
+ const TrackPosition* i = m_track_positions;
+ const TrackPosition* const j = i + m_track_positions_count;
- const TrackPosition* i = m_track_positions;
- const TrackPosition* const j = i + m_track_positions_count;
+ while (i != j) {
+ const TrackPosition& p = *i++;
- while (i != j)
- {
- const TrackPosition& p = *i++;
+ if (p.m_track == n)
+ return &p;
+ }
- if (p.m_track == n)
- return &p;
- }
-
- return NULL; //no matching track number found
+ return NULL; // no matching track number found
}
+long long CuePoint::GetTimeCode() const { return m_timecode; }
-long long CuePoint::GetTimeCode() const
-{
- return m_timecode;
+long long CuePoint::GetTime(const Segment* pSegment) const {
+ assert(pSegment);
+ assert(m_timecode >= 0);
+
+ const SegmentInfo* const pInfo = pSegment->GetInfo();
+ assert(pInfo);
+
+ const long long scale = pInfo->GetTimeCodeScale();
+ assert(scale >= 1);
+
+ const long long time = scale * m_timecode;
+
+ return time;
}
-long long CuePoint::GetTime(const Segment* pSegment) const
-{
- assert(pSegment);
- assert(m_timecode >= 0);
-
- const SegmentInfo* const pInfo = pSegment->GetInfo();
- assert(pInfo);
-
- const long long scale = pInfo->GetTimeCodeScale();
- assert(scale >= 1);
-
- const long long time = scale * m_timecode;
-
- return time;
-}
-
-
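
GetTime above converts a raw cue timecode to absolute time by multiplying with the segment's TimeCodeScale (nanoseconds per timecode tick). With the common Matroska default scale of 1,000,000, a timecode of 7500 works out to 7500 * 1,000,000 = 7,500,000,000 ns, i.e. 7.5 seconds:

// Same arithmetic as CuePoint::GetTime; the caller must ensure the
// product cannot overflow long long.
long long TimecodeToNs(long long timecode, long long scale_ns_per_tick) {
  return timecode * scale_ns_per_tick;
}
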
#if 0
long long Segment::Unparsed() const
{
@@ -3232,808 +2910,745 @@
return result;
}
#else
-bool Segment::DoneParsing() const
-{
- if (m_size < 0)
- {
- long long total, avail;
+bool Segment::DoneParsing() const {
+ if (m_size < 0) {
+ long long total, avail;
- const int status = m_pReader->Length(&total, &avail);
+ const int status = m_pReader->Length(&total, &avail);
- if (status < 0) //error
- return true; //must assume done
+ if (status < 0) // error
+ return true; // must assume done
- if (total < 0)
- return false; //assume live stream
+ if (total < 0)
+ return false; // assume live stream
- return (m_pos >= total);
- }
+ return (m_pos >= total);
+ }
- const long long stop = m_start + m_size;
+ const long long stop = m_start + m_size;
- return (m_pos >= stop);
+ return (m_pos >= stop);
}
#endif
+const Cluster* Segment::GetFirst() const {
+ if ((m_clusters == NULL) || (m_clusterCount <= 0))
+ return &m_eos;
-const Cluster* Segment::GetFirst() const
-{
- if ((m_clusters == NULL) || (m_clusterCount <= 0))
- return &m_eos;
+ Cluster* const pCluster = m_clusters[0];
+ assert(pCluster);
- Cluster* const pCluster = m_clusters[0];
- assert(pCluster);
-
- return pCluster;
+ return pCluster;
}
+const Cluster* Segment::GetLast() const {
+ if ((m_clusters == NULL) || (m_clusterCount <= 0))
+ return &m_eos;
-const Cluster* Segment::GetLast() const
-{
- if ((m_clusters == NULL) || (m_clusterCount <= 0))
- return &m_eos;
+ const long idx = m_clusterCount - 1;
- const long idx = m_clusterCount - 1;
+ Cluster* const pCluster = m_clusters[idx];
+ assert(pCluster);
- Cluster* const pCluster = m_clusters[idx];
- assert(pCluster);
-
- return pCluster;
+ return pCluster;
}
+unsigned long Segment::GetCount() const { return m_clusterCount; }
-unsigned long Segment::GetCount() const
-{
- return m_clusterCount;
-}
+const Cluster* Segment::GetNext(const Cluster* pCurr) {
+ assert(pCurr);
+ assert(pCurr != &m_eos);
+ assert(m_clusters);
+ long idx = pCurr->m_index;
-const Cluster* Segment::GetNext(const Cluster* pCurr)
-{
- assert(pCurr);
- assert(pCurr != &m_eos);
- assert(m_clusters);
+ if (idx >= 0) {
+ assert(m_clusterCount > 0);
+ assert(idx < m_clusterCount);
+ assert(pCurr == m_clusters[idx]);
- long idx = pCurr->m_index;
+ ++idx;
- if (idx >= 0)
- {
- assert(m_clusterCount > 0);
- assert(idx < m_clusterCount);
- assert(pCurr == m_clusters[idx]);
+ if (idx >= m_clusterCount)
+ return &m_eos; // caller will LoadCluster as desired
- ++idx;
-
- if (idx >= m_clusterCount)
- return &m_eos; //caller will LoadCluster as desired
-
- Cluster* const pNext = m_clusters[idx];
- assert(pNext);
- assert(pNext->m_index >= 0);
- assert(pNext->m_index == idx);
-
- return pNext;
- }
-
- assert(m_clusterPreloadCount > 0);
-
- long long pos = pCurr->m_element_start;
-
- assert(m_size >= 0); //TODO
- const long long stop = m_start + m_size; //end of segment
-
- {
- long len;
-
- long long result = GetUIntLength(m_pReader, pos, len);
- assert(result == 0);
- assert((pos + len) <= stop); //TODO
- if (result != 0)
- return NULL;
-
- const long long id = ReadUInt(m_pReader, pos, len);
- assert(id == 0x0F43B675); //Cluster ID
- if (id != 0x0F43B675)
- return NULL;
-
- pos += len; //consume ID
-
- //Read Size
- result = GetUIntLength(m_pReader, pos, len);
- assert(result == 0); //TODO
- assert((pos + len) <= stop); //TODO
-
- const long long size = ReadUInt(m_pReader, pos, len);
- assert(size > 0); //TODO
- //assert((pCurr->m_size <= 0) || (pCurr->m_size == size));
-
- pos += len; //consume length of size of element
- assert((pos + size) <= stop); //TODO
-
- //Pos now points to start of payload
-
- pos += size; //consume payload
- }
-
- long long off_next = 0;
-
- while (pos < stop)
- {
- long len;
-
- long long result = GetUIntLength(m_pReader, pos, len);
- assert(result == 0);
- assert((pos + len) <= stop); //TODO
- if (result != 0)
- return NULL;
-
- const long long idpos = pos; //pos of next (potential) cluster
-
- const long long id = ReadUInt(m_pReader, idpos, len);
- assert(id > 0); //TODO
-
- pos += len; //consume ID
-
- //Read Size
- result = GetUIntLength(m_pReader, pos, len);
- assert(result == 0); //TODO
- assert((pos + len) <= stop); //TODO
-
- const long long size = ReadUInt(m_pReader, pos, len);
- assert(size >= 0); //TODO
-
- pos += len; //consume length of size of element
- assert((pos + size) <= stop); //TODO
-
- //Pos now points to start of payload
-
- if (size == 0) //weird
- continue;
-
- if (id == 0x0F43B675) //Cluster ID
- {
- const long long off_next_ = idpos - m_start;
-
- long long pos_;
- long len_;
-
- const long status = Cluster::HasBlockEntries(
- this,
- off_next_,
- pos_,
- len_);
-
- assert(status >= 0);
-
- if (status > 0)
- {
- off_next = off_next_;
- break;
- }
- }
-
- pos += size; //consume payload
- }
-
- if (off_next <= 0)
- return 0;
-
- Cluster** const ii = m_clusters + m_clusterCount;
- Cluster** i = ii;
-
- Cluster** const jj = ii + m_clusterPreloadCount;
- Cluster** j = jj;
-
- while (i < j)
- {
- //INVARIANT:
- //[0, i) < pos_next
- //[i, j) ?
- //[j, jj) > pos_next
-
- Cluster** const k = i + (j - i) / 2;
- assert(k < jj);
-
- Cluster* const pNext = *k;
- assert(pNext);
- assert(pNext->m_index < 0);
-
- //const long long pos_ = pNext->m_pos;
- //assert(pos_);
- //pos = pos_ * ((pos_ < 0) ? -1 : 1);
-
- pos = pNext->GetPosition();
-
- if (pos < off_next)
- i = k + 1;
- else if (pos > off_next)
- j = k;
- else
- return pNext;
- }
-
- assert(i == j);
-
- Cluster* const pNext = Cluster::Create(this,
- -1,
- off_next);
+ Cluster* const pNext = m_clusters[idx];
assert(pNext);
-
- const ptrdiff_t idx_next = i - m_clusters; //insertion position
-
- PreloadCluster(pNext, idx_next);
- assert(m_clusters);
- assert(idx_next < m_clusterSize);
- assert(m_clusters[idx_next] == pNext);
+ assert(pNext->m_index >= 0);
+ assert(pNext->m_index == idx);
return pNext;
-}
+ }
+ assert(m_clusterPreloadCount > 0);
-long Segment::ParseNext(
- const Cluster* pCurr,
- const Cluster*& pResult,
- long long& pos,
- long& len)
-{
- assert(pCurr);
- assert(!pCurr->EOS());
- assert(m_clusters);
+ long long pos = pCurr->m_element_start;
- pResult = 0;
+ assert(m_size >= 0); // TODO
+ const long long stop = m_start + m_size; // end of segment
- if (pCurr->m_index >= 0) //loaded (not merely preloaded)
- {
- assert(m_clusters[pCurr->m_index] == pCurr);
+ {
+ long len;
- const long next_idx = pCurr->m_index + 1;
+ long long result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0);
+ assert((pos + len) <= stop); // TODO
+ if (result != 0)
+ return NULL;
- if (next_idx < m_clusterCount)
- {
- pResult = m_clusters[next_idx];
- return 0; //success
- }
+ const long long id = ReadUInt(m_pReader, pos, len);
+ assert(id == 0x0F43B675); // Cluster ID
+ if (id != 0x0F43B675)
+ return NULL;
- //curr cluster is last among loaded
+ pos += len; // consume ID
- const long result = LoadCluster(pos, len);
+ // Read Size
+ result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0); // TODO
+ assert((pos + len) <= stop); // TODO
- if (result < 0) //error or underflow
- return result;
+ const long long size = ReadUInt(m_pReader, pos, len);
+ assert(size > 0); // TODO
+ // assert((pCurr->m_size <= 0) || (pCurr->m_size == size));
- if (result > 0) //no more clusters
- {
- //pResult = &m_eos;
- return 1;
- }
+ pos += len; // consume length of size of element
+ assert((pos + size) <= stop); // TODO
- pResult = GetLast();
- return 0; //success
+ // Pos now points to start of payload
+
+ pos += size; // consume payload
+ }
+
+ long long off_next = 0;
+
+ while (pos < stop) {
+ long len;
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0);
+ assert((pos + len) <= stop); // TODO
+ if (result != 0)
+ return NULL;
+
+ const long long idpos = pos; // pos of next (potential) cluster
+
+ const long long id = ReadUInt(m_pReader, idpos, len);
+ assert(id > 0); // TODO
+
+ pos += len; // consume ID
+
+ // Read Size
+ result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0); // TODO
+ assert((pos + len) <= stop); // TODO
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+ assert(size >= 0); // TODO
+
+ pos += len; // consume length of size of element
+ assert((pos + size) <= stop); // TODO
+
+ // Pos now points to start of payload
+
+ if (size == 0) // weird
+ continue;
+
+ if (id == 0x0F43B675) { // Cluster ID
+ const long long off_next_ = idpos - m_start;
+
+ long long pos_;
+ long len_;
+
+ const long status = Cluster::HasBlockEntries(this, off_next_, pos_, len_);
+
+ assert(status >= 0);
+
+ if (status > 0) {
+ off_next = off_next_;
+ break;
+ }
}
- assert(m_pos > 0);
+ pos += size; // consume payload
+ }
- long long total, avail;
+ if (off_next <= 0)
+ return 0;
- long status = m_pReader->Length(&total, &avail);
+ Cluster** const ii = m_clusters + m_clusterCount;
+ Cluster** i = ii;
- if (status < 0) //error
- return status;
+ Cluster** const jj = ii + m_clusterPreloadCount;
+ Cluster** j = jj;
- assert((total < 0) || (avail <= total));
+ while (i < j) {
+ // INVARIANT:
+ //[0, i) < pos_next
+ //[i, j) ?
+ //[j, jj) > pos_next
- const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
- //interrogate curr cluster
+ Cluster* const pNext = *k;
+ assert(pNext);
+ assert(pNext->m_index < 0);
- pos = pCurr->m_element_start;
+ // const long long pos_ = pNext->m_pos;
+ // assert(pos_);
+ // pos = pos_ * ((pos_ < 0) ? -1 : 1);
- if (pCurr->m_element_size >= 0)
- pos += pCurr->m_element_size;
+ pos = pNext->GetPosition();
+
+ if (pos < off_next)
+ i = k + 1;
+ else if (pos > off_next)
+ j = k;
else
- {
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ return pNext;
+ }
- long long result = GetUIntLength(m_pReader, pos, len);
+ assert(i == j);
- if (result < 0) //error
- return static_cast<long>(result);
+ Cluster* const pNext = Cluster::Create(this, -1, off_next);
+ assert(pNext);
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ const ptrdiff_t idx_next = i - m_clusters; // insertion position
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ PreloadCluster(pNext, idx_next);
+ assert(m_clusters);
+ assert(idx_next < m_clusterSize);
+ assert(m_clusters[idx_next] == pNext);
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long id = ReadUInt(m_pReader, pos, len);
-
- if (id != 0x0F43B675) //weird: not Cluster ID
- return -1;
-
- pos += len; //consume ID
-
- //Read Size
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(m_pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(m_pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(size);
-
- pos += len; //consume size field
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (size == unknown_size) //TODO: should never happen
- return E_FILE_FORMAT_INVALID; //TODO: resolve this
-
- //assert((pCurr->m_size <= 0) || (pCurr->m_size == size));
-
- if ((segment_stop >= 0) && ((pos + size) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- //Pos now points to start of payload
-
- pos += size; //consume payload (that is, the current cluster)
- assert((segment_stop < 0) || (pos <= segment_stop));
-
- //By consuming the payload, we are assuming that the curr
- //cluster isn't interesting. That is, we don't bother checking
- //whether the payload of the curr cluster is less than what
- //happens to be available (obtained via IMkvReader::Length).
- //Presumably the caller has already dispensed with the current
- //cluster, and really does want the next cluster.
- }
-
- //pos now points to just beyond the last fully-loaded cluster
-
- for (;;)
- {
- const long status = DoParseNext(pResult, pos, len);
-
- if (status <= 1)
- return status;
- }
+ return pNext;
}
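
Segment::GetNext above returns the end-of-stream sentinel (&m_eos) once the already-loaded clusters are exhausted, so iteration stops cleanly at the loaded boundary. A hypothetical walk over the loaded clusters:

#include "mkvparser.hpp"  // assumed header name

void WalkLoadedClusters(mkvparser::Segment* pSegment) {
  const mkvparser::Cluster* pCluster = pSegment->GetFirst();

  while ((pCluster != NULL) && !pCluster->EOS()) {
    // ... consume pCluster here ...

    pCluster = pSegment->GetNext(pCluster);
  }
}
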
+long Segment::ParseNext(const Cluster* pCurr, const Cluster*& pResult,
+ long long& pos, long& len) {
+ assert(pCurr);
+ assert(!pCurr->EOS());
+ assert(m_clusters);
-long Segment::DoParseNext(
- const Cluster*& pResult,
- long long& pos,
- long& len)
-{
- long long total, avail;
+ pResult = 0;
- long status = m_pReader->Length(&total, &avail);
+ if (pCurr->m_index >= 0) { // loaded (not merely preloaded)
+ assert(m_clusters[pCurr->m_index] == pCurr);
- if (status < 0) //error
- return status;
+ const long next_idx = pCurr->m_index + 1;
- assert((total < 0) || (avail <= total));
+ if (next_idx < m_clusterCount) {
+ pResult = m_clusters[next_idx];
+ return 0; // success
+ }
- const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+ // curr cluster is last among loaded
- //Parse next cluster. This is strictly a parsing activity.
- //Creation of a new cluster object happens later, after the
- //parsing is done.
+ const long result = LoadCluster(pos, len);
- long long off_next = 0;
- long long cluster_size = -1;
+ if (result < 0) // error or underflow
+ return result;
- for (;;)
+ if (result > 0) // no more clusters
{
- if ((total >= 0) && (pos >= total))
- return 1; //EOF
+ // pResult = &m_eos;
+ return 1;
+ }
- if ((segment_stop >= 0) && (pos >= segment_stop))
- return 1; //EOF
+ pResult = GetLast();
+ return 0; // success
+ }
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ assert(m_pos > 0);
- long long result = GetUIntLength(m_pReader, pos, len);
+ long long total, avail;
- if (result < 0) //error
- return static_cast<long>(result);
+ long status = m_pReader->Length(&total, &avail);
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ if (status < 0) // error
+ return status;
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ assert((total < 0) || (avail <= total));
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
- const long long idpos = pos; //absolute
- const long long idoff = pos - m_start; //relative
+ // interrogate curr cluster
- const long long id = ReadUInt(m_pReader, idpos, len); //absolute
+ pos = pCurr->m_element_start;
- if (id < 0) //error
- return static_cast<long>(id);
+ if (pCurr->m_element_size >= 0)
+ pos += pCurr->m_element_size;
+ else {
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- if (id == 0) //weird
- return -1; //generic error
+ long long result = GetUIntLength(m_pReader, pos, len);
- pos += len; //consume ID
+ if (result < 0) // error
+ return static_cast<long>(result);
- //Read Size
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- result = GetUIntLength(m_pReader, pos, len);
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if (result < 0) //error
- return static_cast<long>(result);
+ const long long id = ReadUInt(m_pReader, pos, len);
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ if (id != 0x0F43B675) // weird: not Cluster ID
+ return -1;
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ pos += len; // consume ID
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ // Read Size
- const long long size = ReadUInt(m_pReader, pos, len);
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- if (size < 0) //error
- return static_cast<long>(size);
+ result = GetUIntLength(m_pReader, pos, len);
- pos += len; //consume length of size of element
+ if (result < 0) // error
+ return static_cast<long>(result);
- //Pos now points to start of payload
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- if (size == 0) //weird
- continue;
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- const long long unknown_size = (1LL << (7 * len)) - 1;
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if ((segment_stop >= 0) &&
- (size != unknown_size) &&
- ((pos + size) > segment_stop))
- {
- return E_FILE_FORMAT_INVALID;
- }
+ const long long size = ReadUInt(m_pReader, pos, len);
- if (id == 0x0C53BB6B) //Cues ID
- {
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID;
+ if (size < 0) // error
+ return static_cast<long>(size);
- const long long element_stop = pos + size;
+ pos += len; // consume size field
- if ((segment_stop >= 0) && (element_stop > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ const long long unknown_size = (1LL << (7 * len)) - 1;
- const long long element_start = idpos;
- const long long element_size = element_stop - element_start;
+ if (size == unknown_size) // TODO: should never happen
+ return E_FILE_FORMAT_INVALID; // TODO: resolve this
- if (m_pCues == NULL)
- {
- m_pCues = new Cues(this,
- pos,
- size,
- element_start,
- element_size);
- assert(m_pCues); //TODO
- }
+ // assert((pCurr->m_size <= 0) || (pCurr->m_size == size));
- pos += size; //consume payload
- assert((segment_stop < 0) || (pos <= segment_stop));
+ if ((segment_stop >= 0) && ((pos + size) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- continue;
- }
+ // Pos now points to start of payload
- if (id != 0x0F43B675) //not a Cluster ID
- {
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID;
+ pos += size; // consume payload (that is, the current cluster)
+ assert((segment_stop < 0) || (pos <= segment_stop));
- pos += size; //consume payload
- assert((segment_stop < 0) || (pos <= segment_stop));
+ // By consuming the payload, we are assuming that the curr
+ // cluster isn't interesting. That is, we don't bother checking
+ // whether the payload of the curr cluster is less than what
+ // happens to be available (obtained via IMkvReader::Length).
+ // Presumably the caller has already dispensed with the current
+ // cluster, and really does want the next cluster.
+ }
- continue;
- }
+ // pos now points to just beyond the last fully-loaded cluster
-#if 0 //this is commented-out to support incremental cluster parsing
+ for (;;) {
+ const long status = DoParseNext(pResult, pos, len);
+
+ if (status <= 1)
+ return status;
+ }
+}
+
+long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) {
+ long long total, avail;
+
+ long status = m_pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+
+ // Parse next cluster. This is strictly a parsing activity.
+ // Creation of a new cluster object happens later, after the
+ // parsing is done.
+
+ long long off_next = 0;
+ long long cluster_size = -1;
+
+ for (;;) {
+ if ((total >= 0) && (pos >= total))
+ return 1; // EOF
+
+ if ((segment_stop >= 0) && (pos >= segment_stop))
+ return 1; // EOF
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long idpos = pos; // absolute
+ const long long idoff = pos - m_start; // relative
+
+ const long long id = ReadUInt(m_pReader, idpos, len); // absolute
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ if (id == 0) // weird
+ return -1; // generic error
+
+ pos += len; // consume ID
+
+ // Read Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume length of size of element
+
+ // Pos now points to start of payload
+
+ if (size == 0) // weird
+ continue;
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if ((segment_stop >= 0) && (size != unknown_size) &&
+ ((pos + size) > segment_stop)) {
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (id == 0x0C53BB6B) { // Cues ID
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long element_stop = pos + size;
+
+ if ((segment_stop >= 0) && (element_stop > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ const long long element_start = idpos;
+ const long long element_size = element_stop - element_start;
+
+ if (m_pCues == NULL) {
+ m_pCues = new Cues(this, pos, size, element_start, element_size);
+ assert(m_pCues); // TODO
+ }
+
+ pos += size; // consume payload
+ assert((segment_stop < 0) || (pos <= segment_stop));
+
+ continue;
+ }
+
+ if (id != 0x0F43B675) { // not a Cluster ID
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += size; // consume payload
+ assert((segment_stop < 0) || (pos <= segment_stop));
+
+ continue;
+ }
+
+#if 0 // this is commented-out to support incremental cluster parsing
len = static_cast<long>(size);
if (element_stop > avail)
return E_BUFFER_NOT_FULL;
#endif
- //We have a cluster.
+ // We have a cluster.
- off_next = idoff;
+ off_next = idoff;
- if (size != unknown_size)
- cluster_size = size;
+ if (size != unknown_size)
+ cluster_size = size;
+ break;
+ }
+
+ assert(off_next > 0); // have cluster
+
+ // We have parsed the next cluster.
+ // We have not created a cluster object yet. What we need
+ // to do now is determine whether it has already been preloaded
+ // (in which case, an object for this cluster has already been
+ // created), and if not, create a new cluster object.
+
+ Cluster** const ii = m_clusters + m_clusterCount;
+ Cluster** i = ii;
+
+ Cluster** const jj = ii + m_clusterPreloadCount;
+ Cluster** j = jj;
+
+ while (i < j) {
+ // INVARIANT:
+ // [0, i) < pos_next
+ // [i, j) ?
+ // [j, jj) > pos_next
+
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
+
+ const Cluster* const pNext = *k;
+ assert(pNext);
+ assert(pNext->m_index < 0);
+
+ pos = pNext->GetPosition();
+ assert(pos >= 0);
+
+ if (pos < off_next)
+ i = k + 1;
+ else if (pos > off_next)
+ j = k;
+ else {
+ pResult = pNext;
+ return 0; // success
+ }
+ }
+
+ assert(i == j);
+
+ long long pos_;
+ long len_;
+
+ status = Cluster::HasBlockEntries(this, off_next, pos_, len_);
+
+ if (status < 0) { // error or underflow
+ pos = pos_;
+ len = len_;
+
+ return status;
+ }
+
+ if (status > 0) { // means "found at least one block entry"
+ Cluster* const pNext = Cluster::Create(this,
+ -1, // preloaded
+ off_next);
+ // element_size);
+ assert(pNext);
+
+ const ptrdiff_t idx_next = i - m_clusters; // insertion position
+
+ PreloadCluster(pNext, idx_next);
+ assert(m_clusters);
+ assert(idx_next < m_clusterSize);
+ assert(m_clusters[idx_next] == pNext);
+
+ pResult = pNext;
+ return 0; // success
+ }
+
+ // status == 0 means "no block entries found"
+
+ if (cluster_size < 0) { // unknown size
+ const long long payload_pos = pos; // absolute pos of cluster payload
+
+ for (;;) { // determine cluster size
+ if ((total >= 0) && (pos >= total))
break;
- }
- assert(off_next > 0); //have cluster
+ if ((segment_stop >= 0) && (pos >= segment_stop))
+ break; // no more clusters
- //We have parsed the next cluster.
- //We have not created a cluster object yet. What we need
- //to do now is determine whether it has already be preloaded
- //(in which case, an object for this cluster has already been
- //created), and if not, create a new cluster object.
+ // Read ID
- Cluster** const ii = m_clusters + m_clusterCount;
- Cluster** i = ii;
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- Cluster** const jj = ii + m_clusterPreloadCount;
- Cluster** j = jj;
+ long long result = GetUIntLength(m_pReader, pos, len);
- while (i < j)
- {
- //INVARIANT:
- //[0, i) < pos_next
- //[i, j) ?
- //[j, jj) > pos_next
+ if (result < 0) // error
+ return static_cast<long>(result);
- Cluster** const k = i + (j - i) / 2;
- assert(k < jj);
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- const Cluster* const pNext = *k;
- assert(pNext);
- assert(pNext->m_index < 0);
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- pos = pNext->GetPosition();
- assert(pos >= 0);
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if (pos < off_next)
- i = k + 1;
- else if (pos > off_next)
- j = k;
- else
- {
- pResult = pNext;
- return 0; //success
- }
- }
+ const long long idpos = pos;
+ const long long id = ReadUInt(m_pReader, idpos, len);
- assert(i == j);
+ if (id < 0) // error (or underflow)
+ return static_cast<long>(id);
- long long pos_;
- long len_;
+ // This is the distinguished set of IDs we use to determine
+ // that we have exhausted the sub-elements inside the cluster
+ // whose ID we parsed earlier.
- status = Cluster::HasBlockEntries(this, off_next, pos_, len_);
+ if (id == 0x0F43B675) // Cluster ID
+ break;
- if (status < 0) //error or underflow
- {
- pos = pos_;
- len = len_;
+ if (id == 0x0C53BB6B) // Cues ID
+ break;
- return status;
- }
+ pos += len; // consume ID (of sub-element)
- if (status > 0) //means "found at least one block entry"
- {
- Cluster* const pNext = Cluster::Create(this,
- -1, //preloaded
- off_next);
- //element_size);
- assert(pNext);
+ // Read Size
- const ptrdiff_t idx_next = i - m_clusters; //insertion position
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- PreloadCluster(pNext, idx_next);
- assert(m_clusters);
- assert(idx_next < m_clusterSize);
- assert(m_clusters[idx_next] == pNext);
+ result = GetUIntLength(m_pReader, pos, len);
- pResult = pNext;
- return 0; //success
- }
+ if (result < 0) // error
+ return static_cast<long>(result);
- //status == 0 means "no block entries found"
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- if (cluster_size < 0) //unknown size
- {
- const long long payload_pos = pos; //absolute pos of cluster payload
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- for (;;) //determine cluster size
- {
- if ((total >= 0) && (pos >= total))
- break;
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if ((segment_stop >= 0) && (pos >= segment_stop))
- break; //no more clusters
+ const long long size = ReadUInt(m_pReader, pos, len);
- //Read ID
+ if (size < 0) // error
+ return static_cast<long>(size);
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ pos += len; // consume size field of element
- long long result = GetUIntLength(m_pReader, pos, len);
+ // pos now points to start of sub-element's payload
- if (result < 0) //error
- return static_cast<long>(result);
+ if (size == 0) // weird
+ continue;
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ const long long unknown_size = (1LL << (7 * len)) - 1;
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID; // not allowed for sub-elements
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ if ((segment_stop >= 0) && ((pos + size) > segment_stop)) // weird
+ return E_FILE_FORMAT_INVALID;
- const long long idpos = pos;
- const long long id = ReadUInt(m_pReader, idpos, len);
+ pos += size; // consume payload of sub-element
+ assert((segment_stop < 0) || (pos <= segment_stop));
+ } // determine cluster size
- if (id < 0) //error (or underflow)
- return static_cast<long>(id);
+ cluster_size = pos - payload_pos;
+ assert(cluster_size >= 0); // TODO: handle cluster_size = 0
- //This is the distinguished set of ID's we use to determine
- //that we have exhausted the sub-element's inside the cluster
- //whose ID we parsed earlier.
+ pos = payload_pos; // reset and re-parse original cluster
+ }
- if (id == 0x0F43B675) //Cluster ID
- break;
+ pos += cluster_size; // consume payload
+ assert((segment_stop < 0) || (pos <= segment_stop));
- if (id == 0x0C53BB6B) //Cues ID
- break;
-
- pos += len; //consume ID (of sub-element)
-
- //Read Size
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(m_pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(m_pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(size);
-
- pos += len; //consume size field of element
-
- //pos now points to start of sub-element's payload
-
- if (size == 0) //weird
- continue;
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID; //not allowed for sub-elements
-
- if ((segment_stop >= 0) && ((pos + size) > segment_stop)) //weird
- return E_FILE_FORMAT_INVALID;
-
- pos += size; //consume payload of sub-element
- assert((segment_stop < 0) || (pos <= segment_stop));
- } //determine cluster size
-
- cluster_size = pos - payload_pos;
- assert(cluster_size >= 0); //TODO: handle cluster_size = 0
-
- pos = payload_pos; //reset and re-parse original cluster
- }
-
- pos += cluster_size; //consume payload
- assert((segment_stop < 0) || (pos <= segment_stop));
-
- return 2; //try to find a cluster that follows next
+ return 2; // try to find a cluster that follows next
}
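+// Editor's sketch (not part of the original patch): one way a caller
+// might drive ParseNext, which returns 0 on success, 1 at end of
+// stream, and E_BUFFER_NOT_FULL (negative) when the byte range
+// [pos, pos + len) must first be made available by the reader.
+// `pSegment`, `pCurr`, and `WaitForBytes` are hypothetical.
+#if 0
+ const Cluster* pNext;
+ long long pos;
+ long len;
+
+ for (;;) {
+ const long status = pSegment->ParseNext(pCurr, pNext, pos, len);
+
+ if (status == 0) // pNext is the cluster following pCurr
+ break;
+
+ if (status != E_BUFFER_NOT_FULL) // 1 means EOF; < 0 means error
+ break;
+
+ WaitForBytes(pos, len); // hypothetical: block until bytes arrive
+ }
+#endif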
+const Cluster* Segment::FindCluster(long long time_ns) const {
+ if ((m_clusters == NULL) || (m_clusterCount <= 0))
+ return &m_eos;
-const Cluster* Segment::FindCluster(long long time_ns) const
-{
- if ((m_clusters == NULL) || (m_clusterCount <= 0))
- return &m_eos;
+ {
+ Cluster* const pCluster = m_clusters[0];
+ assert(pCluster);
+ assert(pCluster->m_index == 0);
- {
- Cluster* const pCluster = m_clusters[0];
- assert(pCluster);
- assert(pCluster->m_index == 0);
+ if (time_ns <= pCluster->GetTime())
+ return pCluster;
+ }
- if (time_ns <= pCluster->GetTime())
- return pCluster;
- }
+ // Binary search of cluster array
- //Binary search of cluster array
+ long i = 0;
+ long j = m_clusterCount;
- long i = 0;
- long j = m_clusterCount;
+ while (i < j) {
+ // INVARIANT:
+ // [0, i) <= time_ns
+ // [i, j) ?
+ // [j, m_clusterCount) > time_ns
- while (i < j)
- {
- //INVARIANT:
- //[0, i) <= time_ns
- //[i, j) ?
- //[j, m_clusterCount) > time_ns
-
- const long k = i + (j - i) / 2;
- assert(k < m_clusterCount);
-
- Cluster* const pCluster = m_clusters[k];
- assert(pCluster);
- assert(pCluster->m_index == k);
-
- const long long t = pCluster->GetTime();
-
- if (t <= time_ns)
- i = k + 1;
- else
- j = k;
-
- assert(i <= j);
- }
-
- assert(i == j);
- assert(i > 0);
- assert(i <= m_clusterCount);
-
- const long k = i - 1;
+ const long k = i + (j - i) / 2;
+ assert(k < m_clusterCount);
Cluster* const pCluster = m_clusters[k];
assert(pCluster);
assert(pCluster->m_index == k);
- assert(pCluster->GetTime() <= time_ns);
- return pCluster;
+ const long long t = pCluster->GetTime();
+
+ if (t <= time_ns)
+ i = k + 1;
+ else
+ j = k;
+
+ assert(i <= j);
+ }
+
+ assert(i == j);
+ assert(i > 0);
+ assert(i <= m_clusterCount);
+
+ const long k = i - 1;
+
+ Cluster* const pCluster = m_clusters[k];
+ assert(pCluster);
+ assert(pCluster->m_index == k);
+ assert(pCluster->GetTime() <= time_ns);
+
+ return pCluster;
}
-
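+// Editor's sketch (not part of the original patch): FindCluster
+// returns the loaded cluster whose start time is closest to, without
+// exceeding, the requested time. A hypothetical seek to the 5-second
+// mark:
+#if 0
+ const long long time_ns = 5000000000LL; // 5 seconds, in nanoseconds
+ const Cluster* const pCluster = pSegment->FindCluster(time_ns);
+ // pCluster->GetTime() <= time_ns, unless it is the EOS sentinel
+#endif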
#if 0
const BlockEntry* Segment::Seek(
long long time_ns,
@@ -4059,8 +3674,7 @@
Cluster** const j = i + m_clusterCount;
- if (pTrack->GetType() == 2) //audio
- {
+ if (pTrack->GetType() == 2) { //audio
//TODO: we could decide to use cues for this, as we do for video.
//But we only use it for video because looking around for a keyframe
//can get expensive. Audio doesn't require anything special so a
@@ -4179,7 +3793,6 @@
}
#endif
-
#if 0
bool Segment::SearchCues(
long long time_ns,
@@ -4210,845 +3823,593 @@
}
#endif
+const Tracks* Segment::GetTracks() const { return m_pTracks; }
-const Tracks* Segment::GetTracks() const
-{
- return m_pTracks;
+const SegmentInfo* Segment::GetInfo() const { return m_pInfo; }
+
+const Cues* Segment::GetCues() const { return m_pCues; }
+
+const Chapters* Segment::GetChapters() const { return m_pChapters; }
+
+const SeekHead* Segment::GetSeekHead() const { return m_pSeekHead; }
+
+long long Segment::GetDuration() const {
+ assert(m_pInfo);
+ return m_pInfo->GetDuration();
}
+Chapters::Chapters(Segment* pSegment, long long payload_start,
+ long long payload_size, long long element_start,
+ long long element_size)
+ : m_pSegment(pSegment),
+ m_start(payload_start),
+ m_size(payload_size),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_editions(NULL),
+ m_editions_size(0),
+ m_editions_count(0) {}
-const SegmentInfo* Segment::GetInfo() const
-{
- return m_pInfo;
+Chapters::~Chapters() {
+ while (m_editions_count > 0) {
+ Edition& e = m_editions[--m_editions_count];
+ e.Clear();
+ }
}
+long Chapters::Parse() {
+ IMkvReader* const pReader = m_pSegment->m_pReader;
-const Cues* Segment::GetCues() const
-{
- return m_pCues;
-}
+ long long pos = m_start; // payload start
+ const long long stop = pos + m_size; // payload stop
+ while (pos < stop) {
+ long long id, size;
-const Chapters* Segment::GetChapters() const
-{
- return m_pChapters;
-}
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
-const SeekHead* Segment::GetSeekHead() const
-{
- return m_pSeekHead;
-}
+ if (size == 0) // weird
+ continue;
+ if (id == 0x05B9) { // EditionEntry ID
+ status = ParseEdition(pos, size);
-long long Segment::GetDuration() const
-{
- assert(m_pInfo);
- return m_pInfo->GetDuration();
-}
-
-
-Chapters::Chapters(
- Segment* pSegment,
- long long payload_start,
- long long payload_size,
- long long element_start,
- long long element_size) :
- m_pSegment(pSegment),
- m_start(payload_start),
- m_size(payload_size),
- m_element_start(element_start),
- m_element_size(element_size),
- m_editions(NULL),
- m_editions_size(0),
- m_editions_count(0)
-{
-}
-
-
-Chapters::~Chapters()
-{
- while (m_editions_count > 0)
- {
- Edition& e = m_editions[--m_editions_count];
- e.Clear();
- }
-}
-
-
-long Chapters::Parse()
-{
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- long long pos = m_start; // payload start
- const long long stop = pos + m_size; // payload stop
-
- while (pos < stop)
- {
- long long id, size;
-
- long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
-
- if (status < 0) // error
- return status;
-
- if (size == 0) // weird
- continue;
-
- if (id == 0x05B9) // EditionEntry ID
- {
- status = ParseEdition(pos, size);
-
- if (status < 0) // error
- return status;
- }
-
- pos += size;
- assert(pos <= stop);
+ if (status < 0) // error
+ return status;
}
- assert(pos == stop);
- return 0;
+ pos += size;
+ assert(pos <= stop);
+ }
+
+ assert(pos == stop);
+ return 0;
}
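+// Editor's sketch (not part of the original patch): once Parse
+// succeeds, the edition/atom/display hierarchy can be walked with the
+// accessors defined below. `pSegment` is a hypothetical loaded Segment.
+#if 0
+ const Chapters* const pChapters = pSegment->GetChapters();
+
+ for (int e = 0; pChapters && (e < pChapters->GetEditionCount()); ++e) {
+ const Chapters::Edition* const pEdition = pChapters->GetEdition(e);
+
+ for (int a = 0; a < pEdition->GetAtomCount(); ++a) {
+ const Chapters::Atom* const pAtom = pEdition->GetAtom(a);
+ const long long start_ns = pAtom->GetStartTime(pChapters); // in ns
+ }
+ }
+#endif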
+int Chapters::GetEditionCount() const { return m_editions_count; }
-int Chapters::GetEditionCount() const
-{
- return m_editions_count;
+const Chapters::Edition* Chapters::GetEdition(int idx) const {
+ if (idx < 0)
+ return NULL;
+
+ if (idx >= m_editions_count)
+ return NULL;
+
+ return m_editions + idx;
}
+bool Chapters::ExpandEditionsArray() {
+ if (m_editions_size > m_editions_count)
+ return true; // nothing else to do
-const Chapters::Edition* Chapters::GetEdition(int idx) const
-{
- if (idx < 0)
- return NULL;
+ const int size = (m_editions_size == 0) ? 1 : 2 * m_editions_size;
- if (idx >= m_editions_count)
- return NULL;
+ Edition* const editions = new (std::nothrow) Edition[size];
- return m_editions + idx;
+ if (editions == NULL)
+ return false;
+
+ for (int idx = 0; idx < m_editions_count; ++idx) {
+ m_editions[idx].ShallowCopy(editions[idx]);
+ }
+
+ delete[] m_editions;
+ m_editions = editions;
+
+ m_editions_size = size;
+ return true;
}
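+// Editor's note (not part of the original patch): the editions array
+// grows by doubling, so repeated ParseEdition calls cost amortized O(1)
+// per edition. ShallowCopy transfers ownership of each Edition's atom
+// array to the new slot, and ~Edition() is trivial, so deleting the old
+// storage with delete[] does not double-free; Clear() is what releases
+// the atoms.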
+long Chapters::ParseEdition(long long pos, long long size) {
+ if (!ExpandEditionsArray())
+ return -1;
-bool Chapters::ExpandEditionsArray()
-{
- if (m_editions_size > m_editions_count)
- return true; // nothing else to do
+ Edition& e = m_editions[m_editions_count++];
+ e.Init();
- const int size = (m_editions_size == 0) ? 1 : 2 * m_editions_size;
+ return e.Parse(m_pSegment->m_pReader, pos, size);
+}
- Edition* const editions = new (std::nothrow) Edition[size];
+Chapters::Edition::Edition() {}
- if (editions == NULL)
- return false;
+Chapters::Edition::~Edition() {}
- for (int idx = 0; idx < m_editions_count; ++idx)
- {
- m_editions[idx].ShallowCopy(editions[idx]);
+int Chapters::Edition::GetAtomCount() const { return m_atoms_count; }
+
+const Chapters::Atom* Chapters::Edition::GetAtom(int index) const {
+ if (index < 0)
+ return NULL;
+
+ if (index >= m_atoms_count)
+ return NULL;
+
+ return m_atoms + index;
+}
+
+void Chapters::Edition::Init() {
+ m_atoms = NULL;
+ m_atoms_size = 0;
+ m_atoms_count = 0;
+}
+
+void Chapters::Edition::ShallowCopy(Edition& rhs) const {
+ rhs.m_atoms = m_atoms;
+ rhs.m_atoms_size = m_atoms_size;
+ rhs.m_atoms_count = m_atoms_count;
+}
+
+void Chapters::Edition::Clear() {
+ while (m_atoms_count > 0) {
+ Atom& a = m_atoms[--m_atoms_count];
+ a.Clear();
+ }
+
+ delete[] m_atoms;
+ m_atoms = NULL;
+
+ m_atoms_size = 0;
+}
+
+long Chapters::Edition::Parse(IMkvReader* pReader, long long pos,
+ long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // weird
+ continue;
+
+ if (id == 0x36) { // Atom ID
+ status = ParseAtom(pReader, pos, size);
+
+ if (status < 0) // error
+ return status;
}
- delete[] m_editions;
- m_editions = editions;
+ pos += size;
+ assert(pos <= stop);
+ }
- m_editions_size = size;
- return true;
+ assert(pos == stop);
+ return 0;
}
+long Chapters::Edition::ParseAtom(IMkvReader* pReader, long long pos,
+ long long size) {
+ if (!ExpandAtomsArray())
+ return -1;
-long Chapters::ParseEdition(
- long long pos,
- long long size)
-{
- if (!ExpandEditionsArray())
- return -1;
+ Atom& a = m_atoms[m_atoms_count++];
+ a.Init();
- Edition& e = m_editions[m_editions_count++];
- e.Init();
-
- return e.Parse(m_pSegment->m_pReader, pos, size);
+ return a.Parse(pReader, pos, size);
}
+bool Chapters::Edition::ExpandAtomsArray() {
+ if (m_atoms_size > m_atoms_count)
+ return true; // nothing else to do
-Chapters::Edition::Edition()
-{
+ const int size = (m_atoms_size == 0) ? 1 : 2 * m_atoms_size;
+
+ Atom* const atoms = new (std::nothrow) Atom[size];
+
+ if (atoms == NULL)
+ return false;
+
+ for (int idx = 0; idx < m_atoms_count; ++idx) {
+ m_atoms[idx].ShallowCopy(atoms[idx]);
+ }
+
+ delete[] m_atoms;
+ m_atoms = atoms;
+
+ m_atoms_size = size;
+ return true;
}
+Chapters::Atom::Atom() {}
-Chapters::Edition::~Edition()
-{
+Chapters::Atom::~Atom() {}
+
+unsigned long long Chapters::Atom::GetUID() const { return m_uid; }
+
+const char* Chapters::Atom::GetStringUID() const { return m_string_uid; }
+
+long long Chapters::Atom::GetStartTimecode() const { return m_start_timecode; }
+
+long long Chapters::Atom::GetStopTimecode() const { return m_stop_timecode; }
+
+long long Chapters::Atom::GetStartTime(const Chapters* pChapters) const {
+ return GetTime(pChapters, m_start_timecode);
}
-
-int Chapters::Edition::GetAtomCount() const
-{
- return m_atoms_count;
+long long Chapters::Atom::GetStopTime(const Chapters* pChapters) const {
+ return GetTime(pChapters, m_stop_timecode);
}
+int Chapters::Atom::GetDisplayCount() const { return m_displays_count; }
-const Chapters::Atom* Chapters::Edition::GetAtom(int index) const
-{
- if (index < 0)
- return NULL;
+const Chapters::Display* Chapters::Atom::GetDisplay(int index) const {
+ if (index < 0)
+ return NULL;
- if (index >= m_atoms_count)
- return NULL;
+ if (index >= m_displays_count)
+ return NULL;
- return m_atoms + index;
+ return m_displays + index;
}
+void Chapters::Atom::Init() {
+ m_string_uid = NULL;
+ m_uid = 0;
+ m_start_timecode = -1;
+ m_stop_timecode = -1;
-void Chapters::Edition::Init()
-{
- m_atoms = NULL;
- m_atoms_size = 0;
- m_atoms_count = 0;
+ m_displays = NULL;
+ m_displays_size = 0;
+ m_displays_count = 0;
}
+void Chapters::Atom::ShallowCopy(Atom& rhs) const {
+ rhs.m_string_uid = m_string_uid;
+ rhs.m_uid = m_uid;
+ rhs.m_start_timecode = m_start_timecode;
+ rhs.m_stop_timecode = m_stop_timecode;
-void Chapters::Edition::ShallowCopy(Edition& rhs) const
-{
- rhs.m_atoms = m_atoms;
- rhs.m_atoms_size = m_atoms_size;
- rhs.m_atoms_count = m_atoms_count;
+ rhs.m_displays = m_displays;
+ rhs.m_displays_size = m_displays_size;
+ rhs.m_displays_count = m_displays_count;
}
+void Chapters::Atom::Clear() {
+ delete[] m_string_uid;
+ m_string_uid = NULL;
-void Chapters::Edition::Clear()
-{
- while (m_atoms_count > 0)
- {
- Atom& a = m_atoms[--m_atoms_count];
- a.Clear();
+ while (m_displays_count > 0) {
+ Display& d = m_displays[--m_displays_count];
+ d.Clear();
+ }
+
+ delete[] m_displays;
+ m_displays = NULL;
+
+ m_displays_size = 0;
+}
+
+long Chapters::Atom::Parse(IMkvReader* pReader, long long pos, long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // weird
+ continue;
+
+ if (id == 0x00) { // Display ID
+ status = ParseDisplay(pReader, pos, size);
+
+ if (status < 0) // error
+ return status;
+ } else if (id == 0x1654) { // StringUID ID
+ status = UnserializeString(pReader, pos, size, m_string_uid);
+
+ if (status < 0) // error
+ return status;
+ } else if (id == 0x33C4) { // UID ID
+ long long val;
+ status = UnserializeInt(pReader, pos, size, val);
+
+ if (status < 0) // error
+ return status;
+
+ m_uid = static_cast<unsigned long long>(val);
+ } else if (id == 0x11) { // TimeStart ID
+ const long long val = UnserializeUInt(pReader, pos, size);
+
+ if (val < 0) // error
+ return static_cast<long>(val);
+
+ m_start_timecode = val;
+ } else if (id == 0x12) { // TimeEnd ID
+ const long long val = UnserializeUInt(pReader, pos, size);
+
+ if (val < 0) // error
+ return static_cast<long>(val);
+
+ m_stop_timecode = val;
}
- delete[] m_atoms;
- m_atoms = NULL;
+ pos += size;
+ assert(pos <= stop);
+ }
- m_atoms_size = 0;
+ assert(pos == stop);
+ return 0;
}
+long long Chapters::Atom::GetTime(const Chapters* pChapters,
+ long long timecode) {
+ if (pChapters == NULL)
+ return -1;
-long Chapters::Edition::Parse(
- IMkvReader* pReader,
- long long pos,
- long long size)
-{
- const long long stop = pos + size;
+ Segment* const pSegment = pChapters->m_pSegment;
- while (pos < stop)
- {
- long long id, size;
+ if (pSegment == NULL) // weird
+ return -1;
- long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
+ const SegmentInfo* const pInfo = pSegment->GetInfo();
- if (status < 0) // error
- return status;
+ if (pInfo == NULL)
+ return -1;
- if (size == 0) // weird
- continue;
+ const long long timecode_scale = pInfo->GetTimeCodeScale();
- if (id == 0x36) // Atom ID
- {
- status = ParseAtom(pReader, pos, size);
+ if (timecode_scale < 1) // weird
+ return -1;
- if (status < 0) // error
- return status;
- }
+ if (timecode < 0)
+ return -1;
- pos += size;
- assert(pos <= stop);
+ const long long result = timecode_scale * timecode;
+
+ return result;
+}
+
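+// Editor's note (not part of the original patch): chapter timecodes are
+// stored unscaled; GetTime multiplies by the segment's TimecodeScale.
+// E.g. with the default scale of 1,000,000 ns, a stored timecode of
+// 5000 yields 5,000,000,000 ns, i.e. 5 seconds. The product is not
+// checked for overflow here.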
+long Chapters::Atom::ParseDisplay(IMkvReader* pReader, long long pos,
+ long long size) {
+ if (!ExpandDisplaysArray())
+ return -1;
+
+ Display& d = m_displays[m_displays_count++];
+ d.Init();
+
+ return d.Parse(pReader, pos, size);
+}
+
+bool Chapters::Atom::ExpandDisplaysArray() {
+ if (m_displays_size > m_displays_count)
+ return true; // nothing else to do
+
+ const int size = (m_displays_size == 0) ? 1 : 2 * m_displays_size;
+
+ Display* const displays = new (std::nothrow) Display[size];
+
+ if (displays == NULL)
+ return false;
+
+ for (int idx = 0; idx < m_displays_count; ++idx) {
+ m_displays[idx].ShallowCopy(displays[idx]);
+ }
+
+ delete[] m_displays;
+ m_displays = displays;
+
+ m_displays_size = size;
+ return true;
+}
+
+Chapters::Display::Display() {}
+
+Chapters::Display::~Display() {}
+
+const char* Chapters::Display::GetString() const { return m_string; }
+
+const char* Chapters::Display::GetLanguage() const { return m_language; }
+
+const char* Chapters::Display::GetCountry() const { return m_country; }
+
+void Chapters::Display::Init() {
+ m_string = NULL;
+ m_language = NULL;
+ m_country = NULL;
+}
+
+void Chapters::Display::ShallowCopy(Display& rhs) const {
+ rhs.m_string = m_string;
+ rhs.m_language = m_language;
+ rhs.m_country = m_country;
+}
+
+void Chapters::Display::Clear() {
+ delete[] m_string;
+ m_string = NULL;
+
+ delete[] m_language;
+ m_language = NULL;
+
+ delete[] m_country;
+ m_country = NULL;
+}
+
+long Chapters::Display::Parse(IMkvReader* pReader, long long pos,
+ long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // weird
+ continue;
+
+ if (id == 0x05) { // ChapterString ID
+ status = UnserializeString(pReader, pos, size, m_string);
+
+ if (status)
+ return status;
+ } else if (id == 0x037C) { // ChapterLanguage ID
+ status = UnserializeString(pReader, pos, size, m_language);
+
+ if (status)
+ return status;
+ } else if (id == 0x037E) { // ChapterCountry ID
+ status = UnserializeString(pReader, pos, size, m_country);
+
+ if (status)
+ return status;
}
- assert(pos == stop);
- return 0;
+ pos += size;
+ assert(pos <= stop);
+ }
+
+ assert(pos == stop);
+ return 0;
}
+SegmentInfo::SegmentInfo(Segment* pSegment, long long start, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_pMuxingAppAsUTF8(NULL),
+ m_pWritingAppAsUTF8(NULL),
+ m_pTitleAsUTF8(NULL) {}
-long Chapters::Edition::ParseAtom(
- IMkvReader* pReader,
- long long pos,
- long long size)
-{
- if (!ExpandAtomsArray())
- return -1;
+SegmentInfo::~SegmentInfo() {
+ delete[] m_pMuxingAppAsUTF8;
+ m_pMuxingAppAsUTF8 = NULL;
- Atom& a = m_atoms[m_atoms_count++];
- a.Init();
+ delete[] m_pWritingAppAsUTF8;
+ m_pWritingAppAsUTF8 = NULL;
- return a.Parse(pReader, pos, size);
+ delete[] m_pTitleAsUTF8;
+ m_pTitleAsUTF8 = NULL;
}
+long SegmentInfo::Parse() {
+ assert(m_pMuxingAppAsUTF8 == NULL);
+ assert(m_pWritingAppAsUTF8 == NULL);
+ assert(m_pTitleAsUTF8 == NULL);
-bool Chapters::Edition::ExpandAtomsArray()
-{
- if (m_atoms_size > m_atoms_count)
- return true; // nothing else to do
+ IMkvReader* const pReader = m_pSegment->m_pReader;
- const int size = (m_atoms_size == 0) ? 1 : 2 * m_atoms_size;
+ long long pos = m_start;
+ const long long stop = m_start + m_size;
- Atom* const atoms = new (std::nothrow) Atom[size];
+ m_timecodeScale = 1000000;
+ m_duration = -1;
- if (atoms == NULL)
- return false;
+ while (pos < stop) {
+ long long id, size;
- for (int idx = 0; idx < m_atoms_count; ++idx)
- {
- m_atoms[idx].ShallowCopy(atoms[idx]);
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == 0x0AD7B1) { // Timecode Scale
+ m_timecodeScale = UnserializeUInt(pReader, pos, size);
+
+ if (m_timecodeScale <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x0489) { // Segment duration
+ const long status = UnserializeFloat(pReader, pos, size, m_duration);
+
+ if (status < 0)
+ return status;
+
+ if (m_duration < 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x0D80) { // MuxingApp
+ const long status =
+ UnserializeString(pReader, pos, size, m_pMuxingAppAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == 0x1741) { // WritingApp
+ const long status =
+ UnserializeString(pReader, pos, size, m_pWritingAppAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == 0x3BA9) { // Title
+ const long status = UnserializeString(pReader, pos, size, m_pTitleAsUTF8);
+
+ if (status)
+ return status;
}
- delete[] m_atoms;
- m_atoms = atoms;
+ pos += size;
+ assert(pos <= stop);
+ }
- m_atoms_size = size;
- return true;
+ assert(pos == stop);
+
+ return 0;
}
+long long SegmentInfo::GetTimeCodeScale() const { return m_timecodeScale; }
-Chapters::Atom::Atom()
-{
+long long SegmentInfo::GetDuration() const {
+ if (m_duration < 0)
+ return -1;
+
+ assert(m_timecodeScale >= 1);
+
+ const double dd = double(m_duration) * double(m_timecodeScale);
+ const long long d = static_cast<long long>(dd);
+
+ return d;
}
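+// Editor's note (not part of the original patch): m_duration holds the
+// raw float from the Duration element, expressed in timecode ticks.
+// E.g. a stored duration of 42500.0 with the default TimecodeScale of
+// 1,000,000 ns yields 42,500,000,000 ns, i.e. 42.5 seconds.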
-
-Chapters::Atom::~Atom()
-{
+const char* SegmentInfo::GetMuxingAppAsUTF8() const {
+ return m_pMuxingAppAsUTF8;
}
-
-unsigned long long Chapters::Atom::GetUID() const
-{
- return m_uid;
+const char* SegmentInfo::GetWritingAppAsUTF8() const {
+ return m_pWritingAppAsUTF8;
}
-
-const char* Chapters::Atom::GetStringUID() const
-{
- return m_string_uid;
-}
-
-
-long long Chapters::Atom::GetStartTimecode() const
-{
- return m_start_timecode;
-}
-
-
-long long Chapters::Atom::GetStopTimecode() const
-{
- return m_stop_timecode;
-}
-
-
-long long Chapters::Atom::GetStartTime(const Chapters* pChapters) const
-{
- return GetTime(pChapters, m_start_timecode);
-}
-
-
-long long Chapters::Atom::GetStopTime(const Chapters* pChapters) const
-{
- return GetTime(pChapters, m_stop_timecode);
-}
-
-
-int Chapters::Atom::GetDisplayCount() const
-{
- return m_displays_count;
-}
-
-
-const Chapters::Display* Chapters::Atom::GetDisplay(int index) const
-{
- if (index < 0)
- return NULL;
-
- if (index >= m_displays_count)
- return NULL;
-
- return m_displays + index;
-}
-
-
-void Chapters::Atom::Init()
-{
- m_string_uid = NULL;
- m_uid = 0;
- m_start_timecode = -1;
- m_stop_timecode = -1;
-
- m_displays = NULL;
- m_displays_size = 0;
- m_displays_count = 0;
-}
-
-
-void Chapters::Atom::ShallowCopy(Atom& rhs) const
-{
- rhs.m_string_uid = m_string_uid;
- rhs.m_uid = m_uid;
- rhs.m_start_timecode = m_start_timecode;
- rhs.m_stop_timecode = m_stop_timecode;
-
- rhs.m_displays = m_displays;
- rhs.m_displays_size = m_displays_size;
- rhs.m_displays_count = m_displays_count;
-}
-
-
-void Chapters::Atom::Clear()
-{
- delete[] m_string_uid;
- m_string_uid = NULL;
-
- while (m_displays_count > 0)
- {
- Display& d = m_displays[--m_displays_count];
- d.Clear();
- }
-
- delete[] m_displays;
- m_displays = NULL;
-
- m_displays_size = 0;
-}
-
-
-long Chapters::Atom::Parse(
- IMkvReader* pReader,
- long long pos,
- long long size)
-{
- const long long stop = pos + size;
-
- while (pos < stop)
- {
- long long id, size;
-
- long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
-
- if (status < 0) // error
- return status;
-
- if (size == 0) // weird
- continue;
-
- if (id == 0x00) // Display ID
- {
- status = ParseDisplay(pReader, pos, size);
-
- if (status < 0) // error
- return status;
- }
- else if (id == 0x1654) // StringUID ID
- {
- status = UnserializeString(pReader, pos, size, m_string_uid);
-
- if (status < 0) // error
- return status;
- }
- else if (id == 0x33C4) // UID ID
- {
- long long val;
- status = UnserializeInt(pReader, pos, size, val);
-
- if (status < 0) // error
- return status;
-
- m_uid = val;
- }
- else if (id == 0x11) // TimeStart ID
- {
- const long long val = UnserializeUInt(pReader, pos, size);
-
- if (val < 0) // error
- return static_cast<long>(val);
-
- m_start_timecode = val;
- }
- else if (id == 0x12) // TimeEnd ID
- {
- const long long val = UnserializeUInt(pReader, pos, size);
-
- if (val < 0) // error
- return static_cast<long>(val);
-
- m_stop_timecode = val;
- }
-
- pos += size;
- assert(pos <= stop);
- }
-
- assert(pos == stop);
- return 0;
-}
-
-
-long long Chapters::Atom::GetTime(
- const Chapters* pChapters,
- long long timecode)
-{
- if (pChapters == NULL)
- return -1;
-
- Segment* const pSegment = pChapters->m_pSegment;
-
- if (pSegment == NULL) // weird
- return -1;
-
- const SegmentInfo* const pInfo = pSegment->GetInfo();
-
- if (pInfo == NULL)
- return -1;
-
- const long long timecode_scale = pInfo->GetTimeCodeScale();
-
- if (timecode_scale < 1) // weird
- return -1;
-
- if (timecode < 0)
- return -1;
-
- const long long result = timecode_scale * timecode;
-
- return result;
-}
-
-
-long Chapters::Atom::ParseDisplay(
- IMkvReader* pReader,
- long long pos,
- long long size)
-{
- if (!ExpandDisplaysArray())
- return -1;
-
- Display& d = m_displays[m_displays_count++];
- d.Init();
-
- return d.Parse(pReader, pos, size);
-}
-
-
-bool Chapters::Atom::ExpandDisplaysArray()
-{
- if (m_displays_size > m_displays_count)
- return true; // nothing else to do
-
- const int size = (m_displays_size == 0) ? 1 : 2 * m_displays_size;
-
- Display* const displays = new (std::nothrow) Display[size];
-
- if (displays == NULL)
- return false;
-
- for (int idx = 0; idx < m_displays_count; ++idx)
- {
- m_displays[idx].ShallowCopy(displays[idx]);
- }
-
- delete[] m_displays;
- m_displays = displays;
-
- m_displays_size = size;
- return true;
-}
-
-
-Chapters::Display::Display()
-{
-}
-
-
-Chapters::Display::~Display()
-{
-}
-
-
-const char* Chapters::Display::GetString() const
-{
- return m_string;
-}
-
-
-const char* Chapters::Display::GetLanguage() const
-{
- return m_language;
-}
-
-
-const char* Chapters::Display::GetCountry() const
-{
- return m_country;
-}
-
-
-void Chapters::Display::Init()
-{
- m_string = NULL;
- m_language = NULL;
- m_country = NULL;
-}
-
-
-void Chapters::Display::ShallowCopy(Display& rhs) const
-{
- rhs.m_string = m_string;
- rhs.m_language = m_language;
- rhs.m_country = m_country;
-}
-
-
-void Chapters::Display::Clear()
-{
- delete[] m_string;
- m_string = NULL;
-
- delete[] m_language;
- m_language = NULL;
-
- delete[] m_country;
- m_country = NULL;
-}
-
-
-long Chapters::Display::Parse(
- IMkvReader* pReader,
- long long pos,
- long long size)
-{
- const long long stop = pos + size;
-
- while (pos < stop)
- {
- long long id, size;
-
- long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
-
- if (status < 0) // error
- return status;
-
- if (size == 0) // weird
- continue;
-
- if (id == 0x05) // ChapterString ID
- {
- status = UnserializeString(pReader, pos, size, m_string);
-
- if (status)
- return status;
- }
- else if (id == 0x037C) // ChapterLanguage ID
- {
- status = UnserializeString(pReader, pos, size, m_language);
-
- if (status)
- return status;
- }
- else if (id == 0x037E) // ChapterCountry ID
- {
- status = UnserializeString(pReader, pos, size, m_country);
-
- if (status)
- return status;
- }
-
- pos += size;
- assert(pos <= stop);
- }
-
- assert(pos == stop);
- return 0;
-}
-
-
-SegmentInfo::SegmentInfo(
- Segment* pSegment,
- long long start,
- long long size_,
- long long element_start,
- long long element_size) :
- m_pSegment(pSegment),
- m_start(start),
- m_size(size_),
- m_element_start(element_start),
- m_element_size(element_size),
- m_pMuxingAppAsUTF8(NULL),
- m_pWritingAppAsUTF8(NULL),
- m_pTitleAsUTF8(NULL)
-{
-}
-
-SegmentInfo::~SegmentInfo()
-{
- delete[] m_pMuxingAppAsUTF8;
- m_pMuxingAppAsUTF8 = NULL;
-
- delete[] m_pWritingAppAsUTF8;
- m_pWritingAppAsUTF8 = NULL;
-
- delete[] m_pTitleAsUTF8;
- m_pTitleAsUTF8 = NULL;
-}
-
-
-long SegmentInfo::Parse()
-{
- assert(m_pMuxingAppAsUTF8 == NULL);
- assert(m_pWritingAppAsUTF8 == NULL);
- assert(m_pTitleAsUTF8 == NULL);
-
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- long long pos = m_start;
- const long long stop = m_start + m_size;
-
- m_timecodeScale = 1000000;
- m_duration = -1;
-
- while (pos < stop)
- {
- long long id, size;
-
- const long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
-
- if (status < 0) //error
- return status;
-
- if (id == 0x0AD7B1) //Timecode Scale
- {
- m_timecodeScale = UnserializeUInt(pReader, pos, size);
-
- if (m_timecodeScale <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x0489) //Segment duration
- {
- const long status = UnserializeFloat(
- pReader,
- pos,
- size,
- m_duration);
-
- if (status < 0)
- return status;
-
- if (m_duration < 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x0D80) //MuxingApp
- {
- const long status = UnserializeString(
- pReader,
- pos,
- size,
- m_pMuxingAppAsUTF8);
-
- if (status)
- return status;
- }
- else if (id == 0x1741) //WritingApp
- {
- const long status = UnserializeString(
- pReader,
- pos,
- size,
- m_pWritingAppAsUTF8);
-
- if (status)
- return status;
- }
- else if (id == 0x3BA9) //Title
- {
- const long status = UnserializeString(
- pReader,
- pos,
- size,
- m_pTitleAsUTF8);
-
- if (status)
- return status;
- }
-
- pos += size;
- assert(pos <= stop);
- }
-
- assert(pos == stop);
-
- return 0;
-}
-
-
-long long SegmentInfo::GetTimeCodeScale() const
-{
- return m_timecodeScale;
-}
-
-
-long long SegmentInfo::GetDuration() const
-{
- if (m_duration < 0)
- return -1;
-
- assert(m_timecodeScale >= 1);
-
- const double dd = double(m_duration) * double(m_timecodeScale);
- const long long d = static_cast<long long>(dd);
-
- return d;
-}
-
-const char* SegmentInfo::GetMuxingAppAsUTF8() const
-{
- return m_pMuxingAppAsUTF8;
-}
-
-
-const char* SegmentInfo::GetWritingAppAsUTF8() const
-{
- return m_pWritingAppAsUTF8;
-}
-
-const char* SegmentInfo::GetTitleAsUTF8() const
-{
- return m_pTitleAsUTF8;
-}
+const char* SegmentInfo::GetTitleAsUTF8() const { return m_pTitleAsUTF8; }
///////////////////////////////////////////////////////////////
// ContentEncoding element
ContentEncoding::ContentCompression::ContentCompression()
- : algo(0),
- settings(NULL),
- settings_len(0) {
-}
+ : algo(0), settings(NULL), settings_len(0) {}
ContentEncoding::ContentCompression::~ContentCompression() {
- delete [] settings;
+ delete[] settings;
}
ContentEncoding::ContentEncryption::ContentEncryption()
@@ -5060,13 +4421,12 @@
sig_key_id(NULL),
sig_key_id_len(0),
sig_algo(0),
- sig_hash_algo(0) {
-}
+ sig_hash_algo(0) {}
ContentEncoding::ContentEncryption::~ContentEncryption() {
- delete [] key_id;
- delete [] signature;
- delete [] sig_key_id;
+ delete[] key_id;
+ delete[] signature;
+ delete[] sig_key_id;
}
ContentEncoding::ContentEncoding()
@@ -5076,8 +4436,7 @@
encryption_entries_end_(NULL),
encoding_order_(0),
encoding_scope_(1),
- encoding_type_(0) {
-}
+ encoding_type_(0) {}
ContentEncoding::~ContentEncoding() {
ContentCompression** comp_i = compression_entries_;
@@ -5088,7 +4447,7 @@
delete comp;
}
- delete [] compression_entries_;
+ delete[] compression_entries_;
ContentEncryption** enc_i = encryption_entries_;
ContentEncryption** const enc_j = encryption_entries_end_;
@@ -5098,10 +4457,9 @@
delete enc;
}
- delete [] encryption_entries_;
+ delete[] encryption_entries_;
}
-
const ContentEncoding::ContentCompression*
ContentEncoding::GetCompressionByIndex(unsigned long idx) const {
const ptrdiff_t count = compression_entries_end_ - compression_entries_;
@@ -5120,8 +4478,8 @@
return static_cast<unsigned long>(count);
}
-const ContentEncoding::ContentEncryption*
-ContentEncoding::GetEncryptionByIndex(unsigned long idx) const {
+const ContentEncoding::ContentEncryption* ContentEncoding::GetEncryptionByIndex(
+ unsigned long idx) const {
const ptrdiff_t count = encryption_entries_end_ - encryption_entries_;
assert(count >= 0);
@@ -5139,9 +4497,7 @@
}
long ContentEncoding::ParseContentEncAESSettingsEntry(
- long long start,
- long long size,
- IMkvReader* pReader,
+ long long start, long long size, IMkvReader* pReader,
ContentEncAESSettings* aes) {
assert(pReader);
assert(aes);
@@ -5151,12 +4507,8 @@
while (pos < stop) {
long long id, size;
- const long status = ParseElementHeader(pReader,
- pos,
- stop,
- id,
- size);
- if (status < 0) //error
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
return status;
if (id == 0x7E8) {
@@ -5166,15 +4518,14 @@
return E_FILE_FORMAT_INVALID;
}
- pos += size; //consume payload
+ pos += size; // consume payload
assert(pos <= stop);
}
return 0;
}
-long ContentEncoding::ParseContentEncodingEntry(long long start,
- long long size,
+long ContentEncoding::ParseContentEncodingEntry(long long start, long long size,
IMkvReader* pReader) {
assert(pReader);
@@ -5187,12 +4538,8 @@
while (pos < stop) {
long long id, size;
- const long status = ParseElementHeader(pReader,
- pos,
- stop,
- id,
- size);
- if (status < 0) //error
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
return status;
if (id == 0x1034) // ContentCompression ID
@@ -5201,7 +4548,7 @@
if (id == 0x1035) // ContentEncryption ID
++encryption_count;
- pos += size; //consume payload
+ pos += size; // consume payload
assert(pos <= stop);
}
@@ -5210,7 +4557,7 @@
if (compression_count > 0) {
compression_entries_ =
- new (std::nothrow) ContentCompression*[compression_count];
+ new (std::nothrow) ContentCompression*[compression_count];
if (!compression_entries_)
return -1;
compression_entries_end_ = compression_entries_;
@@ -5218,9 +4565,9 @@
if (encryption_count > 0) {
encryption_entries_ =
- new (std::nothrow) ContentEncryption*[encryption_count];
+ new (std::nothrow) ContentEncryption*[encryption_count];
if (!encryption_entries_) {
- delete [] compression_entries_;
+ delete[] compression_entries_;
return -1;
}
encryption_entries_end_ = encryption_entries_;
@@ -5229,12 +4576,8 @@
pos = start;
while (pos < stop) {
long long id, size;
- long status = ParseElementHeader(pReader,
- pos,
- stop,
- id,
- size);
- if (status < 0) //error
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
return status;
if (id == 0x1031) {
@@ -5251,7 +4594,7 @@
} else if (id == 0x1034) {
// ContentCompression ID
ContentCompression* const compression =
- new (std::nothrow) ContentCompression();
+ new (std::nothrow) ContentCompression();
if (!compression)
return -1;
@@ -5276,7 +4619,7 @@
*encryption_entries_end_++ = encryption;
}
- pos += size; //consume payload
+ pos += size; // consume payload
assert(pos <= stop);
}
@@ -5284,11 +4627,9 @@
return 0;
}
-long ContentEncoding::ParseCompressionEntry(
- long long start,
- long long size,
- IMkvReader* pReader,
- ContentCompression* compression) {
+long ContentEncoding::ParseCompressionEntry(long long start, long long size,
+ IMkvReader* pReader,
+ ContentCompression* compression) {
assert(pReader);
assert(compression);
@@ -5299,12 +4640,8 @@
while (pos < stop) {
long long id, size;
- const long status = ParseElementHeader(pReader,
- pos,
- stop,
- id,
- size);
- if (status < 0) //error
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
return status;
if (id == 0x254) {
@@ -5325,9 +4662,10 @@
if (buf == NULL)
return -1;
- const int read_status = pReader->Read(pos, buflen, buf);
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
if (read_status) {
- delete [] buf;
+ delete[] buf;
return status;
}
@@ -5335,7 +4673,7 @@
compression->settings_len = buflen;
}
- pos += size; //consume payload
+ pos += size; // consume payload
assert(pos <= stop);
}
@@ -5346,11 +4684,9 @@
return 0;
}
-long ContentEncoding::ParseEncryptionEntry(
- long long start,
- long long size,
- IMkvReader* pReader,
- ContentEncryption* encryption) {
+long ContentEncoding::ParseEncryptionEntry(long long start, long long size,
+ IMkvReader* pReader,
+ ContentEncryption* encryption) {
assert(pReader);
assert(encryption);
@@ -5359,12 +4695,8 @@
while (pos < stop) {
long long id, size;
- const long status = ParseElementHeader(pReader,
- pos,
- stop,
- id,
- size);
- if (status < 0) //error
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
return status;
if (id == 0x7E1) {
@@ -5374,7 +4706,7 @@
return E_FILE_FORMAT_INVALID;
} else if (id == 0x7E2) {
// ContentEncKeyID
- delete[] encryption->key_id;
+ delete[] encryption->key_id;
encryption->key_id = NULL;
encryption->key_id_len = 0;
@@ -5387,9 +4719,10 @@
if (buf == NULL)
return -1;
- const int read_status = pReader->Read(pos, buflen, buf);
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
if (read_status) {
- delete [] buf;
+ delete[] buf;
return status;
}
@@ -5397,7 +4730,7 @@
encryption->key_id_len = buflen;
} else if (id == 0x7E3) {
// ContentSignature
- delete[] encryption->signature;
+ delete[] encryption->signature;
encryption->signature = NULL;
encryption->signature_len = 0;
@@ -5410,9 +4743,10 @@
if (buf == NULL)
return -1;
- const int read_status = pReader->Read(pos, buflen, buf);
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
if (read_status) {
- delete [] buf;
+ delete[] buf;
return status;
}
@@ -5420,7 +4754,7 @@
encryption->signature_len = buflen;
} else if (id == 0x7E4) {
// ContentSigKeyID
- delete[] encryption->sig_key_id;
+ delete[] encryption->sig_key_id;
encryption->sig_key_id = NULL;
encryption->sig_key_id_len = 0;
@@ -5433,9 +4767,10 @@
if (buf == NULL)
return -1;
- const int read_status = pReader->Read(pos, buflen, buf);
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
if (read_status) {
- delete [] buf;
+ delete[] buf;
return status;
}
@@ -5450,400 +4785,322 @@
} else if (id == 0x7E7) {
// ContentEncAESSettings
const long status = ParseContentEncAESSettingsEntry(
- pos,
- size,
- pReader,
- &encryption->aes_settings);
+ pos, size, pReader, &encryption->aes_settings);
if (status)
return status;
}
- pos += size; //consume payload
+ pos += size; // consume payload
assert(pos <= stop);
}
return 0;
}
-Track::Track(
- Segment* pSegment,
- long long element_start,
- long long element_size) :
- m_pSegment(pSegment),
- m_element_start(element_start),
- m_element_size(element_size),
- content_encoding_entries_(NULL),
- content_encoding_entries_end_(NULL)
-{
+Track::Track(Segment* pSegment, long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ content_encoding_entries_(NULL),
+ content_encoding_entries_end_(NULL) {}
+
+Track::~Track() {
+ Info& info = const_cast<Info&>(m_info);
+ info.Clear();
+
+ ContentEncoding** i = content_encoding_entries_;
+ ContentEncoding** const j = content_encoding_entries_end_;
+
+ while (i != j) {
+ ContentEncoding* const encoding = *i++;
+ delete encoding;
+ }
+
+ delete[] content_encoding_entries_;
}
-Track::~Track()
-{
- Info& info = const_cast<Info&>(m_info);
- info.Clear();
+long Track::Create(Segment* pSegment, const Info& info, long long element_start,
+ long long element_size, Track*& pResult) {
+ if (pResult)
+ return -1;
- ContentEncoding** i = content_encoding_entries_;
- ContentEncoding** const j = content_encoding_entries_end_;
+ Track* const pTrack =
+ new (std::nothrow) Track(pSegment, element_start, element_size);
- while (i != j) {
- ContentEncoding* const encoding = *i++;
- delete encoding;
- }
+ if (pTrack == NULL)
+ return -1; // generic error
- delete [] content_encoding_entries_;
+ const int status = info.Copy(pTrack->m_info);
+
+ if (status) { // error
+ delete pTrack;
+ return status;
+ }
+
+ pResult = pTrack;
+ return 0; // success
}
-long Track::Create(
- Segment* pSegment,
- const Info& info,
- long long element_start,
- long long element_size,
- Track*& pResult)
-{
- if (pResult)
- return -1;
+Track::Info::Info()
+ : uid(0),
+ defaultDuration(0),
+ codecDelay(0),
+ seekPreRoll(0),
+ nameAsUTF8(NULL),
+ language(NULL),
+ codecId(NULL),
+ codecNameAsUTF8(NULL),
+ codecPrivate(NULL),
+ codecPrivateSize(0),
+ lacing(false) {}
- Track* const pTrack = new (std::nothrow) Track(pSegment,
- element_start,
- element_size);
+Track::Info::~Info() { Clear(); }
- if (pTrack == NULL)
- return -1; //generic error
+void Track::Info::Clear() {
+ delete[] nameAsUTF8;
+ nameAsUTF8 = NULL;
- const int status = info.Copy(pTrack->m_info);
+ delete[] language;
+ language = NULL;
- if (status) // error
- {
- delete pTrack;
- return status;
- }
+ delete[] codecId;
+ codecId = NULL;
- pResult = pTrack;
- return 0; //success
+ delete[] codecPrivate;
+ codecPrivate = NULL;
+ codecPrivateSize = 0;
+
+ delete[] codecNameAsUTF8;
+ codecNameAsUTF8 = NULL;
}
-Track::Info::Info():
- uid(0),
- defaultDuration(0),
- codecDelay(0),
- seekPreRoll(0),
- nameAsUTF8(NULL),
- language(NULL),
- codecId(NULL),
- codecNameAsUTF8(NULL),
- codecPrivate(NULL),
- codecPrivateSize(0),
- lacing(false)
-{
-}
+int Track::Info::CopyStr(char* Info::*str, Info& dst_) const {
+ if (str == static_cast<char * Info::*>(NULL))
+ return -1;
-Track::Info::~Info()
-{
- Clear();
-}
+ char*& dst = dst_.*str;
-void Track::Info::Clear()
-{
- delete[] nameAsUTF8;
- nameAsUTF8 = NULL;
+ if (dst) // should be NULL already
+ return -1;
- delete[] language;
- language = NULL;
+ const char* const src = this->*str;
- delete[] codecId;
- codecId = NULL;
-
- delete[] codecPrivate;
- codecPrivate = NULL;
- codecPrivateSize = 0;
-
- delete[] codecNameAsUTF8;
- codecNameAsUTF8 = NULL;
-}
-
-int Track::Info::CopyStr(char* Info::*str, Info& dst_) const
-{
- if (str == static_cast<char* Info::*>(NULL))
- return -1;
-
- char*& dst = dst_.*str;
-
- if (dst) //should be NULL already
- return -1;
-
- const char* const src = this->*str;
-
- if (src == NULL)
- return 0;
-
- const size_t len = strlen(src);
-
- dst = new (std::nothrow) char[len+1];
-
- if (dst == NULL)
- return -1;
-
- strcpy(dst, src);
-
+ if (src == NULL)
return 0;
+
+ const size_t len = strlen(src);
+
+ dst = new (std::nothrow) char[len + 1];
+
+ if (dst == NULL)
+ return -1;
+
+ strcpy(dst, src);
+
+ return 0;
}
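+// Editor's note (not part of the original patch): CopyStr takes a
+// pointer-to-member, so a single helper can deep-copy any of the
+// string fields; Copy below invokes it once per field, e.g.:
+#if 0
+ if (int status = CopyStr(&Info::codecId, dst))
+ return status; // allocation failed; the copy is only partial
+#endif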
+int Track::Info::Copy(Info& dst) const {
+ if (&dst == this)
+ return 0;
-int Track::Info::Copy(Info& dst) const
-{
- if (&dst == this)
- return 0;
+ dst.type = type;
+ dst.number = number;
+ dst.defaultDuration = defaultDuration;
+ dst.codecDelay = codecDelay;
+ dst.seekPreRoll = seekPreRoll;
+ dst.uid = uid;
+ dst.lacing = lacing;
+ dst.settings = settings;
- dst.type = type;
- dst.number = number;
- dst.defaultDuration = defaultDuration;
- dst.codecDelay = codecDelay;
- dst.seekPreRoll = seekPreRoll;
- dst.uid = uid;
- dst.lacing = lacing;
- dst.settings = settings;
+ // We now copy the string member variables from src to dst.
+ // This involves memory allocation so in principle the operation
+ // can fail (indeed, that's why we have Info::Copy), so we must
+ // report this to the caller. An error return from this function
+ // therefore implies that the copy was only partially successful.
- //We now copy the string member variables from src to dst.
- //This involves memory allocation so in principle the operation
- //can fail (indeed, that's why we have Info::Copy), so we must
- //report this to the caller. An error return from this function
- //therefore implies that the copy was only partially successful.
+ if (int status = CopyStr(&Info::nameAsUTF8, dst))
+ return status;
- if (int status = CopyStr(&Info::nameAsUTF8, dst))
- return status;
+ if (int status = CopyStr(&Info::language, dst))
+ return status;
- if (int status = CopyStr(&Info::language, dst))
- return status;
+ if (int status = CopyStr(&Info::codecId, dst))
+ return status;
- if (int status = CopyStr(&Info::codecId, dst))
- return status;
+ if (int status = CopyStr(&Info::codecNameAsUTF8, dst))
+ return status;
- if (int status = CopyStr(&Info::codecNameAsUTF8, dst))
- return status;
+ if (codecPrivateSize > 0) {
+ if (codecPrivate == NULL)
+ return -1;
- if (codecPrivateSize > 0)
- {
- if (codecPrivate == NULL)
- return -1;
+ if (dst.codecPrivate)
+ return -1;
- if (dst.codecPrivate)
- return -1;
+ if (dst.codecPrivateSize != 0)
+ return -1;
- if (dst.codecPrivateSize != 0)
- return -1;
+ dst.codecPrivate = new (std::nothrow) unsigned char[codecPrivateSize];
- dst.codecPrivate = new (std::nothrow) unsigned char[codecPrivateSize];
+ if (dst.codecPrivate == NULL)
+ return -1;
- if (dst.codecPrivate == NULL)
- return -1;
+ memcpy(dst.codecPrivate, codecPrivate, codecPrivateSize);
+ dst.codecPrivateSize = codecPrivateSize;
+ }
- memcpy(dst.codecPrivate, codecPrivate, codecPrivateSize);
- dst.codecPrivateSize = codecPrivateSize;
+ return 0;
+}
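
Since Copy allocates as it goes, an error return can leave the destination partially populated (as the comment inside Copy notes). A hedged usage sketch; DuplicateInfo is a hypothetical caller, not a library function:

// Duplicate a parsed Info, releasing any partial copy on failure.
static int DuplicateInfo(const mkvparser::Track::Info& src,
                         mkvparser::Track::Info& dst) {
  const int status = src.Copy(dst);

  if (status) {
    dst.Clear();  // frees whatever strings were copied before the failure
    return status;
  }

  return 0;  // dst now owns deep copies of the string members
}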
+
+const BlockEntry* Track::GetEOS() const { return &m_eos; }
+
+long Track::GetType() const { return m_info.type; }
+
+long Track::GetNumber() const { return m_info.number; }
+
+unsigned long long Track::GetUid() const { return m_info.uid; }
+
+const char* Track::GetNameAsUTF8() const { return m_info.nameAsUTF8; }
+
+const char* Track::GetLanguage() const { return m_info.language; }
+
+const char* Track::GetCodecNameAsUTF8() const { return m_info.codecNameAsUTF8; }
+
+const char* Track::GetCodecId() const { return m_info.codecId; }
+
+const unsigned char* Track::GetCodecPrivate(size_t& size) const {
+ size = m_info.codecPrivateSize;
+ return m_info.codecPrivate;
+}
+
+bool Track::GetLacing() const { return m_info.lacing; }
+
+unsigned long long Track::GetDefaultDuration() const {
+ return m_info.defaultDuration;
+}
+
+unsigned long long Track::GetCodecDelay() const { return m_info.codecDelay; }
+
+unsigned long long Track::GetSeekPreRoll() const { return m_info.seekPreRoll; }
+
+long Track::GetFirst(const BlockEntry*& pBlockEntry) const {
+ const Cluster* pCluster = m_pSegment->GetFirst();
+
+ for (int i = 0;;) {
+ if (pCluster == NULL) {
+ pBlockEntry = GetEOS();
+ return 1;
}
- return 0;
-}
-
-const BlockEntry* Track::GetEOS() const
-{
- return &m_eos;
-}
-
-long Track::GetType() const
-{
- return m_info.type;
-}
-
-long Track::GetNumber() const
-{
- return m_info.number;
-}
-
-unsigned long long Track::GetUid() const
-{
- return m_info.uid;
-}
-
-const char* Track::GetNameAsUTF8() const
-{
- return m_info.nameAsUTF8;
-}
-
-const char* Track::GetLanguage() const
-{
- return m_info.language;
-}
-
-const char* Track::GetCodecNameAsUTF8() const
-{
- return m_info.codecNameAsUTF8;
-}
-
-
-const char* Track::GetCodecId() const
-{
- return m_info.codecId;
-}
-
-const unsigned char* Track::GetCodecPrivate(size_t& size) const
-{
- size = m_info.codecPrivateSize;
- return m_info.codecPrivate;
-}
-
-
-bool Track::GetLacing() const
-{
- return m_info.lacing;
-}
-
-unsigned long long Track::GetDefaultDuration() const
-{
- return m_info.defaultDuration;
-}
-
-unsigned long long Track::GetCodecDelay() const
-{
- return m_info.codecDelay;
-}
-
-unsigned long long Track::GetSeekPreRoll() const
-{
- return m_info.seekPreRoll;
-}
-
-long Track::GetFirst(const BlockEntry*& pBlockEntry) const
-{
- const Cluster* pCluster = m_pSegment->GetFirst();
-
- for (int i = 0; ; )
- {
- if (pCluster == NULL)
- {
- pBlockEntry = GetEOS();
- return 1;
- }
-
- if (pCluster->EOS())
- {
+ if (pCluster->EOS()) {
#if 0
- if (m_pSegment->Unparsed() <= 0) //all clusters have been loaded
- {
+ if (m_pSegment->Unparsed() <= 0) { //all clusters have been loaded
pBlockEntry = GetEOS();
return 1;
}
#else
- if (m_pSegment->DoneParsing())
- {
- pBlockEntry = GetEOS();
- return 1;
- }
+ if (m_pSegment->DoneParsing()) {
+ pBlockEntry = GetEOS();
+ return 1;
+ }
#endif
- pBlockEntry = 0;
- return E_BUFFER_NOT_FULL;
- }
-
- long status = pCluster->GetFirst(pBlockEntry);
-
- if (status < 0) //error
- return status;
-
- if (pBlockEntry == 0) //empty cluster
- {
- pCluster = m_pSegment->GetNext(pCluster);
- continue;
- }
-
- for (;;)
- {
- const Block* const pBlock = pBlockEntry->GetBlock();
- assert(pBlock);
-
- const long long tn = pBlock->GetTrackNumber();
-
- if ((tn == m_info.number) && VetEntry(pBlockEntry))
- return 0;
-
- const BlockEntry* pNextEntry;
-
- status = pCluster->GetNext(pBlockEntry, pNextEntry);
-
- if (status < 0) //error
- return status;
-
- if (pNextEntry == 0)
- break;
-
- pBlockEntry = pNextEntry;
- }
-
- ++i;
-
- if (i >= 100)
- break;
-
- pCluster = m_pSegment->GetNext(pCluster);
+ pBlockEntry = 0;
+ return E_BUFFER_NOT_FULL;
}
- //NOTE: if we get here, it means that we didn't find a block with
- //a matching track number. We interpret that as an error (which
- //might be too conservative).
+ long status = pCluster->GetFirst(pBlockEntry);
- pBlockEntry = GetEOS(); //so we can return a non-NULL value
- return 1;
-}
+ if (status < 0) // error
+ return status;
+ if (pBlockEntry == 0) { // empty cluster
+ pCluster = m_pSegment->GetNext(pCluster);
+ continue;
+ }
-long Track::GetNext(
- const BlockEntry* pCurrEntry,
- const BlockEntry*& pNextEntry) const
-{
- assert(pCurrEntry);
- assert(!pCurrEntry->EOS()); //?
+ for (;;) {
+ const Block* const pBlock = pBlockEntry->GetBlock();
+ assert(pBlock);
- const Block* const pCurrBlock = pCurrEntry->GetBlock();
- assert(pCurrBlock && pCurrBlock->GetTrackNumber() == m_info.number);
- if (!pCurrBlock || pCurrBlock->GetTrackNumber() != m_info.number)
- return -1;
+ const long long tn = pBlock->GetTrackNumber();
- const Cluster* pCluster = pCurrEntry->GetCluster();
- assert(pCluster);
- assert(!pCluster->EOS());
+ if ((tn == m_info.number) && VetEntry(pBlockEntry))
+ return 0;
- long status = pCluster->GetNext(pCurrEntry, pNextEntry);
+ const BlockEntry* pNextEntry;
- if (status < 0) //error
+ status = pCluster->GetNext(pBlockEntry, pNextEntry);
+
+ if (status < 0) // error
return status;
- for (int i = 0; ; )
- {
- while (pNextEntry)
- {
- const Block* const pNextBlock = pNextEntry->GetBlock();
- assert(pNextBlock);
+ if (pNextEntry == 0)
+ break;
- if (pNextBlock->GetTrackNumber() == m_info.number)
- return 0;
+ pBlockEntry = pNextEntry;
+ }
- pCurrEntry = pNextEntry;
+ ++i;
- status = pCluster->GetNext(pCurrEntry, pNextEntry);
+ if (i >= 100)
+ break;
- if (status < 0) //error
- return status;
- }
+ pCluster = m_pSegment->GetNext(pCluster);
+ }
- pCluster = m_pSegment->GetNext(pCluster);
+ // NOTE: if we get here, it means that we didn't find a block with
+ // a matching track number. We interpret that as an error (which
+ // might be too conservative).
- if (pCluster == NULL)
- {
- pNextEntry = GetEOS();
- return 1;
- }
+ pBlockEntry = GetEOS(); // so we can return a non-NULL value
+ return 1;
+}
- if (pCluster->EOS())
- {
+long Track::GetNext(const BlockEntry* pCurrEntry,
+ const BlockEntry*& pNextEntry) const {
+ assert(pCurrEntry);
+ assert(!pCurrEntry->EOS()); //?
+
+ const Block* const pCurrBlock = pCurrEntry->GetBlock();
+ assert(pCurrBlock && pCurrBlock->GetTrackNumber() == m_info.number);
+ if (!pCurrBlock || pCurrBlock->GetTrackNumber() != m_info.number)
+ return -1;
+
+ const Cluster* pCluster = pCurrEntry->GetCluster();
+ assert(pCluster);
+ assert(!pCluster->EOS());
+
+ long status = pCluster->GetNext(pCurrEntry, pNextEntry);
+
+ if (status < 0) // error
+ return status;
+
+ for (int i = 0;;) {
+ while (pNextEntry) {
+ const Block* const pNextBlock = pNextEntry->GetBlock();
+ assert(pNextBlock);
+
+ if (pNextBlock->GetTrackNumber() == m_info.number)
+ return 0;
+
+ pCurrEntry = pNextEntry;
+
+ status = pCluster->GetNext(pCurrEntry, pNextEntry);
+
+ if (status < 0) // error
+ return status;
+ }
+
+ pCluster = m_pSegment->GetNext(pCluster);
+
+ if (pCluster == NULL) {
+ pNextEntry = GetEOS();
+ return 1;
+ }
+
+ if (pCluster->EOS()) {
#if 0
if (m_pSegment->Unparsed() <= 0) //all clusters have been loaded
{
@@ -5851,155 +5108,148 @@
return 1;
}
#else
- if (m_pSegment->DoneParsing())
- {
- pNextEntry = GetEOS();
- return 1;
- }
+ if (m_pSegment->DoneParsing()) {
+ pNextEntry = GetEOS();
+ return 1;
+ }
#endif
- //TODO: there is a potential O(n^2) problem here: we tell the
- //caller to (pre)load another cluster, which he does, but then he
- //calls GetNext again, which repeats the same search. This is
- //a pathological case, since the only way it can happen is if
- //there exists a long sequence of clusters none of which contain a
- // block from this track. One way around this problem is for the
- //caller to be smarter when he loads another cluster: don't call
- //us back until you have a cluster that contains a block from this
- //track. (Of course, that's not cheap either, since our caller
-            //would have to scan each cluster as it's loaded, so that
- //would just push back the problem.)
+      // TODO: there is a potential O(n^2) problem here: we tell the
+      // caller to (pre)load another cluster, which it does, but then it
+      // calls GetNext again, which repeats the same search. This is
+      // a pathological case, since the only way it can happen is if
+      // there exists a long sequence of clusters none of which contains
+      // a block from this track. One way around this problem is for the
+      // caller to be smarter when it loads another cluster: don't call
+      // us back until you have a cluster that contains a block from this
+      // track. (Of course, that's not cheap either, since our caller
+      // would have to scan each cluster as it's loaded, so that
+      // would just push back the problem. See the caller-loop sketch
+      // after this function.)
- pNextEntry = NULL;
- return E_BUFFER_NOT_FULL;
- }
-
- status = pCluster->GetFirst(pNextEntry);
-
- if (status < 0) //error
- return status;
-
- if (pNextEntry == NULL) //empty cluster
- continue;
-
- ++i;
-
- if (i >= 100)
- break;
+ pNextEntry = NULL;
+ return E_BUFFER_NOT_FULL;
}
- //NOTE: if we get here, it means that we didn't find a block with
- //a matching track number after lots of searching, so we give
- //up trying.
+ status = pCluster->GetFirst(pNextEntry);
- pNextEntry = GetEOS(); //so we can return a non-NULL value
- return 1;
+ if (status < 0) // error
+ return status;
+
+ if (pNextEntry == NULL) // empty cluster
+ continue;
+
+ ++i;
+
+ if (i >= 100)
+ break;
+ }
+
+ // NOTE: if we get here, it means that we didn't find a block with
+ // a matching track number after lots of searching, so we give
+ // up trying.
+
+ pNextEntry = GetEOS(); // so we can return a non-NULL value
+ return 1;
}
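
A hedged sketch of the retry protocol the TODO above refers to: when GetNext reports E_BUFFER_NOT_FULL, the caller loads another cluster and repeats the search. Segment::LoadCluster is used here as the loading step; pTrack, pSegment, and pCurr are assumed valid:

const mkvparser::BlockEntry* pNext = NULL;

for (;;) {
  const long status = pTrack->GetNext(pCurr, pNext);

  if (status >= 0)  // 0: found a block; 1: pNext is the EOS entry
    break;

  if (status != mkvparser::E_BUFFER_NOT_FULL)
    break;  // hard error

  // Underflow: (pre)load another cluster, then retry the search.
  long long pos;
  long len;

  if (pSegment->LoadCluster(pos, len) < 0)
    break;
}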
-bool Track::VetEntry(const BlockEntry* pBlockEntry) const
-{
- assert(pBlockEntry);
- const Block* const pBlock = pBlockEntry->GetBlock();
- assert(pBlock);
- assert(pBlock->GetTrackNumber() == m_info.number);
- if (!pBlock || pBlock->GetTrackNumber() != m_info.number)
- return false;
+bool Track::VetEntry(const BlockEntry* pBlockEntry) const {
+ assert(pBlockEntry);
+ const Block* const pBlock = pBlockEntry->GetBlock();
+ assert(pBlock);
+ assert(pBlock->GetTrackNumber() == m_info.number);
+ if (!pBlock || pBlock->GetTrackNumber() != m_info.number)
+ return false;
- // This function is used during a seek to determine whether the
- // frame is a valid seek target. This default function simply
- // returns true, which means all frames are valid seek targets.
- // It gets overridden by the VideoTrack class, because only video
- // keyframes can be used as seek target.
+ // This function is used during a seek to determine whether the
+ // frame is a valid seek target. This default function simply
+ // returns true, which means all frames are valid seek targets.
+ // It gets overridden by the VideoTrack class, because only video
+ // keyframes can be used as seek target.
- return true;
+ return true;
}
-long Track::Seek(
- long long time_ns,
- const BlockEntry*& pResult) const
-{
- const long status = GetFirst(pResult);
+long Track::Seek(long long time_ns, const BlockEntry*& pResult) const {
+ const long status = GetFirst(pResult);
- if (status < 0) //buffer underflow, etc
- return status;
+ if (status < 0) // buffer underflow, etc
+ return status;
- assert(pResult);
+ assert(pResult);
- if (pResult->EOS())
- return 0;
+ if (pResult->EOS())
+ return 0;
- const Cluster* pCluster = pResult->GetCluster();
+ const Cluster* pCluster = pResult->GetCluster();
+ assert(pCluster);
+ assert(pCluster->GetIndex() >= 0);
+
+ if (time_ns <= pResult->GetBlock()->GetTime(pCluster))
+ return 0;
+
+ Cluster** const clusters = m_pSegment->m_clusters;
+ assert(clusters);
+
+ const long count = m_pSegment->GetCount(); // loaded only, not preloaded
+ assert(count > 0);
+
+ Cluster** const i = clusters + pCluster->GetIndex();
+ assert(i);
+ assert(*i == pCluster);
+ assert(pCluster->GetTime() <= time_ns);
+
+ Cluster** const j = clusters + count;
+
+ Cluster** lo = i;
+ Cluster** hi = j;
+
+ while (lo < hi) {
+ // INVARIANT:
+    // [i, lo) <= time_ns
+    // [lo, hi) ?
+    // [hi, j) > time_ns
+
+ Cluster** const mid = lo + (hi - lo) / 2;
+ assert(mid < hi);
+
+ pCluster = *mid;
assert(pCluster);
assert(pCluster->GetIndex() >= 0);
+ assert(pCluster->GetIndex() == long(mid - m_pSegment->m_clusters));
- if (time_ns <= pResult->GetBlock()->GetTime(pCluster))
- return 0;
+ const long long t = pCluster->GetTime();
- Cluster** const clusters = m_pSegment->m_clusters;
- assert(clusters);
+ if (t <= time_ns)
+ lo = mid + 1;
+ else
+ hi = mid;
- const long count = m_pSegment->GetCount(); //loaded only, not preloaded
- assert(count > 0);
+ assert(lo <= hi);
+ }
- Cluster** const i = clusters + pCluster->GetIndex();
- assert(i);
- assert(*i == pCluster);
+ assert(lo == hi);
+ assert(lo > i);
+ assert(lo <= j);
+
+ while (lo > i) {
+ pCluster = *--lo;
+ assert(pCluster);
assert(pCluster->GetTime() <= time_ns);
- Cluster** const j = clusters + count;
+ pResult = pCluster->GetEntry(this);
- Cluster** lo = i;
- Cluster** hi = j;
+ if ((pResult != 0) && !pResult->EOS())
+ return 0;
- while (lo < hi)
- {
- //INVARIANT:
- //[i, lo) <= time_ns
- //[lo, hi) ?
- //[hi, j) > time_ns
+ // landed on empty cluster (no entries)
+ }
- Cluster** const mid = lo + (hi - lo) / 2;
- assert(mid < hi);
-
- pCluster = *mid;
- assert(pCluster);
- assert(pCluster->GetIndex() >= 0);
- assert(pCluster->GetIndex() == long(mid - m_pSegment->m_clusters));
-
- const long long t = pCluster->GetTime();
-
- if (t <= time_ns)
- lo = mid + 1;
- else
- hi = mid;
-
- assert(lo <= hi);
- }
-
- assert(lo == hi);
- assert(lo > i);
- assert(lo <= j);
-
- while (lo > i)
- {
- pCluster = *--lo;
- assert(pCluster);
- assert(pCluster->GetTime() <= time_ns);
-
- pResult = pCluster->GetEntry(this);
-
- if ((pResult != 0) && !pResult->EOS())
- return 0;
-
- //landed on empty cluster (no entries)
- }
-
- pResult = GetEOS(); //weird
- return 0;
+ pResult = GetEOS(); // weird
+ return 0;
}
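
The cluster lookup above is an upper-bound binary search over cluster start times. A self-contained sketch of the same invariant on a plain sorted array:

// Returns a pointer one past the last element <= target, maintaining
// the invariant used by Track::Seek:
//   [begin, lo) <= target, [lo, hi) unknown, [hi, end) > target.
static const long long* UpperBound(const long long* begin,
                                   const long long* end,
                                   long long target) {
  const long long* lo = begin;
  const long long* hi = end;

  while (lo < hi) {
    const long long* const mid = lo + (hi - lo) / 2;

    if (*mid <= target)
      lo = mid + 1;
    else
      hi = mid;
  }

  return lo;  // lo == hi: first element greater than target
}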
-const ContentEncoding*
-Track::GetContentEncodingByIndex(unsigned long idx) const {
+const ContentEncoding* Track::GetContentEncodingByIndex(
+ unsigned long idx) const {
const ptrdiff_t count =
content_encoding_entries_end_ - content_encoding_entries_;
assert(count >= 0);
@@ -6029,27 +5279,22 @@
int count = 0;
while (pos < stop) {
long long id, size;
- const long status = ParseElementHeader(pReader,
- pos,
- stop,
- id,
- size);
- if (status < 0) //error
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
return status;
-
- //pos now designates start of element
+ // pos now designates start of element
if (id == 0x2240) // ContentEncoding ID
++count;
- pos += size; //consume payload
+ pos += size; // consume payload
assert(pos <= stop);
}
if (count <= 0)
return -1;
- content_encoding_entries_ = new (std::nothrow) ContentEncoding*[count];
+  content_encoding_entries_ = new (std::nothrow) ContentEncoding*[count];
if (!content_encoding_entries_)
return -1;
@@ -6058,24 +5303,18 @@
pos = start;
while (pos < stop) {
long long id, size;
- long status = ParseElementHeader(pReader,
- pos,
- stop,
- id,
- size);
- if (status < 0) //error
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
return status;
- //pos now designates start of element
- if (id == 0x2240) { // ContentEncoding ID
+ // pos now designates start of element
+ if (id == 0x2240) { // ContentEncoding ID
ContentEncoding* const content_encoding =
new (std::nothrow) ContentEncoding();
if (!content_encoding)
return -1;
- status = content_encoding->ParseContentEncodingEntry(pos,
- size,
- pReader);
+ status = content_encoding->ParseContentEncodingEntry(pos, size, pReader);
if (status) {
delete content_encoding;
return status;
@@ -6084,7 +5323,7 @@
*content_encoding_entries_end_++ = content_encoding;
}
- pos += size; //consume payload
+ pos += size; // consume payload
assert(pos <= stop);
}
@@ -6093,219 +5332,175 @@
return 0;
}
-Track::EOSBlock::EOSBlock() :
- BlockEntry(NULL, LONG_MIN)
-{
-}
+Track::EOSBlock::EOSBlock() : BlockEntry(NULL, LONG_MIN) {}
-BlockEntry::Kind Track::EOSBlock::GetKind() const
-{
- return kBlockEOS;
-}
+BlockEntry::Kind Track::EOSBlock::GetKind() const { return kBlockEOS; }
+const Block* Track::EOSBlock::GetBlock() const { return NULL; }
-const Block* Track::EOSBlock::GetBlock() const
-{
- return NULL;
-}
+VideoTrack::VideoTrack(Segment* pSegment, long long element_start,
+ long long element_size)
+ : Track(pSegment, element_start, element_size) {}
+long VideoTrack::Parse(Segment* pSegment, const Info& info,
+ long long element_start, long long element_size,
+ VideoTrack*& pResult) {
+ if (pResult)
+ return -1;
-VideoTrack::VideoTrack(
- Segment* pSegment,
- long long element_start,
- long long element_size) :
- Track(pSegment, element_start, element_size)
-{
-}
+ if (info.type != Track::kVideo)
+ return -1;
+ long long width = 0;
+ long long height = 0;
+ double rate = 0.0;
-long VideoTrack::Parse(
- Segment* pSegment,
- const Info& info,
- long long element_start,
- long long element_size,
- VideoTrack*& pResult)
-{
- if (pResult)
- return -1;
+ IMkvReader* const pReader = pSegment->m_pReader;
- if (info.type != Track::kVideo)
- return -1;
+ const Settings& s = info.settings;
+ assert(s.start >= 0);
+ assert(s.size >= 0);
- long long width = 0;
- long long height = 0;
- double rate = 0.0;
+ long long pos = s.start;
+ assert(pos >= 0);
- IMkvReader* const pReader = pSegment->m_pReader;
+ const long long stop = pos + s.size;
- const Settings& s = info.settings;
- assert(s.start >= 0);
- assert(s.size >= 0);
+ while (pos < stop) {
+ long long id, size;
- long long pos = s.start;
- assert(pos >= 0);
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
- const long long stop = pos + s.size;
+ if (status < 0) // error
+ return status;
- while (pos < stop)
- {
- long long id, size;
+ if (id == 0x30) { // pixel width
+ width = UnserializeUInt(pReader, pos, size);
- const long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
+ if (width <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x3A) { // pixel height
+ height = UnserializeUInt(pReader, pos, size);
- if (status < 0) //error
- return status;
+ if (height <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x0383E3) { // frame rate
+ const long status = UnserializeFloat(pReader, pos, size, rate);
- if (id == 0x30) //pixel width
- {
- width = UnserializeUInt(pReader, pos, size);
-
- if (width <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x3A) //pixel height
- {
- height = UnserializeUInt(pReader, pos, size);
-
- if (height <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x0383E3) //frame rate
- {
- const long status = UnserializeFloat(
- pReader,
- pos,
- size,
- rate);
-
- if (status < 0)
- return status;
-
- if (rate <= 0)
- return E_FILE_FORMAT_INVALID;
- }
-
- pos += size; //consume payload
- assert(pos <= stop);
- }
-
- assert(pos == stop);
-
- VideoTrack* const pTrack = new (std::nothrow) VideoTrack(pSegment,
- element_start,
- element_size);
-
- if (pTrack == NULL)
- return -1; //generic error
-
- const int status = info.Copy(pTrack->m_info);
-
- if (status) // error
- {
- delete pTrack;
- return status;
- }
-
- pTrack->m_width = width;
- pTrack->m_height = height;
- pTrack->m_rate = rate;
-
- pResult = pTrack;
- return 0; //success
-}
-
-
-bool VideoTrack::VetEntry(const BlockEntry* pBlockEntry) const
-{
- return Track::VetEntry(pBlockEntry) && pBlockEntry->GetBlock()->IsKey();
-}
-
-long VideoTrack::Seek(
- long long time_ns,
- const BlockEntry*& pResult) const
-{
- const long status = GetFirst(pResult);
-
- if (status < 0) //buffer underflow, etc
+ if (status < 0)
return status;
- assert(pResult);
+ if (rate <= 0)
+ return E_FILE_FORMAT_INVALID;
+ }
- if (pResult->EOS())
- return 0;
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
- const Cluster* pCluster = pResult->GetCluster();
+ assert(pos == stop);
+
+ VideoTrack* const pTrack =
+ new (std::nothrow) VideoTrack(pSegment, element_start, element_size);
+
+ if (pTrack == NULL)
+ return -1; // generic error
+
+ const int status = info.Copy(pTrack->m_info);
+
+ if (status) { // error
+ delete pTrack;
+ return status;
+ }
+
+ pTrack->m_width = width;
+ pTrack->m_height = height;
+ pTrack->m_rate = rate;
+
+ pResult = pTrack;
+ return 0; // success
+}
+
+bool VideoTrack::VetEntry(const BlockEntry* pBlockEntry) const {
+ return Track::VetEntry(pBlockEntry) && pBlockEntry->GetBlock()->IsKey();
+}
+
+long VideoTrack::Seek(long long time_ns, const BlockEntry*& pResult) const {
+ const long status = GetFirst(pResult);
+
+ if (status < 0) // buffer underflow, etc
+ return status;
+
+ assert(pResult);
+
+ if (pResult->EOS())
+ return 0;
+
+ const Cluster* pCluster = pResult->GetCluster();
+ assert(pCluster);
+ assert(pCluster->GetIndex() >= 0);
+
+ if (time_ns <= pResult->GetBlock()->GetTime(pCluster))
+ return 0;
+
+ Cluster** const clusters = m_pSegment->m_clusters;
+ assert(clusters);
+
+ const long count = m_pSegment->GetCount(); // loaded only, not pre-loaded
+ assert(count > 0);
+
+ Cluster** const i = clusters + pCluster->GetIndex();
+ assert(i);
+ assert(*i == pCluster);
+ assert(pCluster->GetTime() <= time_ns);
+
+ Cluster** const j = clusters + count;
+
+ Cluster** lo = i;
+ Cluster** hi = j;
+
+ while (lo < hi) {
+ // INVARIANT:
+    // [i, lo) <= time_ns
+    // [lo, hi) ?
+    // [hi, j) > time_ns
+
+ Cluster** const mid = lo + (hi - lo) / 2;
+ assert(mid < hi);
+
+ pCluster = *mid;
assert(pCluster);
assert(pCluster->GetIndex() >= 0);
+ assert(pCluster->GetIndex() == long(mid - m_pSegment->m_clusters));
- if (time_ns <= pResult->GetBlock()->GetTime(pCluster))
- return 0;
+ const long long t = pCluster->GetTime();
- Cluster** const clusters = m_pSegment->m_clusters;
- assert(clusters);
+ if (t <= time_ns)
+ lo = mid + 1;
+ else
+ hi = mid;
- const long count = m_pSegment->GetCount(); //loaded only, not pre-loaded
- assert(count > 0);
+ assert(lo <= hi);
+ }
- Cluster** const i = clusters + pCluster->GetIndex();
- assert(i);
- assert(*i == pCluster);
- assert(pCluster->GetTime() <= time_ns);
+ assert(lo == hi);
+ assert(lo > i);
+ assert(lo <= j);
- Cluster** const j = clusters + count;
+ pCluster = *--lo;
+ assert(pCluster);
+ assert(pCluster->GetTime() <= time_ns);
- Cluster** lo = i;
- Cluster** hi = j;
+ pResult = pCluster->GetEntry(this, time_ns);
- while (lo < hi)
- {
- //INVARIANT:
- //[i, lo) <= time_ns
- //[lo, hi) ?
- //[hi, j) > time_ns
+ if ((pResult != 0) && !pResult->EOS()) // found a keyframe
+ return 0;
- Cluster** const mid = lo + (hi - lo) / 2;
- assert(mid < hi);
-
- pCluster = *mid;
- assert(pCluster);
- assert(pCluster->GetIndex() >= 0);
- assert(pCluster->GetIndex() == long(mid - m_pSegment->m_clusters));
-
- const long long t = pCluster->GetTime();
-
- if (t <= time_ns)
- lo = mid + 1;
- else
- hi = mid;
-
- assert(lo <= hi);
- }
-
- assert(lo == hi);
- assert(lo > i);
- assert(lo <= j);
-
+ while (lo != i) {
pCluster = *--lo;
assert(pCluster);
assert(pCluster->GetTime() <= time_ns);
- pResult = pCluster->GetEntry(this, time_ns);
-
- if ((pResult != 0) && !pResult->EOS()) //found a keyframe
- return 0;
-
- while (lo != i)
- {
- pCluster = *--lo;
- assert(pCluster);
- assert(pCluster->GetTime() <= time_ns);
-
#if 0
//TODO:
//We need to handle the case when a cluster
@@ -6314,651 +5509,501 @@
//good enough.
pResult = pCluster->GetMaxKey(this);
#else
- pResult = pCluster->GetEntry(this, time_ns);
+ pResult = pCluster->GetEntry(this, time_ns);
#endif
- if ((pResult != 0) && !pResult->EOS())
- return 0;
+ if ((pResult != 0) && !pResult->EOS())
+ return 0;
+ }
+
+ // weird: we're on the first cluster, but no keyframe found
+ // should never happen but we must return something anyway
+
+ pResult = GetEOS();
+ return 0;
+}
+
+long long VideoTrack::GetWidth() const { return m_width; }
+
+long long VideoTrack::GetHeight() const { return m_height; }
+
+double VideoTrack::GetFrameRate() const { return m_rate; }
+
+AudioTrack::AudioTrack(Segment* pSegment, long long element_start,
+ long long element_size)
+ : Track(pSegment, element_start, element_size) {}
+
+long AudioTrack::Parse(Segment* pSegment, const Info& info,
+ long long element_start, long long element_size,
+ AudioTrack*& pResult) {
+ if (pResult)
+ return -1;
+
+ if (info.type != Track::kAudio)
+ return -1;
+
+ IMkvReader* const pReader = pSegment->m_pReader;
+
+ const Settings& s = info.settings;
+ assert(s.start >= 0);
+ assert(s.size >= 0);
+
+ long long pos = s.start;
+ assert(pos >= 0);
+
+ const long long stop = pos + s.size;
+
+ double rate = 8000.0; // MKV default
+ long long channels = 1;
+ long long bit_depth = 0;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == 0x35) { // Sample Rate
+ status = UnserializeFloat(pReader, pos, size, rate);
+
+ if (status < 0)
+ return status;
+
+ if (rate <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x1F) { // Channel Count
+ channels = UnserializeUInt(pReader, pos, size);
+
+ if (channels <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x2264) { // Bit Depth
+ bit_depth = UnserializeUInt(pReader, pos, size);
+
+ if (bit_depth <= 0)
+ return E_FILE_FORMAT_INVALID;
}
- //weird: we're on the first cluster, but no keyframe found
- //should never happen but we must return something anyway
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
- pResult = GetEOS();
- return 0;
+ assert(pos == stop);
+
+ AudioTrack* const pTrack =
+ new (std::nothrow) AudioTrack(pSegment, element_start, element_size);
+
+ if (pTrack == NULL)
+ return -1; // generic error
+
+ const int status = info.Copy(pTrack->m_info);
+
+ if (status) {
+ delete pTrack;
+ return status;
+ }
+
+ pTrack->m_rate = rate;
+ pTrack->m_channels = channels;
+ pTrack->m_bitDepth = bit_depth;
+
+ pResult = pTrack;
+ return 0; // success
}
+double AudioTrack::GetSamplingRate() const { return m_rate; }
-long long VideoTrack::GetWidth() const
-{
- return m_width;
-}
+long long AudioTrack::GetChannels() const { return m_channels; }
+long long AudioTrack::GetBitDepth() const { return m_bitDepth; }
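
A hedged sketch of reading the audio parameters exposed above. Per AudioTrack::Parse, the sampling rate defaults to 8000.0 and the channel count to 1 when the TrackEntry omits them; a bit depth of 0 means the element was absent:

#include <cstdio>

static void ReportAudio(const mkvparser::AudioTrack* pTrack) {
  std::printf("rate=%g channels=%lld bits=%lld\n",
              pTrack->GetSamplingRate(),
              pTrack->GetChannels(),
              pTrack->GetBitDepth());
}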
-long long VideoTrack::GetHeight() const
-{
- return m_height;
-}
+Tracks::Tracks(Segment* pSegment, long long start, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_trackEntries(NULL),
+ m_trackEntriesEnd(NULL) {}
+long Tracks::Parse() {
+ assert(m_trackEntries == NULL);
+ assert(m_trackEntriesEnd == NULL);
-double VideoTrack::GetFrameRate() const
-{
- return m_rate;
-}
+ const long long stop = m_start + m_size;
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+ int count = 0;
+ long long pos = m_start;
-AudioTrack::AudioTrack(
- Segment* pSegment,
- long long element_start,
- long long element_size) :
- Track(pSegment, element_start, element_size)
-{
-}
+ while (pos < stop) {
+ long long id, size;
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
-long AudioTrack::Parse(
- Segment* pSegment,
- const Info& info,
- long long element_start,
- long long element_size,
- AudioTrack*& pResult)
-{
- if (pResult)
- return -1;
+ if (status < 0) // error
+ return status;
- if (info.type != Track::kAudio)
- return -1;
+ if (size == 0) // weird
+ continue;
- IMkvReader* const pReader = pSegment->m_pReader;
+ if (id == 0x2E) // TrackEntry ID
+ ++count;
- const Settings& s = info.settings;
- assert(s.start >= 0);
- assert(s.size >= 0);
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
- long long pos = s.start;
- assert(pos >= 0);
+ assert(pos == stop);
- const long long stop = pos + s.size;
+ if (count <= 0)
+ return 0; // success
- double rate = 8000.0; // MKV default
- long long channels = 1;
- long long bit_depth = 0;
+  m_trackEntries = new (std::nothrow) Track*[count];
- while (pos < stop)
- {
- long long id, size;
+ if (m_trackEntries == NULL)
+ return -1;
- long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
+ m_trackEntriesEnd = m_trackEntries;
- if (status < 0) //error
- return status;
+ pos = m_start;
- if (id == 0x35) //Sample Rate
- {
- status = UnserializeFloat(pReader, pos, size, rate);
+ while (pos < stop) {
+ const long long element_start = pos;
- if (status < 0)
- return status;
+ long long id, payload_size;
- if (rate <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x1F) //Channel Count
- {
- channels = UnserializeUInt(pReader, pos, size);
+ const long status =
+ ParseElementHeader(pReader, pos, stop, id, payload_size);
- if (channels <= 0)
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x2264) //Bit Depth
- {
- bit_depth = UnserializeUInt(pReader, pos, size);
+ if (status < 0) // error
+ return status;
- if (bit_depth <= 0)
- return E_FILE_FORMAT_INVALID;
- }
+ if (payload_size == 0) // weird
+ continue;
- pos += size; //consume payload
- assert(pos <= stop);
+ const long long payload_stop = pos + payload_size;
+    assert(payload_stop <= stop); // checked in ParseElementHeader
+
+ const long long element_size = payload_stop - element_start;
+
+ if (id == 0x2E) { // TrackEntry ID
+ Track*& pTrack = *m_trackEntriesEnd;
+ pTrack = NULL;
+
+ const long status = ParseTrackEntry(pos, payload_size, element_start,
+ element_size, pTrack);
+
+ if (status)
+ return status;
+
+ if (pTrack)
+ ++m_trackEntriesEnd;
}
- assert(pos == stop);
+ pos = payload_stop;
+ assert(pos <= stop);
+ }
- AudioTrack* const pTrack = new (std::nothrow) AudioTrack(pSegment,
- element_start,
- element_size);
+ assert(pos == stop);
- if (pTrack == NULL)
- return -1; //generic error
+ return 0; // success
+}
- const int status = info.Copy(pTrack->m_info);
+unsigned long Tracks::GetTracksCount() const {
+ const ptrdiff_t result = m_trackEntriesEnd - m_trackEntries;
+ assert(result >= 0);
+
+ return static_cast<unsigned long>(result);
+}
+
+long Tracks::ParseTrackEntry(long long track_start, long long track_size,
+ long long element_start, long long element_size,
+ Track*& pResult) const {
+ if (pResult)
+ return -1;
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = track_start;
+ const long long track_stop = track_start + track_size;
+
+ Track::Info info;
+
+ info.type = 0;
+ info.number = 0;
+ info.uid = 0;
+ info.defaultDuration = 0;
+
+ Track::Settings v;
+ v.start = -1;
+ v.size = -1;
+
+ Track::Settings a;
+ a.start = -1;
+ a.size = -1;
+
+  Track::Settings e; // ContentEncodings settings
+ e.start = -1;
+ e.size = -1;
+
+ long long lacing = 1; // default is true
+
+ while (pos < track_stop) {
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, track_stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long start = pos;
+
+ if (id == 0x60) { // VideoSettings ID
+ v.start = start;
+ v.size = size;
+ } else if (id == 0x61) { // AudioSettings ID
+ a.start = start;
+ a.size = size;
+ } else if (id == 0x2D80) { // ContentEncodings ID
+ e.start = start;
+ e.size = size;
+ } else if (id == 0x33C5) { // Track UID
+ if (size > 8)
+ return E_FILE_FORMAT_INVALID;
+
+ info.uid = 0;
+
+ long long pos_ = start;
+ const long long pos_end = start + size;
+
+ while (pos_ != pos_end) {
+ unsigned char b;
+
+ const int status = pReader->Read(pos_, 1, &b);
+
+ if (status)
+ return status;
+
+ info.uid <<= 8;
+ info.uid |= b;
+
+ ++pos_;
+ }
+ } else if (id == 0x57) { // Track Number
+ const long long num = UnserializeUInt(pReader, pos, size);
+
+ if ((num <= 0) || (num > 127))
+ return E_FILE_FORMAT_INVALID;
+
+ info.number = static_cast<long>(num);
+ } else if (id == 0x03) { // Track Type
+ const long long type = UnserializeUInt(pReader, pos, size);
+
+ if ((type <= 0) || (type > 254))
+ return E_FILE_FORMAT_INVALID;
+
+ info.type = static_cast<long>(type);
+ } else if (id == 0x136E) { // Track Name
+ const long status =
+ UnserializeString(pReader, pos, size, info.nameAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == 0x02B59C) { // Track Language
+ const long status = UnserializeString(pReader, pos, size, info.language);
+
+ if (status)
+ return status;
+ } else if (id == 0x03E383) { // Default Duration
+ const long long duration = UnserializeUInt(pReader, pos, size);
+
+ if (duration < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.defaultDuration = static_cast<unsigned long long>(duration);
+ } else if (id == 0x06) { // CodecID
+ const long status = UnserializeString(pReader, pos, size, info.codecId);
+
+ if (status)
+ return status;
+ } else if (id == 0x1C) { // lacing
+ lacing = UnserializeUInt(pReader, pos, size);
+
+ if ((lacing < 0) || (lacing > 1))
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == 0x23A2) { // Codec Private
+ delete[] info.codecPrivate;
+ info.codecPrivate = NULL;
+ info.codecPrivateSize = 0;
+
+ const size_t buflen = static_cast<size_t>(size);
+
+ if (buflen) {
+ typedef unsigned char* buf_t;
+
+ const buf_t buf = new (std::nothrow) unsigned char[buflen];
+
+ if (buf == NULL)
+ return -1;
+
+ const int status = pReader->Read(pos, static_cast<long>(buflen), buf);
+
+ if (status) {
+ delete[] buf;
+ return status;
+ }
+
+ info.codecPrivate = buf;
+ info.codecPrivateSize = buflen;
+ }
+ } else if (id == 0x058688) { // Codec Name
+ const long status =
+ UnserializeString(pReader, pos, size, info.codecNameAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == 0x16AA) { // Codec Delay
+ info.codecDelay = UnserializeUInt(pReader, pos, size);
+ } else if (id == 0x16BB) { // Seek Pre Roll
+ info.seekPreRoll = UnserializeUInt(pReader, pos, size);
+ }
+
+ pos += size; // consume payload
+ assert(pos <= track_stop);
+ }
+
+ assert(pos == track_stop);
+
+ if (info.number <= 0) // not specified
+ return E_FILE_FORMAT_INVALID;
+
+ if (GetTrackByNumber(info.number))
+ return E_FILE_FORMAT_INVALID;
+
+ if (info.type <= 0) // not specified
+ return E_FILE_FORMAT_INVALID;
+
+ info.lacing = (lacing > 0) ? true : false;
+
+ if (info.type == Track::kVideo) {
+ if (v.start < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (a.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.settings = v;
+
+ VideoTrack* pTrack = NULL;
+
+ const long status = VideoTrack::Parse(m_pSegment, info, element_start,
+ element_size, pTrack);
if (status)
- {
- delete pTrack;
- return status;
- }
-
- pTrack->m_rate = rate;
- pTrack->m_channels = channels;
- pTrack->m_bitDepth = bit_depth;
+ return status;
pResult = pTrack;
- return 0; //success
+ assert(pResult);
+
+ if (e.start >= 0)
+ pResult->ParseContentEncodingsEntry(e.start, e.size);
+ } else if (info.type == Track::kAudio) {
+ if (a.start < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (v.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.settings = a;
+
+ AudioTrack* pTrack = NULL;
+
+ const long status = AudioTrack::Parse(m_pSegment, info, element_start,
+ element_size, pTrack);
+
+ if (status)
+ return status;
+
+ pResult = pTrack;
+ assert(pResult);
+
+ if (e.start >= 0)
+ pResult->ParseContentEncodingsEntry(e.start, e.size);
+ } else {
+ // neither video nor audio - probably metadata or subtitles
+
+ if (a.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (v.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (e.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.settings.start = -1;
+ info.settings.size = 0;
+
+ Track* pTrack = NULL;
+
+ const long status =
+ Track::Create(m_pSegment, info, element_start, element_size, pTrack);
+
+ if (status)
+ return status;
+
+ pResult = pTrack;
+ assert(pResult);
+ }
+
+ return 0; // success
}
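
For reference, the short-form EBML IDs dispatched by ParseTrackEntry above:

// 0x57     TrackNumber      0x03     TrackType
// 0x33C5   TrackUID         0x136E   Name
// 0x02B59C Language         0x06     CodecID
// 0x23A2   CodecPrivate     0x058688 CodecName
// 0x03E383 DefaultDuration  0x1C     FlagLacing
// 0x16AA   CodecDelay       0x16BB   SeekPreRoll
// 0x60     Video settings   0x61     Audio settings
// 0x2D80   ContentEncodings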
+Tracks::~Tracks() {
+ Track** i = m_trackEntries;
+ Track** const j = m_trackEntriesEnd;
-double AudioTrack::GetSamplingRate() const
-{
- return m_rate;
+ while (i != j) {
+ Track* const pTrack = *i++;
+ delete pTrack;
+ }
+
+ delete[] m_trackEntries;
}
+const Track* Tracks::GetTrackByNumber(long tn) const {
+ if (tn < 0)
+ return NULL;
-long long AudioTrack::GetChannels() const
-{
- return m_channels;
+ Track** i = m_trackEntries;
+ Track** const j = m_trackEntriesEnd;
+
+ while (i != j) {
+ Track* const pTrack = *i++;
+
+ if (pTrack == NULL)
+ continue;
+
+ if (tn == pTrack->GetNumber())
+ return pTrack;
+ }
+
+ return NULL; // not found
}
-long long AudioTrack::GetBitDepth() const
-{
- return m_bitDepth;
-}
+const Track* Tracks::GetTrackByIndex(unsigned long idx) const {
+ const ptrdiff_t count = m_trackEntriesEnd - m_trackEntries;
-Tracks::Tracks(
- Segment* pSegment,
- long long start,
- long long size_,
- long long element_start,
- long long element_size) :
- m_pSegment(pSegment),
- m_start(start),
- m_size(size_),
- m_element_start(element_start),
- m_element_size(element_size),
- m_trackEntries(NULL),
- m_trackEntriesEnd(NULL)
-{
-}
+ if (idx >= static_cast<unsigned long>(count))
+ return NULL;
-
-long Tracks::Parse()
-{
- assert(m_trackEntries == NULL);
- assert(m_trackEntriesEnd == NULL);
-
- const long long stop = m_start + m_size;
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- int count = 0;
- long long pos = m_start;
-
- while (pos < stop)
- {
- long long id, size;
-
- const long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- size);
-
- if (status < 0) //error
- return status;
-
- if (size == 0) //weird
- continue;
-
- if (id == 0x2E) //TrackEntry ID
- ++count;
-
- pos += size; //consume payload
- assert(pos <= stop);
- }
-
- assert(pos == stop);
-
- if (count <= 0)
- return 0; //success
-
- m_trackEntries = new (std::nothrow) Track*[count];
-
- if (m_trackEntries == NULL)
- return -1;
-
- m_trackEntriesEnd = m_trackEntries;
-
- pos = m_start;
-
- while (pos < stop)
- {
- const long long element_start = pos;
-
- long long id, payload_size;
-
- const long status = ParseElementHeader(
- pReader,
- pos,
- stop,
- id,
- payload_size);
-
- if (status < 0) //error
- return status;
-
- if (payload_size == 0) //weird
- continue;
-
- const long long payload_stop = pos + payload_size;
- assert(payload_stop <= stop); //checked in ParseElement
-
- const long long element_size = payload_stop - element_start;
-
- if (id == 0x2E) //TrackEntry ID
- {
- Track*& pTrack = *m_trackEntriesEnd;
- pTrack = NULL;
-
- const long status = ParseTrackEntry(
- pos,
- payload_size,
- element_start,
- element_size,
- pTrack);
-
- if (status)
- return status;
-
- if (pTrack)
- ++m_trackEntriesEnd;
- }
-
- pos = payload_stop;
- assert(pos <= stop);
- }
-
- assert(pos == stop);
-
- return 0; //success
-}
-
-
-unsigned long Tracks::GetTracksCount() const
-{
- const ptrdiff_t result = m_trackEntriesEnd - m_trackEntries;
- assert(result >= 0);
-
- return static_cast<unsigned long>(result);
-}
-
-long Tracks::ParseTrackEntry(
- long long track_start,
- long long track_size,
- long long element_start,
- long long element_size,
- Track*& pResult) const
-{
- if (pResult)
- return -1;
-
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- long long pos = track_start;
- const long long track_stop = track_start + track_size;
-
- Track::Info info;
-
- info.type = 0;
- info.number = 0;
- info.uid = 0;
- info.defaultDuration = 0;
-
- Track::Settings v;
- v.start = -1;
- v.size = -1;
-
- Track::Settings a;
- a.start = -1;
- a.size = -1;
-
- Track::Settings e; //content_encodings_settings;
- e.start = -1;
- e.size = -1;
-
- long long lacing = 1; //default is true
-
- while (pos < track_stop)
- {
- long long id, size;
-
- const long status = ParseElementHeader(
- pReader,
- pos,
- track_stop,
- id,
- size);
-
- if (status < 0) //error
- return status;
-
- if (size < 0)
- return E_FILE_FORMAT_INVALID;
-
- const long long start = pos;
-
- if (id == 0x60) // VideoSettings ID
- {
- v.start = start;
- v.size = size;
- }
- else if (id == 0x61) // AudioSettings ID
- {
- a.start = start;
- a.size = size;
- }
- else if (id == 0x2D80) // ContentEncodings ID
- {
- e.start = start;
- e.size = size;
- }
- else if (id == 0x33C5) //Track UID
- {
- if (size > 8)
- return E_FILE_FORMAT_INVALID;
-
- info.uid = 0;
-
- long long pos_ = start;
- const long long pos_end = start + size;
-
- while (pos_ != pos_end)
- {
- unsigned char b;
-
- const int status = pReader->Read(pos_, 1, &b);
-
- if (status)
- return status;
-
- info.uid <<= 8;
- info.uid |= b;
-
- ++pos_;
- }
- }
- else if (id == 0x57) //Track Number
- {
- const long long num = UnserializeUInt(pReader, pos, size);
-
- if ((num <= 0) || (num > 127))
- return E_FILE_FORMAT_INVALID;
-
- info.number = static_cast<long>(num);
- }
- else if (id == 0x03) //Track Type
- {
- const long long type = UnserializeUInt(pReader, pos, size);
-
- if ((type <= 0) || (type > 254))
- return E_FILE_FORMAT_INVALID;
-
- info.type = static_cast<long>(type);
- }
- else if (id == 0x136E) //Track Name
- {
- const long status = UnserializeString(
- pReader,
- pos,
- size,
- info.nameAsUTF8);
-
- if (status)
- return status;
- }
- else if (id == 0x02B59C) //Track Language
- {
- const long status = UnserializeString(
- pReader,
- pos,
- size,
- info.language);
-
- if (status)
- return status;
- }
- else if (id == 0x03E383) //Default Duration
- {
- const long long duration = UnserializeUInt(pReader, pos, size);
-
- if (duration < 0)
- return E_FILE_FORMAT_INVALID;
-
- info.defaultDuration = static_cast<unsigned long long>(duration);
- }
- else if (id == 0x06) //CodecID
- {
- const long status = UnserializeString(
- pReader,
- pos,
- size,
- info.codecId);
-
- if (status)
- return status;
- }
- else if (id == 0x1C) //lacing
- {
- lacing = UnserializeUInt(pReader, pos, size);
-
- if ((lacing < 0) || (lacing > 1))
- return E_FILE_FORMAT_INVALID;
- }
- else if (id == 0x23A2) //Codec Private
- {
- delete[] info.codecPrivate;
- info.codecPrivate = NULL;
- info.codecPrivateSize = 0;
-
- const size_t buflen = static_cast<size_t>(size);
-
- if (buflen)
- {
- typedef unsigned char* buf_t;
-
- const buf_t buf = new (std::nothrow) unsigned char[buflen];
-
- if (buf == NULL)
- return -1;
-
- const int status = pReader->Read(pos, buflen, buf);
-
- if (status)
- {
- delete[] buf;
- return status;
- }
-
- info.codecPrivate = buf;
- info.codecPrivateSize = buflen;
- }
- }
- else if (id == 0x058688) //Codec Name
- {
- const long status = UnserializeString(
- pReader,
- pos,
- size,
- info.codecNameAsUTF8);
-
- if (status)
- return status;
- }
- else if (id == 0x16AA) //Codec Delay
- {
- info.codecDelay = UnserializeUInt(pReader, pos, size);
-
- }
- else if (id == 0x16BB) //Seek Pre Roll
- {
- info.seekPreRoll = UnserializeUInt(pReader, pos, size);
- }
-
- pos += size; //consume payload
- assert(pos <= track_stop);
- }
-
- assert(pos == track_stop);
-
- if (info.number <= 0) //not specified
- return E_FILE_FORMAT_INVALID;
-
- if (GetTrackByNumber(info.number))
- return E_FILE_FORMAT_INVALID;
-
- if (info.type <= 0) //not specified
- return E_FILE_FORMAT_INVALID;
-
- info.lacing = (lacing > 0) ? true : false;
-
- if (info.type == Track::kVideo)
- {
- if (v.start < 0)
- return E_FILE_FORMAT_INVALID;
-
- if (a.start >= 0)
- return E_FILE_FORMAT_INVALID;
-
- info.settings = v;
-
- VideoTrack* pTrack = NULL;
-
- const long status = VideoTrack::Parse(m_pSegment,
- info,
- element_start,
- element_size,
- pTrack);
-
- if (status)
- return status;
-
- pResult = pTrack;
- assert(pResult);
-
- if (e.start >= 0)
- pResult->ParseContentEncodingsEntry(e.start, e.size);
- }
- else if (info.type == Track::kAudio)
- {
- if (a.start < 0)
- return E_FILE_FORMAT_INVALID;
-
- if (v.start >= 0)
- return E_FILE_FORMAT_INVALID;
-
- info.settings = a;
-
- AudioTrack* pTrack = NULL;
-
- const long status = AudioTrack::Parse(m_pSegment,
- info,
- element_start,
- element_size,
- pTrack);
-
- if (status)
- return status;
-
- pResult = pTrack;
- assert(pResult);
-
- if (e.start >= 0)
- pResult->ParseContentEncodingsEntry(e.start, e.size);
- }
- else
- {
- // neither video nor audio - probably metadata or subtitles
-
- if (a.start >= 0)
- return E_FILE_FORMAT_INVALID;
-
- if (v.start >= 0)
- return E_FILE_FORMAT_INVALID;
-
- if (e.start >= 0)
- return E_FILE_FORMAT_INVALID;
-
- info.settings.start = -1;
- info.settings.size = 0;
-
- Track* pTrack = NULL;
-
- const long status = Track::Create(m_pSegment,
- info,
- element_start,
- element_size,
- pTrack);
-
- if (status)
- return status;
-
- pResult = pTrack;
- assert(pResult);
- }
-
- return 0; //success
-}
-
-
-Tracks::~Tracks()
-{
- Track** i = m_trackEntries;
- Track** const j = m_trackEntriesEnd;
-
- while (i != j)
- {
- Track* const pTrack = *i++;
- delete pTrack;
- }
-
- delete[] m_trackEntries;
-}
-
-const Track* Tracks::GetTrackByNumber(long tn) const
-{
- if (tn < 0)
- return NULL;
-
- Track** i = m_trackEntries;
- Track** const j = m_trackEntriesEnd;
-
- while (i != j)
- {
- Track* const pTrack = *i++;
-
- if (pTrack == NULL)
- continue;
-
- if (tn == pTrack->GetNumber())
- return pTrack;
- }
-
- return NULL; //not found
-}
-
-
-const Track* Tracks::GetTrackByIndex(unsigned long idx) const
-{
- const ptrdiff_t count = m_trackEntriesEnd - m_trackEntries;
-
- if (idx >= static_cast<unsigned long>(count))
- return NULL;
-
- return m_trackEntries[idx];
+ return m_trackEntries[idx];
}
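
A hedged sketch of walking the track table via the accessors above (pTracks as returned by Segment::GetTracks, assumed non-NULL):

const unsigned long count = pTracks->GetTracksCount();

for (unsigned long idx = 0; idx < count; ++idx) {
  const mkvparser::Track* const pTrack = pTracks->GetTrackByIndex(idx);

  if (pTrack == NULL)  // defensive, mirroring GetTrackByNumber
    continue;

  const long type = pTrack->GetType();  // Track::kVideo, Track::kAudio, ...

  // ... dispatch on type, e.g. cast to VideoTrack or AudioTrack ...
}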
#if 0
@@ -6980,104 +6025,100 @@
}
#endif
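
Cluster::Load below parses EBML variable-size integers: the number of leading zero bits in the first byte gives the field width, and a payload of all one-bits, (1LL << (7 * len)) - 1, is the reserved "unknown size" value. A self-contained sketch of the decode rule (buffer-based, unlike the reader-based ReadUInt/GetUIntLength used in the code):

// Decodes an EBML variable-size integer from 'buf'; returns -1 on
// error, else stores the field width in '*len' and returns the value
// with the length-descriptor bit masked off.
static long long DecodeVInt(const unsigned char* buf, int buflen, int* len) {
  if (buflen < 1 || buf[0] == 0)
    return -1;  // widths above 8 bytes are not handled, as in ReadUInt

  unsigned char m = 0x80;
  int width = 1;

  while (!(buf[0] & m)) {  // count leading zero bits of the first byte
    m >>= 1;
    ++width;
  }

  if (width > buflen)
    return -1;

  long long result = buf[0] & ~m;

  for (int i = 1; i < width; ++i) {
    result <<= 8;
    result |= buf[i];
  }

  *len = width;
  return result;  // equals (1LL << (7 * width)) - 1 when size is unknown
}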
+long Cluster::Load(long long& pos, long& len) const {
+ assert(m_pSegment);
+ assert(m_pos >= m_element_start);
-long Cluster::Load(long long& pos, long& len) const
-{
- assert(m_pSegment);
- assert(m_pos >= m_element_start);
+ if (m_timecode >= 0) // at least partially loaded
+ return 0;
- if (m_timecode >= 0) //at least partially loaded
- return 0;
+ assert(m_pos == m_element_start);
+ assert(m_element_size < 0);
- assert(m_pos == m_element_start);
- assert(m_element_size < 0);
+ IMkvReader* const pReader = m_pSegment->m_pReader;
- IMkvReader* const pReader = m_pSegment->m_pReader;
+ long long total, avail;
- long long total, avail;
+ const int status = pReader->Length(&total, &avail);
- const int status = pReader->Length(&total, &avail);
+ if (status < 0) // error
+ return status;
- if (status < 0) //error
- return status;
+ assert((total < 0) || (avail <= total));
+ assert((total < 0) || (m_pos <= total)); // TODO: verify this
- assert((total < 0) || (avail <= total));
- assert((total < 0) || (m_pos <= total)); //TODO: verify this
+ pos = m_pos;
- pos = m_pos;
+ long long cluster_size = -1;
- long long cluster_size = -1;
-
- {
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- long long result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error or underflow
- return static_cast<long>(result);
-
- if (result > 0) //underflow (weird)
- return E_BUFFER_NOT_FULL;
-
- //if ((pos + len) > segment_stop)
- // return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long id_ = ReadUInt(pReader, pos, len);
-
- if (id_ < 0) //error
- return static_cast<long>(id_);
-
- if (id_ != 0x0F43B675) //Cluster ID
- return E_FILE_FORMAT_INVALID;
-
- pos += len; //consume id
-
- //read cluster size
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- //if ((pos + len) > segment_stop)
- // return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(cluster_size);
-
- if (size == 0)
- return E_FILE_FORMAT_INVALID; //TODO: verify this
-
- pos += len; //consume length of size of element
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (size != unknown_size)
- cluster_size = size;
+ {
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
}
- //pos points to start of payload
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error or underflow
+ return static_cast<long>(result);
+
+ if (result > 0) // underflow (weird)
+ return E_BUFFER_NOT_FULL;
+
+ // if ((pos + len) > segment_stop)
+ // return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id_ = ReadUInt(pReader, pos, len);
+
+ if (id_ < 0) // error
+ return static_cast<long>(id_);
+
+ if (id_ != 0x0F43B675) // Cluster ID
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume id
+
+ // read cluster size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ // if ((pos + len) > segment_stop)
+ // return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+    if (size < 0) // error
+      return static_cast<long>(size); // propagate the error code, not cluster_size
+
+ if (size == 0)
+ return E_FILE_FORMAT_INVALID; // TODO: verify this
+
+ pos += len; // consume length of size of element
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size != unknown_size)
+ cluster_size = size;
+ }
+
+  // pos points to start of payload
#if 0
len = static_cast<long>(size_);
@@ -7086,403 +6127,376 @@
return E_BUFFER_NOT_FULL;
#endif
- long long timecode = -1;
- long long new_pos = -1;
- bool bBlock = false;
+ long long timecode = -1;
+ long long new_pos = -1;
+ bool bBlock = false;
- long long cluster_stop = (cluster_size < 0) ? -1 : pos + cluster_size;
+ long long cluster_stop = (cluster_size < 0) ? -1 : pos + cluster_size;
- for (;;)
- {
- if ((cluster_stop >= 0) && (pos >= cluster_stop))
- break;
+ for (;;) {
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ break;
- //Parse ID
+ // Parse ID
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- long long result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long id = ReadUInt(pReader, pos, len);
-
- if (id < 0) //error
- return static_cast<long>(id);
-
- if (id == 0)
- return E_FILE_FORMAT_INVALID;
-
-        //This is the distinguished set of IDs we use to determine
-        //that we have exhausted the sub-elements inside the cluster
-        //whose ID we parsed earlier.
-
- if (id == 0x0F43B675) //Cluster ID
- break;
-
- if (id == 0x0C53BB6B) //Cues ID
- break;
-
- pos += len; //consume ID field
-
- //Parse Size
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(size);
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID;
-
- pos += len; //consume size field
-
- if ((cluster_stop >= 0) && (pos > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- //pos now points to start of payload
-
- if (size == 0) //weird
- continue;
-
- if ((cluster_stop >= 0) && ((pos + size) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if (id == 0x67) //TimeCode ID
- {
- len = static_cast<long>(size);
-
- if ((pos + size) > avail)
- return E_BUFFER_NOT_FULL;
-
- timecode = UnserializeUInt(pReader, pos, size);
-
- if (timecode < 0) //error (or underflow)
- return static_cast<long>(timecode);
-
- new_pos = pos + size;
-
- if (bBlock)
- break;
- }
- else if (id == 0x20) //BlockGroup ID
- {
- bBlock = true;
- break;
- }
- else if (id == 0x23) //SimpleBlock ID
- {
- bBlock = true;
- break;
- }
-
- pos += size; //consume payload
- assert((cluster_stop < 0) || (pos <= cluster_stop));
- }
-
- assert((cluster_stop < 0) || (pos <= cluster_stop));
-
- if (timecode < 0) //no timecode found
- return E_FILE_FORMAT_INVALID;
-
- if (!bBlock)
- return E_FILE_FORMAT_INVALID;
-
- m_pos = new_pos; //designates position just beyond timecode payload
- m_timecode = timecode; // m_timecode >= 0 means we're partially loaded
-
- if (cluster_size >= 0)
- m_element_size = cluster_stop - m_element_start;
-
- return 0;
-}
-
-
-long Cluster::Parse(long long& pos, long& len) const
-{
- long status = Load(pos, len);
-
- if (status < 0)
- return status;
-
- assert(m_pos >= m_element_start);
- assert(m_timecode >= 0);
- //assert(m_size > 0);
- //assert(m_element_size > m_size);
-
- const long long cluster_stop =
- (m_element_size < 0) ? -1 : m_element_start + m_element_size;
-
- if ((cluster_stop >= 0) && (m_pos >= cluster_stop))
- return 1; //nothing else to do
-
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- long long total, avail;
-
- status = pReader->Length(&total, &avail);
-
- if (status < 0) //error
- return status;
-
- assert((total < 0) || (avail <= total));
-
- pos = m_pos;
-
- for (;;)
- {
- if ((cluster_stop >= 0) && (pos >= cluster_stop))
- break;
-
- if ((total >= 0) && (pos >= total))
- {
- if (m_element_size < 0)
- m_element_size = pos - m_element_start;
-
- break;
- }
-
- //Parse ID
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- long long result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long id = ReadUInt(pReader, pos, len);
-
- if (id < 0) //error
- return static_cast<long>(id);
-
- if (id == 0) //weird
- return E_FILE_FORMAT_INVALID;
-
-        //This is the distinguished set of IDs we use to determine
-        //that we have exhausted the sub-elements inside the cluster
-        //whose ID we parsed earlier.
-
- if ((id == 0x0F43B675) || (id == 0x0C53BB6B)) //Cluster or Cues ID
- {
- if (m_element_size < 0)
- m_element_size = pos - m_element_start;
-
- break;
- }
-
- pos += len; //consume ID field
-
- //Parse Size
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(size);
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID;
-
- pos += len; //consume size field
-
- if ((cluster_stop >= 0) && (pos > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- //pos now points to start of payload
-
- if (size == 0) //weird
- continue;
-
- //const long long block_start = pos;
- const long long block_stop = pos + size;
-
- if (cluster_stop >= 0)
- {
- if (block_stop > cluster_stop)
- {
- if ((id == 0x20) || (id == 0x23))
- return E_FILE_FORMAT_INVALID;
-
- pos = cluster_stop;
- break;
- }
- }
- else if ((total >= 0) && (block_stop > total))
- {
- m_element_size = total - m_element_start;
- pos = total;
- break;
- }
- else if (block_stop > avail)
- {
- len = static_cast<long>(size);
- return E_BUFFER_NOT_FULL;
- }
-
- Cluster* const this_ = const_cast<Cluster*>(this);
-
- if (id == 0x20) //BlockGroup
- return this_->ParseBlockGroup(size, pos, len);
-
- if (id == 0x23) //SimpleBlock
- return this_->ParseSimpleBlock(size, pos, len);
-
- pos += size; //consume payload
- assert((cluster_stop < 0) || (pos <= cluster_stop));
- }
-
- assert(m_element_size > 0);
-
- m_pos = pos;
- assert((cluster_stop < 0) || (m_pos <= cluster_stop));
-
- if (m_entries_count > 0)
- {
- const long idx = m_entries_count - 1;
-
- const BlockEntry* const pLast = m_entries[idx];
- assert(pLast);
-
- const Block* const pBlock = pLast->GetBlock();
- assert(pBlock);
-
- const long long start = pBlock->m_start;
-
- if ((total >= 0) && (start > total))
- return -1; //defend against truncated stream
-
- const long long size = pBlock->m_size;
-
- const long long stop = start + size;
- assert((cluster_stop < 0) || (stop <= cluster_stop));
-
- if ((total >= 0) && (stop > total))
- return -1; //defend against truncated stream
- }
-
- return 1; //no more entries
-}
-
-
-long Cluster::ParseSimpleBlock(
- long long block_size,
- long long& pos,
- long& len)
-{
- const long long block_start = pos;
- const long long block_stop = pos + block_size;
-
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- long long total, avail;
-
- long status = pReader->Length(&total, &avail);
-
- if (status < 0) //error
- return status;
-
- assert((total < 0) || (avail <= total));
-
- //parse track number
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
}
long long result = GetUIntLength(pReader, pos, len);
- if (result < 0) //error
- return static_cast<long>(result);
+ if (result < 0) // error
+ return static_cast<long>(result);
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- if ((pos + len) > block_stop)
- return E_FILE_FORMAT_INVALID;
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadUInt(pReader, pos, len);
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ if (id == 0)
+ return E_FILE_FORMAT_INVALID;
+
+ // This is the distinguished set of IDs we use to determine
+ // that we have exhausted the sub-elements inside the cluster
+ // whose ID we parsed earlier.
+
+ if (id == 0x0F43B675) // Cluster ID
+ break;
+
+ if (id == 0x0C53BB6B) // Cues ID
+ break;
+
+ pos += len; // consume ID field
+
+ // Parse Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
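+ // An EBML size field with all value bits set (the value
+ // (1LL << (7 * len)) - 1, e.g. 0xFF when len is 1) is the spec's
+ // "unknown size" sentinel, which is not accepted for sub-elements here.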
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume size field
+
+ if ((cluster_stop >= 0) && (pos > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // pos now points to start of payload
+
+ if (size == 0) // weird
+ continue;
+
+ if ((cluster_stop >= 0) && ((pos + size) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
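+ // The Timecode element is 0xE7 on disk; ReadUInt strips the EBML
+ // length-descriptor bit, so its ID reads back as 0x67. Its payload is
+ // the cluster's base timecode, to which block timecodes are relative.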
+ if (id == 0x67) { // TimeCode ID
+ len = static_cast<long>(size);
+
+ if ((pos + size) > avail)
return E_BUFFER_NOT_FULL;
- const long long track = ReadUInt(pReader, pos, len);
+ timecode = UnserializeUInt(pReader, pos, size);
- if (track < 0) //error
- return static_cast<long>(track);
+ if (timecode < 0) // error (or underflow)
+ return static_cast<long>(timecode);
- if (track == 0)
- return E_FILE_FORMAT_INVALID;
+ new_pos = pos + size;
+
+ if (bBlock)
+ break;
+ } else if (id == 0x20) { // BlockGroup ID
+ bBlock = true;
+ break;
+ } else if (id == 0x23) { // SimpleBlock ID
+ bBlock = true;
+ break;
+ }
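+ // The scan above stops once both facts are known: the cluster's base
+ // timecode and whether it holds at least one block. A block seen
+ // before any Timecode element leaves timecode < 0, which is rejected
+ // below.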
+
+ pos += size; // consume payload
+ assert((cluster_stop < 0) || (pos <= cluster_stop));
+ }
+
+ assert((cluster_stop < 0) || (pos <= cluster_stop));
+
+ if (timecode < 0) // no timecode found
+ return E_FILE_FORMAT_INVALID;
+
+ if (!bBlock)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pos = new_pos; // designates position just beyond timecode payload
+ m_timecode = timecode; // m_timecode >= 0 means we're partially loaded
+
+ if (cluster_size >= 0)
+ m_element_size = cluster_stop - m_element_start;
+
+ return 0;
+}
+
+long Cluster::Parse(long long& pos, long& len) const {
+ long status = Load(pos, len);
+
+ if (status < 0)
+ return status;
+
+ assert(m_pos >= m_element_start);
+ assert(m_timecode >= 0);
+ // assert(m_size > 0);
+ // assert(m_element_size > m_size);
+
+ const long long cluster_stop =
+ (m_element_size < 0) ? -1 : m_element_start + m_element_size;
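+ // A negative cluster_stop means the cluster's size is still unknown;
+ // in that case we parse until the next Cluster/Cues ID or end of
+ // stream.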
+
+ if ((cluster_stop >= 0) && (m_pos >= cluster_stop))
+ return 1; // nothing else to do
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long total, avail;
+
+ status = pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ pos = m_pos;
+
+ for (;;) {
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ break;
+
+ if ((total >= 0) && (pos >= total)) {
+ if (m_element_size < 0)
+ m_element_size = pos - m_element_start;
+
+ break;
+ }
+
+ // Parse ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadUInt(pReader, pos, len);
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ if (id == 0) // weird
+ return E_FILE_FORMAT_INVALID;
+
+ // This is the distinguished set of IDs we use to determine
+ // that we have exhausted the sub-elements inside the cluster
+ // whose ID we parsed earlier.
+
+ if ((id == 0x0F43B675) || (id == 0x0C53BB6B)) { // Cluster or Cues ID
+ if (m_element_size < 0)
+ m_element_size = pos - m_element_start;
+
+ break;
+ }
+
+ pos += len; // consume ID field
+
+ // Parse Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume size field
+
+ if ((cluster_stop >= 0) && (pos > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // pos now points to start of payload
+
+ if (size == 0) // weird
+ continue;
+
+ // const long long block_start = pos;
+ const long long block_stop = pos + size;
+
+ if (cluster_stop >= 0) {
+ if (block_stop > cluster_stop) {
+ if ((id == 0x20) || (id == 0x23))
+ return E_FILE_FORMAT_INVALID;
+
+ pos = cluster_stop;
+ break;
+ }
+ } else if ((total >= 0) && (block_stop > total)) {
+ m_element_size = total - m_element_start;
+ pos = total;
+ break;
+ } else if (block_stop > avail) {
+ len = static_cast<long>(size);
+ return E_BUFFER_NOT_FULL;
+ }
+
+ Cluster* const this_ = const_cast<Cluster*>(this);
+
+ if (id == 0x20) // BlockGroup
+ return this_->ParseBlockGroup(size, pos, len);
+
+ if (id == 0x23) // SimpleBlock
+ return this_->ParseSimpleBlock(size, pos, len);
+
+ pos += size; // consume payload
+ assert((cluster_stop < 0) || (pos <= cluster_stop));
+ }
+
+ assert(m_element_size > 0);
+
+ m_pos = pos;
+ assert((cluster_stop < 0) || (m_pos <= cluster_stop));
+
+ if (m_entries_count > 0) {
+ const long idx = m_entries_count - 1;
+
+ const BlockEntry* const pLast = m_entries[idx];
+ assert(pLast);
+
+ const Block* const pBlock = pLast->GetBlock();
+ assert(pBlock);
+
+ const long long start = pBlock->m_start;
+
+ if ((total >= 0) && (start > total))
+ return -1; // defend against truncated stream
+
+ const long long size = pBlock->m_size;
+
+ const long long stop = start + size;
+ assert((cluster_stop < 0) || (stop <= cluster_stop));
+
+ if ((total >= 0) && (stop > total))
+ return -1; // defend against truncated stream
+ }
+
+ return 1; // no more entries
+}
+
+long Cluster::ParseSimpleBlock(long long block_size, long long& pos,
+ long& len) {
+ const long long block_start = pos;
+ const long long block_stop = pos + block_size;
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long total, avail;
+
+ long status = pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
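+ // A SimpleBlock payload is laid out as a variable-length track number,
+ // a 2-byte signed timecode relative to the cluster, a flags byte, and
+ // then the (possibly laced) frame data.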
+ // parse track number
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((pos + len) > block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long track = ReadUInt(pReader, pos, len);
+
+ if (track < 0) // error
+ return static_cast<long>(track);
+
+ if (track == 0)
+ return E_FILE_FORMAT_INVALID;
#if 0
//TODO(matthewjheaney)
@@ -7514,228 +6528,208 @@
return E_FILE_FORMAT_INVALID;
#endif
- pos += len; //consume track number
+ pos += len; // consume track number
- if ((pos + 2) > block_stop)
- return E_FILE_FORMAT_INVALID;
+ if ((pos + 2) > block_stop)
+ return E_FILE_FORMAT_INVALID;
- if ((pos + 2) > avail)
- {
- len = 2;
- return E_BUFFER_NOT_FULL;
- }
+ if ((pos + 2) > avail) {
+ len = 2;
+ return E_BUFFER_NOT_FULL;
+ }
- pos += 2; //consume timecode
+ pos += 2; // consume timecode
- if ((pos + 1) > block_stop)
- return E_FILE_FORMAT_INVALID;
+ if ((pos + 1) > block_stop)
+ return E_FILE_FORMAT_INVALID;
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- unsigned char flags;
+ unsigned char flags;
- status = pReader->Read(pos, 1, &flags);
+ status = pReader->Read(pos, 1, &flags);
- if (status < 0) //error or underflow
- {
- len = 1;
- return status;
- }
+ if (status < 0) { // error or underflow
+ len = 1;
+ return status;
+ }
- ++pos; //consume flags byte
- assert(pos <= avail);
+ ++pos; // consume flags byte
+ assert(pos <= avail);
- if (pos >= block_stop)
- return E_FILE_FORMAT_INVALID;
+ if (pos >= block_stop)
+ return E_FILE_FORMAT_INVALID;
- const int lacing = int(flags & 0x06) >> 1;
+ const int lacing = int(flags & 0x06) >> 1;
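+ // Bits 1-2 of the flags byte select the lacing mode:
+ // 0 = none, 1 = Xiph, 2 = fixed-size, 3 = EBML lacing.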
- if ((lacing != 0) && (block_stop > avail))
- {
- len = static_cast<long>(block_stop - pos);
- return E_BUFFER_NOT_FULL;
- }
+ if ((lacing != 0) && (block_stop > avail)) {
+ len = static_cast<long>(block_stop - pos);
+ return E_BUFFER_NOT_FULL;
+ }
- status = CreateBlock(0x23, //simple block id
- block_start, block_size,
- 0); //DiscardPadding
+ status = CreateBlock(0x23, // simple block id
+ block_start, block_size,
+ 0); // DiscardPadding
- if (status != 0)
- return status;
+ if (status != 0)
+ return status;
- m_pos = block_stop;
+ m_pos = block_stop;
- return 0; //success
+ return 0; // success
}
+long Cluster::ParseBlockGroup(long long payload_size, long long& pos,
+ long& len) {
+ const long long payload_start = pos;
+ const long long payload_stop = pos + payload_size;
-long Cluster::ParseBlockGroup(
- long long payload_size,
- long long& pos,
- long& len)
-{
- const long long payload_start = pos;
- const long long payload_stop = pos + payload_size;
+ IMkvReader* const pReader = m_pSegment->m_pReader;
- IMkvReader* const pReader = m_pSegment->m_pReader;
+ long long total, avail;
- long long total, avail;
+ long status = pReader->Length(&total, &avail);
- long status = pReader->Length(&total, &avail);
+ if (status < 0) // error
+ return status;
- if (status < 0) //error
- return status;
+ assert((total < 0) || (avail <= total));
- assert((total < 0) || (avail <= total));
+ if ((total >= 0) && (payload_stop > total))
+ return E_FILE_FORMAT_INVALID;
- if ((total >= 0) && (payload_stop > total))
- return E_FILE_FORMAT_INVALID;
+ if (payload_stop > avail) {
+ len = static_cast<long>(payload_size);
+ return E_BUFFER_NOT_FULL;
+ }
- if (payload_stop > avail)
- {
- len = static_cast<long>(payload_size);
- return E_BUFFER_NOT_FULL;
+ long long discard_padding = 0;
+
+ while (pos < payload_stop) {
+ // parse sub-block element ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
}
- long long discard_padding = 0;
+ long long result = GetUIntLength(pReader, pos, len);
- while (pos < payload_stop)
- {
- //parse sub-block element ID
+ if (result < 0) // error
+ return static_cast<long>(result);
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- long long result = GetUIntLength(pReader, pos, len);
+ if ((pos + len) > payload_stop)
+ return E_FILE_FORMAT_INVALID;
- if (result < 0) //error
- return static_cast<long>(result);
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ const long long id = ReadUInt(pReader, pos, len);
- if ((pos + len) > payload_stop)
- return E_FILE_FORMAT_INVALID;
+ if (id < 0) // error
+ return static_cast<long>(id);
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ if (id == 0) // not a valid ID
+ return E_FILE_FORMAT_INVALID;
- const long long id = ReadUInt(pReader, pos, len);
+ pos += len; // consume ID field
- if (id < 0) //error
- return static_cast<long>(id);
+ // Parse Size
- if (id == 0) //not a value ID
- return E_FILE_FORMAT_INVALID;
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- pos += len; //consume ID field
+ result = GetUIntLength(pReader, pos, len);
- //Parse Size
+ if (result < 0) // error
+ return static_cast<long>(result);
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- result = GetUIntLength(pReader, pos, len);
+ if ((pos + len) > payload_stop)
+ return E_FILE_FORMAT_INVALID;
- if (result < 0) //error
- return static_cast<long>(result);
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
+ const long long size = ReadUInt(pReader, pos, len);
- if ((pos + len) > payload_stop)
- return E_FILE_FORMAT_INVALID;
+ if (size < 0) // error
+ return static_cast<long>(size);
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ pos += len; // consume size field
- const long long size = ReadUInt(pReader, pos, len);
+ // pos now points to start of sub-block group payload
- if (size < 0) //error
- return static_cast<long>(size);
+ if (pos > payload_stop)
+ return E_FILE_FORMAT_INVALID;
- pos += len; //consume size field
+ if (size == 0) // weird
+ continue;
- //pos now points to start of sub-block group payload
+ const long long unknown_size = (1LL << (7 * len)) - 1;
- if (pos > payload_stop)
- return E_FILE_FORMAT_INVALID;
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
- if (size == 0) //weird
- continue;
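+ // DiscardPadding (0x75A2 on disk, 0x35A2 with the EBML marker bit
+ // stripped) is a signed count of nanoseconds to trim from the end of
+ // the decoded block.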
+ if (id == 0x35A2) { // DiscardPadding
+ status = UnserializeInt(pReader, pos, size, discard_padding);
- const long long unknown_size = (1LL << (7 * len)) - 1;
+ if (status < 0) // error
+ return status;
+ }
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID;
+ if (id != 0x21) { // sub-part of BlockGroup is not a Block
+ pos += size; // consume sub-part of block group
- if (id == 0x35A2) //DiscardPadding
- {
- result = GetUIntLength(pReader, pos, len);
+ if (pos > payload_stop)
+ return E_FILE_FORMAT_INVALID;
- if (result < 0) //error
- return static_cast<long>(result);
+ continue;
+ }
- status = UnserializeInt(pReader, pos, len, discard_padding);
+ const long long block_stop = pos + size;
- if (status < 0) //error
- return status;
- }
+ if (block_stop > payload_stop)
+ return E_FILE_FORMAT_INVALID;
- if (id != 0x21) //sub-part of BlockGroup is not a Block
- {
- pos += size; //consume sub-part of block group
+ // parse track number
- if (pos > payload_stop)
- return E_FILE_FORMAT_INVALID;
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
- continue;
- }
+ result = GetUIntLength(pReader, pos, len);
- const long long block_stop = pos + size;
+ if (result < 0) // error
+ return static_cast<long>(result);
- if (block_stop > payload_stop)
- return E_FILE_FORMAT_INVALID;
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
- //parse track number
+ if ((pos + len) > block_stop)
+ return E_FILE_FORMAT_INVALID;
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- result = GetUIntLength(pReader, pos, len);
+ const long long track = ReadUInt(pReader, pos, len);
- if (result < 0) //error
- return static_cast<long>(result);
+ if (track < 0) // error
+ return static_cast<long>(track);
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((pos + len) > block_stop)
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long track = ReadUInt(pReader, pos, len);
-
- if (track < 0) //error
- return static_cast<long>(track);
-
- if (track == 0)
- return E_FILE_FORMAT_INVALID;
+ if (track == 0)
+ return E_FILE_FORMAT_INVALID;
#if 0
//TODO(matthewjheaney)
@@ -7767,213 +6761,173 @@
return E_FILE_FORMAT_INVALID;
#endif
- pos += len; //consume track number
+ pos += len; // consume track number
- if ((pos + 2) > block_stop)
- return E_FILE_FORMAT_INVALID;
+ if ((pos + 2) > block_stop)
+ return E_FILE_FORMAT_INVALID;
- if ((pos + 2) > avail)
- {
- len = 2;
- return E_BUFFER_NOT_FULL;
- }
-
- pos += 2; //consume timecode
-
- if ((pos + 1) > block_stop)
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- unsigned char flags;
-
- status = pReader->Read(pos, 1, &flags);
-
- if (status < 0) //error or underflow
- {
- len = 1;
- return status;
- }
-
- ++pos; //consume flags byte
- assert(pos <= avail);
-
- if (pos >= block_stop)
- return E_FILE_FORMAT_INVALID;
-
- const int lacing = int(flags & 0x06) >> 1;
-
- if ((lacing != 0) && (block_stop > avail))
- {
- len = static_cast<long>(block_stop - pos);
- return E_BUFFER_NOT_FULL;
- }
-
- pos = block_stop; //consume block-part of block group
- assert(pos <= payload_stop);
+ if ((pos + 2) > avail) {
+ len = 2;
+ return E_BUFFER_NOT_FULL;
}
- assert(pos == payload_stop);
+ pos += 2; // consume timecode
- status = CreateBlock(0x20, //BlockGroup ID
- payload_start, payload_size,
- discard_padding);
- if (status != 0)
- return status;
+ if ((pos + 1) > block_stop)
+ return E_FILE_FORMAT_INVALID;
- m_pos = payload_stop;
-
- return 0; //success
-}
-
-
-long Cluster::GetEntry(long index, const mkvparser::BlockEntry*& pEntry) const
-{
- assert(m_pos >= m_element_start);
-
- pEntry = NULL;
-
- if (index < 0)
- return -1; //generic error
-
- if (m_entries_count < 0)
- return E_BUFFER_NOT_FULL;
-
- assert(m_entries);
- assert(m_entries_size > 0);
- assert(m_entries_count <= m_entries_size);
-
- if (index < m_entries_count)
- {
- pEntry = m_entries[index];
- assert(pEntry);
-
- return 1; //found entry
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
}
- if (m_element_size < 0) //we don't know cluster end yet
- return E_BUFFER_NOT_FULL; //underflow
+ unsigned char flags;
- const long long element_stop = m_element_start + m_element_size;
+ status = pReader->Read(pos, 1, &flags);
- if (m_pos >= element_stop)
- return 0; //nothing left to parse
-
- return E_BUFFER_NOT_FULL; //underflow, since more remains to be parsed
-}
-
-
-Cluster* Cluster::Create(
- Segment* pSegment,
- long idx,
- long long off)
- //long long element_size)
-{
- assert(pSegment);
- assert(off >= 0);
-
- const long long element_start = pSegment->m_start + off;
-
- Cluster* const pCluster = new Cluster(pSegment,
- idx,
- element_start);
- //element_size);
- assert(pCluster);
-
- return pCluster;
-}
-
-
-Cluster::Cluster() :
- m_pSegment(NULL),
- m_element_start(0),
- m_index(0),
- m_pos(0),
- m_element_size(0),
- m_timecode(0),
- m_entries(NULL),
- m_entries_size(0),
- m_entries_count(0) //means "no entries"
-{
-}
-
-
-Cluster::Cluster(
- Segment* pSegment,
- long idx,
- long long element_start
- /* long long element_size */ ) :
- m_pSegment(pSegment),
- m_element_start(element_start),
- m_index(idx),
- m_pos(element_start),
- m_element_size(-1 /* element_size */ ),
- m_timecode(-1),
- m_entries(NULL),
- m_entries_size(0),
- m_entries_count(-1) //means "has not been parsed yet"
-{
-}
-
-
-Cluster::~Cluster()
-{
- if (m_entries_count <= 0)
- return;
-
- BlockEntry** i = m_entries;
- BlockEntry** const j = m_entries + m_entries_count;
-
- while (i != j)
- {
- BlockEntry* p = *i++;
- assert(p);
-
- delete p;
+ if (status < 0) { // error or underflow
+ len = 1;
+ return status;
}
- delete[] m_entries;
+ ++pos; // consume flags byte
+ assert(pos <= avail);
+
+ if (pos >= block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ const int lacing = int(flags & 0x06) >> 1;
+
+ if ((lacing != 0) && (block_stop > avail)) {
+ len = static_cast<long>(block_stop - pos);
+ return E_BUFFER_NOT_FULL;
+ }
+
+ pos = block_stop; // consume block-part of block group
+ assert(pos <= payload_stop);
+ }
+
+ assert(pos == payload_stop);
+
+ status = CreateBlock(0x20, // BlockGroup ID
+ payload_start, payload_size, discard_padding);
+ if (status != 0)
+ return status;
+
+ m_pos = payload_stop;
+
+ return 0; // success
}
+long Cluster::GetEntry(long index, const mkvparser::BlockEntry*& pEntry) const {
+ assert(m_pos >= m_element_start);
-bool Cluster::EOS() const
+ pEntry = NULL;
+
+ if (index < 0)
+ return -1; // generic error
+
+ if (m_entries_count < 0)
+ return E_BUFFER_NOT_FULL;
+
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count <= m_entries_size);
+
+ if (index < m_entries_count) {
+ pEntry = m_entries[index];
+ assert(pEntry);
+
+ return 1; // found entry
+ }
+
+ if (m_element_size < 0) // we don't know cluster end yet
+ return E_BUFFER_NOT_FULL; // underflow
+
+ const long long element_stop = m_element_start + m_element_size;
+
+ if (m_pos >= element_stop)
+ return 0; // nothing left to parse
+
+ return E_BUFFER_NOT_FULL; // underflow, since more remains to be parsed
+}
+
+Cluster* Cluster::Create(Segment* pSegment, long idx, long long off)
+// long long element_size)
{
- return (m_pSegment == NULL);
+ assert(pSegment);
+ assert(off >= 0);
+
+ const long long element_start = pSegment->m_start + off;
+
+ Cluster* const pCluster = new Cluster(pSegment, idx, element_start);
+ // element_size);
+ assert(pCluster);
+
+ return pCluster;
}
+Cluster::Cluster()
+ : m_pSegment(NULL),
+ m_element_start(0),
+ m_index(0),
+ m_pos(0),
+ m_element_size(0),
+ m_timecode(0),
+ m_entries(NULL),
+ m_entries_size(0),
+ m_entries_count(0) // means "no entries"
+{}
-long Cluster::GetIndex() const
-{
- return m_index;
+Cluster::Cluster(Segment* pSegment, long idx, long long element_start
+ /* long long element_size */)
+ : m_pSegment(pSegment),
+ m_element_start(element_start),
+ m_index(idx),
+ m_pos(element_start),
+ m_element_size(-1 /* element_size */),
+ m_timecode(-1),
+ m_entries(NULL),
+ m_entries_size(0),
+ m_entries_count(-1) // means "has not been parsed yet"
+{}
+
+Cluster::~Cluster() {
+ if (m_entries_count <= 0)
+ return;
+
+ BlockEntry** i = m_entries;
+ BlockEntry** const j = m_entries + m_entries_count;
+
+ while (i != j) {
+ BlockEntry* p = *i++;
+ assert(p);
+
+ delete p;
+ }
+
+ delete[] m_entries;
}
+bool Cluster::EOS() const { return (m_pSegment == NULL); }
-long long Cluster::GetPosition() const
-{
- const long long pos = m_element_start - m_pSegment->m_start;
- assert(pos >= 0);
+long Cluster::GetIndex() const { return m_index; }
- return pos;
+long long Cluster::GetPosition() const {
+ const long long pos = m_element_start - m_pSegment->m_start;
+ assert(pos >= 0);
+
+ return pos;
}
-
-long long Cluster::GetElementSize() const
-{
- return m_element_size;
-}
-
+long long Cluster::GetElementSize() const { return m_element_size; }
#if 0
bool Cluster::HasBlockEntries(
const Segment* pSegment,
- long long off) //relative to start of segment payload
-{
+ long long off) {
assert(pSegment);
- assert(off >= 0); //relative to segment
+ assert(off >= 0); //relative to start of segment payload
IMkvReader* const pReader = pSegment->m_pReader;
@@ -8030,631 +6984,558 @@
}
#endif
-
long Cluster::HasBlockEntries(
const Segment* pSegment,
- long long off, //relative to start of segment payload
- long long& pos,
- long& len)
-{
- assert(pSegment);
- assert(off >= 0); //relative to segment
+ long long off, // relative to start of segment payload
+ long long& pos, long& len) {
+ assert(pSegment);
+ assert(off >= 0); // relative to segment
- IMkvReader* const pReader = pSegment->m_pReader;
+ IMkvReader* const pReader = pSegment->m_pReader;
- long long total, avail;
+ long long total, avail;
- long status = pReader->Length(&total, &avail);
+ long status = pReader->Length(&total, &avail);
- if (status < 0) //error
- return status;
+ if (status < 0) // error
+ return status;
- assert((total < 0) || (avail <= total));
+ assert((total < 0) || (avail <= total));
- pos = pSegment->m_start + off; //absolute
+ pos = pSegment->m_start + off; // absolute
- if ((total >= 0) && (pos >= total))
- return 0; //we don't even have a complete cluster
+ if ((total >= 0) && (pos >= total))
+ return 0; // we don't even have a complete cluster
- const long long segment_stop =
- (pSegment->m_size < 0) ? -1 : pSegment->m_start + pSegment->m_size;
+ const long long segment_stop =
+ (pSegment->m_size < 0) ? -1 : pSegment->m_start + pSegment->m_size;
- long long cluster_stop = -1; //interpreted later to mean "unknown size"
+ long long cluster_stop = -1; // interpreted later to mean "unknown size"
- {
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- long long result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //need more data
- return E_BUFFER_NOT_FULL;
-
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((total >= 0) && ((pos + len) > total))
- return 0;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long id = ReadUInt(pReader, pos, len);
-
- if (id < 0) //error
- return static_cast<long>(id);
-
- if (id != 0x0F43B675) //weird: not cluster ID
- return -1; //generic error
-
- pos += len; //consume Cluster ID field
-
- //read size field
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //weird
- return E_BUFFER_NOT_FULL;
-
- if ((segment_stop >= 0) && ((pos + len) > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((total >= 0) && ((pos + len) > total))
- return 0;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(size);
-
- if (size == 0)
- return 0; //cluster does not have entries
-
- pos += len; //consume size field
-
- //pos now points to start of payload
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (size != unknown_size)
- {
- cluster_stop = pos + size;
- assert(cluster_stop >= 0);
-
- if ((segment_stop >= 0) && (cluster_stop > segment_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((total >= 0) && (cluster_stop > total))
- //return E_FILE_FORMAT_INVALID; //too conservative
- return 0; //cluster does not have any entries
- }
+ {
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
}
- for (;;)
- {
- if ((cluster_stop >= 0) && (pos >= cluster_stop))
- return 0; //no entries detected
+ long long result = GetUIntLength(pReader, pos, len);
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
+ if (result < 0) // error
+ return static_cast<long>(result);
- long long result = GetUIntLength(pReader, pos, len);
+ if (result > 0) // need more data
+ return E_BUFFER_NOT_FULL;
- if (result < 0) //error
- return static_cast<long>(result);
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
- if (result > 0) //need more data
- return E_BUFFER_NOT_FULL;
+ if ((total >= 0) && ((pos + len) > total))
+ return 0;
- if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
+ const long long id = ReadUInt(pReader, pos, len);
- const long long id = ReadUInt(pReader, pos, len);
+ if (id < 0) // error
+ return static_cast<long>(id);
- if (id < 0) //error
- return static_cast<long>(id);
+ if (id != 0x0F43B675) // weird: not cluster ID
+ return -1; // generic error
- //This is the distinguished set of IDs we use to determine
- //that we have exhausted the sub-elements inside the cluster
- //whose ID we parsed earlier.
+ pos += len; // consume Cluster ID field
- if (id == 0x0F43B675) //Cluster ID
- return 0; //no entries found
+ // read size field
- if (id == 0x0C53BB6B) //Cues ID
- return 0; //no entries found
-
- pos += len; //consume id field
-
- if ((cluster_stop >= 0) && (pos >= cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- //read size field
-
- if ((pos + 1) > avail)
- {
- len = 1;
- return E_BUFFER_NOT_FULL;
- }
-
- result = GetUIntLength(pReader, pos, len);
-
- if (result < 0) //error
- return static_cast<long>(result);
-
- if (result > 0) //underflow
- return E_BUFFER_NOT_FULL;
-
- if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > avail)
- return E_BUFFER_NOT_FULL;
-
- const long long size = ReadUInt(pReader, pos, len);
-
- if (size < 0) //error
- return static_cast<long>(size);
-
- pos += len; //consume size field
-
- //pos now points to start of payload
-
- if ((cluster_stop >= 0) && (pos > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if (size == 0) //weird
- continue;
-
- const long long unknown_size = (1LL << (7 * len)) - 1;
-
- if (size == unknown_size)
- return E_FILE_FORMAT_INVALID; //not supported inside cluster
-
- if ((cluster_stop >= 0) && ((pos + size) > cluster_stop))
- return E_FILE_FORMAT_INVALID;
-
- if (id == 0x20) //BlockGroup ID
- return 1; //have at least one entry
-
- if (id == 0x23) //SimpleBlock ID
- return 1; //have at least one entry
-
- pos += size; //consume payload
- assert((cluster_stop < 0) || (pos <= cluster_stop));
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
}
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && ((pos + len) > total))
+ return 0;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ if (size == 0)
+ return 0; // cluster does not have entries
+
+ pos += len; // consume size field
+
+ // pos now points to start of payload
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size != unknown_size) {
+ cluster_stop = pos + size;
+ assert(cluster_stop >= 0);
+
+ if ((segment_stop >= 0) && (cluster_stop > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && (cluster_stop > total))
+ // return E_FILE_FORMAT_INVALID; //too conservative
+ return 0; // cluster does not have any entries
+ }
+ }
+
+ for (;;) {
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ return 0; // no entries detected
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // need more data
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadUInt(pReader, pos, len);
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ // This is the distinguished set of IDs we use to determine
+ // that we have exhausted the sub-elements inside the cluster
+ // whose ID we parsed earlier.
+
+ if (id == 0x0F43B675) // Cluster ID
+ return 0; // no entries found
+
+ if (id == 0x0C53BB6B) // Cues ID
+ return 0; // no entries found
+
+ pos += len; // consume id field
+
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // read size field
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // underflow
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume size field
+
+ // pos now points to start of payload
+
+ if ((cluster_stop >= 0) && (pos > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if (size == 0) // weird
+ continue;
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID; // not supported inside cluster
+
+ if ((cluster_stop >= 0) && ((pos + size) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if (id == 0x20) // BlockGroup ID
+ return 1; // have at least one entry
+
+ if (id == 0x23) // SimpleBlock ID
+ return 1; // have at least one entry
+
+ pos += size; // consume payload
+ assert((cluster_stop < 0) || (pos <= cluster_stop));
+ }
}
+long long Cluster::GetTimeCode() const {
+ long long pos;
+ long len;
-long long Cluster::GetTimeCode() const
-{
+ const long status = Load(pos, len);
+
+ if (status < 0) // error
+ return status;
+
+ return m_timecode;
+}
+
+long long Cluster::GetTime() const {
+ const long long tc = GetTimeCode();
+
+ if (tc < 0)
+ return tc;
+
+ const SegmentInfo* const pInfo = m_pSegment->GetInfo();
+ assert(pInfo);
+
+ const long long scale = pInfo->GetTimeCodeScale();
+ assert(scale >= 1);
+
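+ // Convert the cluster timecode to nanoseconds: ns = timecode *
+ // TimecodeScale. The scale defaults to 1,000,000, i.e. timecodes are
+ // normally in milliseconds.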
+ const long long t = m_timecode * scale;
+
+ return t;
+}
+
+long long Cluster::GetFirstTime() const {
+ const BlockEntry* pEntry;
+
+ const long status = GetFirst(pEntry);
+
+ if (status < 0) // error
+ return status;
+
+ if (pEntry == NULL) // empty cluster
+ return GetTime();
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ return pBlock->GetTime(this);
+}
+
+long long Cluster::GetLastTime() const {
+ const BlockEntry* pEntry;
+
+ const long status = GetLast(pEntry);
+
+ if (status < 0) // error
+ return status;
+
+ if (pEntry == NULL) // empty cluster
+ return GetTime();
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ return pBlock->GetTime(this);
+}
+
+long Cluster::CreateBlock(long long id,
+ long long pos, // absolute pos of payload
+ long long size, long long discard_padding) {
+ assert((id == 0x20) || (id == 0x23)); // BlockGroup or SimpleBlock
+
+ if (m_entries_count < 0) { // haven't parsed anything yet
+ assert(m_entries == NULL);
+ assert(m_entries_size == 0);
+
+ m_entries_size = 1024;
+ m_entries = new BlockEntry* [m_entries_size];
+
+ m_entries_count = 0;
+ } else {
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count <= m_entries_size);
+
+ if (m_entries_count >= m_entries_size) {
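+ // Grow the entries array geometrically (doubling), so appending n
+ // blocks costs O(n) amortized pointer copies.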
+ const long entries_size = 2 * m_entries_size;
+
+ BlockEntry** const entries = new BlockEntry* [entries_size];
+ assert(entries);
+
+ BlockEntry** src = m_entries;
+ BlockEntry** const src_end = src + m_entries_count;
+
+ BlockEntry** dst = entries;
+
+ while (src != src_end)
+ *dst++ = *src++;
+
+ delete[] m_entries;
+
+ m_entries = entries;
+ m_entries_size = entries_size;
+ }
+ }
+
+ if (id == 0x20) // BlockGroup ID
+ return CreateBlockGroup(pos, size, discard_padding);
+ else // SimpleBlock ID
+ return CreateSimpleBlock(pos, size);
+}
+
+long Cluster::CreateBlockGroup(long long start_offset, long long size,
+ long long discard_padding) {
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count >= 0);
+ assert(m_entries_count < m_entries_size);
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = start_offset;
+ const long long stop = start_offset + size;
+
+ // For WebM files, there is a bias towards previous reference times
+ // (in order to support alt-ref frames, which refer back to the previous
+ // keyframe). Normally a 0 value is not possible, but here we tentatively
+ // allow 0 as the value of a reference frame, with the interpretation
+ // that this is a "previous" reference time.
+
+ long long prev = 1; // nonce
+ long long next = 0; // nonce
+ long long duration = -1; // really, this is unsigned
+
+ long long bpos = -1;
+ long long bsize = -1;
+
+ while (pos < stop) {
+ long len;
+ const long long id = ReadUInt(pReader, pos, len);
+ assert(id >= 0); // TODO
+ assert((pos + len) <= stop);
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0); // TODO
+ assert((pos + len) <= stop);
+
+ pos += len; // consume size
+
+ if (id == 0x21) { // Block ID
+ if (bpos < 0) { // only the first Block in the group is used
+ bpos = pos;
+ bsize = size;
+ }
+ } else if (id == 0x1B) { // Duration ID
+ assert(size <= 8);
+
+ duration = UnserializeUInt(pReader, pos, size);
+ assert(duration >= 0); // TODO
+ } else if (id == 0x7B) { // ReferenceBlock
+ assert(size <= 8);
+ const long size_ = static_cast<long>(size);
+
+ long long time;
+
+ long status = UnserializeInt(pReader, pos, size_, time);
+ assert(status == 0);
+ if (status != 0)
+ return -1;
+
+ if (time <= 0) // see note above
+ prev = time;
+ else // weird
+ next = time;
+ }
+
+ pos += size; // consume payload
+ assert(pos <= stop);
+ }
+
+ assert(pos == stop);
+ assert(bpos >= 0);
+ assert(bsize >= 0);
+
+ const long idx = m_entries_count;
+
+ BlockEntry** const ppEntry = m_entries + idx;
+ BlockEntry*& pEntry = *ppEntry;
+
+ pEntry = new (std::nothrow)
+ BlockGroup(this, idx, bpos, bsize, prev, next, duration, discard_padding);
+
+ if (pEntry == NULL)
+ return -1; // generic error
+
+ BlockGroup* const p = static_cast<BlockGroup*>(pEntry);
+
+ const long status = p->Parse();
+
+ if (status == 0) { // success
+ ++m_entries_count;
+ return 0;
+ }
+
+ delete pEntry;
+ pEntry = 0;
+
+ return status;
+}
+
+long Cluster::CreateSimpleBlock(long long st, long long sz) {
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count >= 0);
+ assert(m_entries_count < m_entries_size);
+
+ const long idx = m_entries_count;
+
+ BlockEntry** const ppEntry = m_entries + idx;
+ BlockEntry*& pEntry = *ppEntry;
+
+ pEntry = new (std::nothrow) SimpleBlock(this, idx, st, sz);
+
+ if (pEntry == NULL)
+ return -1; // generic error
+
+ SimpleBlock* const p = static_cast<SimpleBlock*>(pEntry);
+
+ const long status = p->Parse();
+
+ if (status == 0) {
+ ++m_entries_count;
+ return 0;
+ }
+
+ delete pEntry;
+ pEntry = 0;
+
+ return status;
+}
+
+long Cluster::GetFirst(const BlockEntry*& pFirst) const {
+ if (m_entries_count <= 0) {
long long pos;
long len;
- const long status = Load(pos, len);
+ const long status = Parse(pos, len);
- if (status < 0) //error
- return status;
-
- return m_timecode;
-}
-
-
-long long Cluster::GetTime() const
-{
- const long long tc = GetTimeCode();
-
- if (tc < 0)
- return tc;
-
- const SegmentInfo* const pInfo = m_pSegment->GetInfo();
- assert(pInfo);
-
- const long long scale = pInfo->GetTimeCodeScale();
- assert(scale >= 1);
-
- const long long t = m_timecode * scale;
-
- return t;
-}
-
-
-long long Cluster::GetFirstTime() const
-{
- const BlockEntry* pEntry;
-
- const long status = GetFirst(pEntry);
-
- if (status < 0) //error
- return status;
-
- if (pEntry == NULL) //empty cluster
- return GetTime();
-
- const Block* const pBlock = pEntry->GetBlock();
- assert(pBlock);
-
- return pBlock->GetTime(this);
-}
-
-
-long long Cluster::GetLastTime() const
-{
- const BlockEntry* pEntry;
-
- const long status = GetLast(pEntry);
-
- if (status < 0) //error
- return status;
-
- if (pEntry == NULL) //empty cluster
- return GetTime();
-
- const Block* const pBlock = pEntry->GetBlock();
- assert(pBlock);
-
- return pBlock->GetTime(this);
-}
-
-
-long Cluster::CreateBlock(
- long long id,
- long long pos, //absolute pos of payload
- long long size,
- long long discard_padding)
-{
- assert((id == 0x20) || (id == 0x23)); //BlockGroup or SimpleBlock
-
- if (m_entries_count < 0) //haven't parsed anything yet
- {
- assert(m_entries == NULL);
- assert(m_entries_size == 0);
-
- m_entries_size = 1024;
- m_entries = new BlockEntry*[m_entries_size];
-
- m_entries_count = 0;
- }
- else
- {
- assert(m_entries);
- assert(m_entries_size > 0);
- assert(m_entries_count <= m_entries_size);
-
- if (m_entries_count >= m_entries_size)
- {
- const long entries_size = 2 * m_entries_size;
-
- BlockEntry** const entries = new BlockEntry*[entries_size];
- assert(entries);
-
- BlockEntry** src = m_entries;
- BlockEntry** const src_end = src + m_entries_count;
-
- BlockEntry** dst = entries;
-
- while (src != src_end)
- *dst++ = *src++;
-
- delete[] m_entries;
-
- m_entries = entries;
- m_entries_size = entries_size;
- }
+ if (status < 0) { // error
+ pFirst = NULL;
+ return status;
}
- if (id == 0x20) //BlockGroup ID
- return CreateBlockGroup(pos, size, discard_padding);
- else //SimpleBlock ID
- return CreateSimpleBlock(pos, size);
+ if (m_entries_count <= 0) { // empty cluster
+ pFirst = NULL;
+ return 0;
+ }
+ }
+
+ assert(m_entries);
+
+ pFirst = m_entries[0];
+ assert(pFirst);
+
+ return 0; // success
}
+long Cluster::GetLast(const BlockEntry*& pLast) const {
+ for (;;) {
+ long long pos;
+ long len;
-long Cluster::CreateBlockGroup(
- long long start_offset,
- long long size,
- long long discard_padding)
-{
- assert(m_entries);
- assert(m_entries_size > 0);
- assert(m_entries_count >= 0);
- assert(m_entries_count < m_entries_size);
+ const long status = Parse(pos, len);
- IMkvReader* const pReader = m_pSegment->m_pReader;
-
- long long pos = start_offset;
- const long long stop = start_offset + size;
-
- //For WebM files, there is a bias towards previous reference times
- //(in order to support alt-ref frames, which refer back to the previous
- //keyframe). Normally a 0 value is not possible, but here we tentatively
- //allow 0 as the value of a reference frame, with the interpretation
- //that this is a "previous" reference time.
-
- long long prev = 1; //nonce
- long long next = 0; //nonce
- long long duration = -1; //really, this is unsigned
-
- long long bpos = -1;
- long long bsize = -1;
-
- while (pos < stop)
- {
- long len;
- const long long id = ReadUInt(pReader, pos, len);
- assert(id >= 0); //TODO
- assert((pos + len) <= stop);
-
- pos += len; //consume ID
-
- const long long size = ReadUInt(pReader, pos, len);
- assert(size >= 0); //TODO
- assert((pos + len) <= stop);
-
- pos += len; //consume size
-
- if (id == 0x21) //Block ID
- {
- if (bpos < 0) //Block ID
- {
- bpos = pos;
- bsize = size;
- }
- }
- else if (id == 0x1B) //Duration ID
- {
- assert(size <= 8);
-
- duration = UnserializeUInt(pReader, pos, size);
- assert(duration >= 0); //TODO
- }
- else if (id == 0x7B) //ReferenceBlock
- {
- assert(size <= 8);
- const long size_ = static_cast<long>(size);
-
- long long time;
-
- long status = UnserializeInt(pReader, pos, size_, time);
- assert(status == 0);
- if (status != 0)
- return -1;
-
- if (time <= 0) //see note above
- prev = time;
- else //weird
- next = time;
- }
-
- pos += size; //consume payload
- assert(pos <= stop);
+ if (status < 0) { // error
+ pLast = NULL;
+ return status;
}
- assert(pos == stop);
- assert(bpos >= 0);
- assert(bsize >= 0);
+ if (status > 0) // no new block
+ break;
+ }
- const long idx = m_entries_count;
-
- BlockEntry** const ppEntry = m_entries + idx;
- BlockEntry*& pEntry = *ppEntry;
-
- pEntry = new (std::nothrow) BlockGroup(
- this,
- idx,
- bpos,
- bsize,
- prev,
- next,
- duration,
- discard_padding);
-
- if (pEntry == NULL)
- return -1; //generic error
-
- BlockGroup* const p = static_cast<BlockGroup*>(pEntry);
-
- const long status = p->Parse();
-
- if (status == 0) //success
- {
- ++m_entries_count;
- return 0;
- }
-
- delete pEntry;
- pEntry = 0;
-
- return status;
-}
-
-
-
-long Cluster::CreateSimpleBlock(
- long long st,
- long long sz)
-{
- assert(m_entries);
- assert(m_entries_size > 0);
- assert(m_entries_count >= 0);
- assert(m_entries_count < m_entries_size);
-
- const long idx = m_entries_count;
-
- BlockEntry** const ppEntry = m_entries + idx;
- BlockEntry*& pEntry = *ppEntry;
-
- pEntry = new (std::nothrow) SimpleBlock(this, idx, st, sz);
-
- if (pEntry == NULL)
- return -1; //generic error
-
- SimpleBlock* const p = static_cast<SimpleBlock*>(pEntry);
-
- const long status = p->Parse();
-
- if (status == 0)
- {
- ++m_entries_count;
- return 0;
- }
-
- delete pEntry;
- pEntry = 0;
-
- return status;
-}
-
-
-long Cluster::GetFirst(const BlockEntry*& pFirst) const
-{
- if (m_entries_count <= 0)
- {
- long long pos;
- long len;
-
- const long status = Parse(pos, len);
-
- if (status < 0) //error
- {
- pFirst = NULL;
- return status;
- }
-
- if (m_entries_count <= 0) //empty cluster
- {
- pFirst = NULL;
- return 0;
- }
- }
-
- assert(m_entries);
-
- pFirst = m_entries[0];
- assert(pFirst);
-
- return 0; //success
-}
-
-long Cluster::GetLast(const BlockEntry*& pLast) const
-{
- for (;;)
- {
- long long pos;
- long len;
-
- const long status = Parse(pos, len);
-
- if (status < 0) //error
- {
- pLast = NULL;
- return status;
- }
-
- if (status > 0) //no new block
- break;
- }
-
- if (m_entries_count <= 0)
- {
- pLast = NULL;
- return 0;
- }
-
- assert(m_entries);
-
- const long idx = m_entries_count - 1;
-
- pLast = m_entries[idx];
- assert(pLast);
-
+ if (m_entries_count <= 0) {
+ pLast = NULL;
return 0;
+ }
+
+ assert(m_entries);
+
+ const long idx = m_entries_count - 1;
+
+ pLast = m_entries[idx];
+ assert(pLast);
+
+ return 0;
}
+long Cluster::GetNext(const BlockEntry* pCurr, const BlockEntry*& pNext) const {
+ assert(pCurr);
+ assert(m_entries);
+ assert(m_entries_count > 0);
-long Cluster::GetNext(
- const BlockEntry* pCurr,
- const BlockEntry*& pNext) const
-{
- assert(pCurr);
+ size_t idx = pCurr->GetIndex();
+ assert(idx < size_t(m_entries_count));
+ assert(m_entries[idx] == pCurr);
+
+ ++idx;
+
+ if (idx >= size_t(m_entries_count)) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+
+ if (status < 0) { // error
+ pNext = NULL;
+ return status;
+ }
+
+ if (status > 0) {
+ pNext = NULL;
+ return 0;
+ }
+
assert(m_entries);
assert(m_entries_count > 0);
-
- size_t idx = pCurr->GetIndex();
assert(idx < size_t(m_entries_count));
- assert(m_entries[idx] == pCurr);
+ }
- ++idx;
+ pNext = m_entries[idx];
+ assert(pNext);
- if (idx >= size_t(m_entries_count))
- {
- long long pos;
- long len;
-
- const long status = Parse(pos, len);
-
- if (status < 0) //error
- {
- pNext = NULL;
- return status;
- }
-
- if (status > 0)
- {
- pNext = NULL;
- return 0;
- }
-
- assert(m_entries);
- assert(m_entries_count > 0);
- assert(idx < size_t(m_entries_count));
- }
-
- pNext = m_entries[idx];
- assert(pNext);
-
- return 0;
+ return 0;
}
+long Cluster::GetEntryCount() const { return m_entries_count; }
-long Cluster::GetEntryCount() const
-{
- return m_entries_count;
-}
+const BlockEntry* Cluster::GetEntry(const Track* pTrack,
+ long long time_ns) const {
+ assert(pTrack);
-
-const BlockEntry* Cluster::GetEntry(
- const Track* pTrack,
- long long time_ns) const
-{
- assert(pTrack);
-
- if (m_pSegment == NULL) //this is the special EOS cluster
- return pTrack->GetEOS();
+ if (m_pSegment == NULL) // this is the special EOS cluster
+ return pTrack->GetEOS();
#if 0
@@ -8707,76 +7588,66 @@
#else
- const BlockEntry* pResult = pTrack->GetEOS();
+ const BlockEntry* pResult = pTrack->GetEOS();
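+ // Walk the entries in order, remembering the last block on this track
+ // whose time does not exceed time_ns; if no block qualifies, the
+ // track's EOS entry is returned.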
- long index = 0;
+ long index = 0;
- for (;;)
- {
- if (index >= m_entries_count)
- {
- long long pos;
- long len;
+ for (;;) {
+ if (index >= m_entries_count) {
+ long long pos;
+ long len;
- const long status = Parse(pos, len);
- assert(status >= 0);
+ const long status = Parse(pos, len);
+ assert(status >= 0);
- if (status > 0) //completely parsed, and no more entries
- return pResult;
+ if (status > 0) // completely parsed, and no more entries
+ return pResult;
- if (status < 0) //should never happen
- return 0;
+ if (status < 0) // should never happen
+ return 0;
- assert(m_entries);
- assert(index < m_entries_count);
- }
-
- const BlockEntry* const pEntry = m_entries[index];
- assert(pEntry);
- assert(!pEntry->EOS());
-
- const Block* const pBlock = pEntry->GetBlock();
- assert(pBlock);
-
- if (pBlock->GetTrackNumber() != pTrack->GetNumber())
- {
- ++index;
- continue;
- }
-
- if (pTrack->VetEntry(pEntry))
- {
- if (time_ns < 0) //just want first candidate block
- return pEntry;
-
- const long long ns = pBlock->GetTime(this);
-
- if (ns > time_ns)
- return pResult;
-
- pResult = pEntry; //have a candidate
- }
- else if (time_ns >= 0)
- {
- const long long ns = pBlock->GetTime(this);
-
- if (ns > time_ns)
- return pResult;
- }
-
- ++index;
+ assert(m_entries);
+ assert(index < m_entries_count);
}
+ const BlockEntry* const pEntry = m_entries[index];
+ assert(pEntry);
+ assert(!pEntry->EOS());
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ if (pBlock->GetTrackNumber() != pTrack->GetNumber()) {
+ ++index;
+ continue;
+ }
+
+ if (pTrack->VetEntry(pEntry)) {
+ if (time_ns < 0) // just want first candidate block
+ return pEntry;
+
+ const long long ns = pBlock->GetTime(this);
+
+ if (ns > time_ns)
+ return pResult;
+
+ pResult = pEntry; // have a candidate
+ } else if (time_ns >= 0) {
+ const long long ns = pBlock->GetTime(this);
+
+ if (ns > time_ns)
+ return pResult;
+ }
+
+ ++index;
+ }
+
#endif
}
-
-const BlockEntry*
-Cluster::GetEntry(
- const CuePoint& cp,
- const CuePoint::TrackPosition& tp) const
-{
- assert(m_pSegment);
+const BlockEntry* Cluster::GetEntry(const CuePoint& cp,
+ const CuePoint::TrackPosition& tp) const {
+ assert(m_pSegment);
#if 0
@@ -8867,114 +7738,105 @@
#else
- const long long tc = cp.GetTimeCode();
+ const long long tc = cp.GetTimeCode();
- if (tp.m_block > 0)
- {
- const long block = static_cast<long>(tp.m_block);
- const long index = block - 1;
+ if (tp.m_block > 0) {
+ const long block = static_cast<long>(tp.m_block);
+ const long index = block - 1;
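+ // tp.m_block holds the CueBlockNumber, which is 1-based, so convert
+ // it to a 0-based index into m_entries.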
- while (index >= m_entries_count)
- {
- long long pos;
- long len;
+ while (index >= m_entries_count) {
+ long long pos;
+ long len;
- const long status = Parse(pos, len);
+ const long status = Parse(pos, len);
- if (status < 0) //TODO: can this happen?
- return NULL;
+ if (status < 0) // TODO: can this happen?
+ return NULL;
- if (status > 0) //nothing remains to be parsed
- return NULL;
- }
-
- const BlockEntry* const pEntry = m_entries[index];
- assert(pEntry);
- assert(!pEntry->EOS());
-
- const Block* const pBlock = pEntry->GetBlock();
- assert(pBlock);
-
- if ((pBlock->GetTrackNumber() == tp.m_track) &&
- (pBlock->GetTimeCode(this) == tc))
- {
- return pEntry;
- }
+ if (status > 0) // nothing remains to be parsed
+ return NULL;
}
- long index = 0;
+ const BlockEntry* const pEntry = m_entries[index];
+ assert(pEntry);
+ assert(!pEntry->EOS());
- for (;;)
- {
- if (index >= m_entries_count)
- {
- long long pos;
- long len;
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
- const long status = Parse(pos, len);
-
- if (status < 0) //TODO: can this happen?
- return NULL;
-
- if (status > 0) //nothing remains to be parsed
- return NULL;
-
- assert(m_entries);
- assert(index < m_entries_count);
- }
-
- const BlockEntry* const pEntry = m_entries[index];
- assert(pEntry);
- assert(!pEntry->EOS());
-
- const Block* const pBlock = pEntry->GetBlock();
- assert(pBlock);
-
- if (pBlock->GetTrackNumber() != tp.m_track)
- {
- ++index;
- continue;
- }
-
- const long long tc_ = pBlock->GetTimeCode(this);
-
- if (tc_ < tc)
- {
- ++index;
- continue;
- }
-
- if (tc_ > tc)
- return NULL;
-
- const Tracks* const pTracks = m_pSegment->GetTracks();
- assert(pTracks);
-
- const long tn = static_cast<long>(tp.m_track);
- const Track* const pTrack = pTracks->GetTrackByNumber(tn);
-
- if (pTrack == NULL)
- return NULL;
-
- const long long type = pTrack->GetType();
-
- if (type == 2) //audio
- return pEntry;
-
- if (type != 1) //not video
- return NULL;
-
- if (!pBlock->IsKey())
- return NULL;
-
- return pEntry;
+ if ((pBlock->GetTrackNumber() == tp.m_track) &&
+ (pBlock->GetTimeCode(this) == tc)) {
+ return pEntry;
}
+ }
+
+ long index = 0;
+
+ for (;;) {
+ if (index >= m_entries_count) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+
+ if (status < 0) // TODO: can this happen?
+ return NULL;
+
+ if (status > 0) // nothing remains to be parsed
+ return NULL;
+
+ assert(m_entries);
+ assert(index < m_entries_count);
+ }
+
+ const BlockEntry* const pEntry = m_entries[index];
+ assert(pEntry);
+ assert(!pEntry->EOS());
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ if (pBlock->GetTrackNumber() != tp.m_track) {
+ ++index;
+ continue;
+ }
+
+ const long long tc_ = pBlock->GetTimeCode(this);
+
+ if (tc_ < tc) {
+ ++index;
+ continue;
+ }
+
+ if (tc_ > tc)
+ return NULL;
+
+ const Tracks* const pTracks = m_pSegment->GetTracks();
+ assert(pTracks);
+
+ const long tn = static_cast<long>(tp.m_track);
+ const Track* const pTrack = pTracks->GetTrackByNumber(tn);
+
+ if (pTrack == NULL)
+ return NULL;
+
+ const long long type = pTrack->GetType();
+
+ if (type == 2) // audio
+ return pEntry;
+
+ if (type != 1) // not video
+ return NULL;
+
+ if (!pBlock->IsKey())
+ return NULL;
+
+ return pEntry;
+ }
#endif
-
}
-
#if 0
const BlockEntry* Cluster::GetMaxKey(const VideoTrack* pTrack) const
{
@@ -9011,97 +7873,46 @@
}
#endif
+BlockEntry::BlockEntry(Cluster* p, long idx) : m_pCluster(p), m_index(idx) {}
-BlockEntry::BlockEntry(Cluster* p, long idx) :
- m_pCluster(p),
- m_index(idx)
-{
+BlockEntry::~BlockEntry() {}
+
+bool BlockEntry::EOS() const { return (GetKind() == kBlockEOS); }
+
+const Cluster* BlockEntry::GetCluster() const { return m_pCluster; }
+
+long BlockEntry::GetIndex() const { return m_index; }
+
+SimpleBlock::SimpleBlock(Cluster* pCluster, long idx, long long start,
+ long long size)
+ : BlockEntry(pCluster, idx), m_block(start, size, 0) {}
+
+long SimpleBlock::Parse() { return m_block.Parse(m_pCluster); }
+
+BlockEntry::Kind SimpleBlock::GetKind() const { return kBlockSimple; }
+
+const Block* SimpleBlock::GetBlock() const { return &m_block; }
+
+BlockGroup::BlockGroup(Cluster* pCluster, long idx, long long block_start,
+ long long block_size, long long prev, long long next,
+ long long duration, long long discard_padding)
+ : BlockEntry(pCluster, idx),
+ m_block(block_start, block_size, discard_padding),
+ m_prev(prev),
+ m_next(next),
+ m_duration(duration) {}
+
+long BlockGroup::Parse() {
+ const long status = m_block.Parse(m_pCluster);
+
+ if (status)
+ return status;
+
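+ // Infer the key flag: the group is a keyframe only when no backward
+ // reference was seen (m_prev kept its positive nonce) and no forward
+ // reference was seen (m_next kept its non-positive nonce).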
+ m_block.SetKey((m_prev > 0) && (m_next <= 0));
+
+ return 0;
}
-
-BlockEntry::~BlockEntry()
-{
-}
-
-
-bool BlockEntry::EOS() const
-{
- return (GetKind() == kBlockEOS);
-}
-
-
-const Cluster* BlockEntry::GetCluster() const
-{
- return m_pCluster;
-}
-
-
-long BlockEntry::GetIndex() const
-{
- return m_index;
-}
-
-
-SimpleBlock::SimpleBlock(
- Cluster* pCluster,
- long idx,
- long long start,
- long long size) :
- BlockEntry(pCluster, idx),
- m_block(start, size, 0)
-{
-}
-
-
-long SimpleBlock::Parse()
-{
- return m_block.Parse(m_pCluster);
-}
-
-
-BlockEntry::Kind SimpleBlock::GetKind() const
-{
- return kBlockSimple;
-}
-
-
-const Block* SimpleBlock::GetBlock() const
-{
- return &m_block;
-}
-
-
-BlockGroup::BlockGroup(
- Cluster* pCluster,
- long idx,
- long long block_start,
- long long block_size,
- long long prev,
- long long next,
- long long duration,
- long long discard_padding) :
- BlockEntry(pCluster, idx),
- m_block(block_start, block_size, discard_padding),
- m_prev(prev),
- m_next(next),
- m_duration(duration)
-{
-}
-
-
-long BlockGroup::Parse()
-{
- const long status = m_block.Parse(m_pCluster);
-
- if (status)
- return status;
-
- m_block.SetKey((m_prev > 0) && (m_next <= 0));
-
- return 0;
-}
-
-
#if 0
void BlockGroup::ParseBlock(long long start, long long size)
{
@@ -9118,496 +7929,428 @@
}
#endif
+BlockEntry::Kind BlockGroup::GetKind() const { return kBlockGroup; }
-BlockEntry::Kind BlockGroup::GetKind() const
-{
- return kBlockGroup;
-}
+const Block* BlockGroup::GetBlock() const { return &m_block; }
+long long BlockGroup::GetPrevTimeCode() const { return m_prev; }
-const Block* BlockGroup::GetBlock() const
-{
- return &m_block;
-}
+long long BlockGroup::GetNextTimeCode() const { return m_next; }
+long long BlockGroup::GetDurationTimeCode() const { return m_duration; }
-long long BlockGroup::GetPrevTimeCode() const
-{
- return m_prev;
-}
+Block::Block(long long start, long long size_, long long discard_padding)
+ : m_start(start),
+ m_size(size_),
+ m_track(0),
+ m_timecode(-1),
+ m_flags(0),
+ m_frames(NULL),
+ m_frame_count(-1),
+ m_discard_padding(discard_padding) {}
+Block::~Block() { delete[] m_frames; }
-long long BlockGroup::GetNextTimeCode() const
-{
- return m_next;
-}
+long Block::Parse(const Cluster* pCluster) {
+ if (pCluster == NULL)
+ return -1;
-long long BlockGroup::GetDurationTimeCode() const
-{
- return m_duration;
-}
+ if (pCluster->m_pSegment == NULL)
+ return -1;
-Block::Block(long long start, long long size_, long long discard_padding) :
- m_start(start),
- m_size(size_),
- m_track(0),
- m_timecode(-1),
- m_flags(0),
- m_frames(NULL),
- m_frame_count(-1),
- m_discard_padding(discard_padding)
-{
-}
+ assert(m_start >= 0);
+ assert(m_size >= 0);
+ assert(m_track <= 0);
+ assert(m_frames == NULL);
+ assert(m_frame_count <= 0);
+ long long pos = m_start;
+ const long long stop = m_start + m_size;
-Block::~Block()
-{
- delete[] m_frames;
-}
+ long len;
+ IMkvReader* const pReader = pCluster->m_pSegment->m_pReader;
-long Block::Parse(const Cluster* pCluster)
-{
- if (pCluster == NULL)
- return -1;
+ m_track = ReadUInt(pReader, pos, len);
- if (pCluster->m_pSegment == NULL)
- return -1;
+ if (m_track <= 0)
+ return E_FILE_FORMAT_INVALID;
- assert(m_start >= 0);
- assert(m_size >= 0);
- assert(m_track <= 0);
- assert(m_frames == NULL);
- assert(m_frame_count <= 0);
+ if ((pos + len) > stop)
+ return E_FILE_FORMAT_INVALID;
- long long pos = m_start;
- const long long stop = m_start + m_size;
+ pos += len; // consume track number
- long len;
+ if ((stop - pos) < 2)
+ return E_FILE_FORMAT_INVALID;
- IMkvReader* const pReader = pCluster->m_pSegment->m_pReader;
+ long status;
+ long long value;
- m_track = ReadUInt(pReader, pos, len);
+ status = UnserializeInt(pReader, pos, 2, value);
- if (m_track <= 0)
- return E_FILE_FORMAT_INVALID;
+ if (status)
+ return E_FILE_FORMAT_INVALID;
- if ((pos + len) > stop)
- return E_FILE_FORMAT_INVALID;
+ if (value < SHRT_MIN)
+ return E_FILE_FORMAT_INVALID;
- pos += len; //consume track number
+ if (value > SHRT_MAX)
+ return E_FILE_FORMAT_INVALID;
- if ((stop - pos) < 2)
- return E_FILE_FORMAT_INVALID;
+ m_timecode = static_cast<short>(value);
- long status;
- long long value;
+ pos += 2;
- status = UnserializeInt(pReader, pos, 2, value);
+ if ((stop - pos) <= 0)
+ return E_FILE_FORMAT_INVALID;
- if (status)
- return E_FILE_FORMAT_INVALID;
+ status = pReader->Read(pos, 1, &m_flags);
- if (value < SHRT_MIN)
- return E_FILE_FORMAT_INVALID;
+ if (status)
+ return E_FILE_FORMAT_INVALID;
- if (value > SHRT_MAX)
- return E_FILE_FORMAT_INVALID;
+ const int lacing = int(m_flags & 0x06) >> 1;
- m_timecode = static_cast<short>(value);
+ ++pos; // consume flags byte
- pos += 2;
+ if (lacing == 0) { // no lacing
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
- if ((stop - pos) <= 0)
- return E_FILE_FORMAT_INVALID;
-
- status = pReader->Read(pos, 1, &m_flags);
-
- if (status)
- return E_FILE_FORMAT_INVALID;
-
- const int lacing = int(m_flags & 0x06) >> 1;
-
- ++pos; //consume flags byte
-
- if (lacing == 0) //no lacing
- {
- if (pos > stop)
- return E_FILE_FORMAT_INVALID;
-
- m_frame_count = 1;
- m_frames = new Frame[m_frame_count];
-
- Frame& f = m_frames[0];
- f.pos = pos;
-
- const long long frame_size = stop - pos;
-
- if (frame_size > LONG_MAX)
- return E_FILE_FORMAT_INVALID;
-
- f.len = static_cast<long>(frame_size);
-
- return 0; //success
- }
-
- if (pos >= stop)
- return E_FILE_FORMAT_INVALID;
-
- unsigned char biased_count;
-
- status = pReader->Read(pos, 1, &biased_count);
-
- if (status)
- return E_FILE_FORMAT_INVALID;
-
- ++pos; //consume frame count
- assert(pos <= stop);
-
- m_frame_count = int(biased_count) + 1;
-
+ m_frame_count = 1;
m_frames = new Frame[m_frame_count];
- assert(m_frames);
- if (lacing == 1) //Xiph
- {
- Frame* pf = m_frames;
- Frame* const pf_end = pf + m_frame_count;
+ Frame& f = m_frames[0];
+ f.pos = pos;
- long size = 0;
- int frame_count = m_frame_count;
+ const long long frame_size = stop - pos;
- while (frame_count > 1)
- {
- long frame_size = 0;
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
- for (;;)
- {
- unsigned char val;
+ f.len = static_cast<long>(frame_size);
- if (pos >= stop)
- return E_FILE_FORMAT_INVALID;
+ return 0; // success
+ }
- status = pReader->Read(pos, 1, &val);
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
- if (status)
- return E_FILE_FORMAT_INVALID;
+ unsigned char biased_count;
- ++pos; //consume xiph size byte
+ status = pReader->Read(pos, 1, &biased_count);
- frame_size += val;
+ if (status)
+ return E_FILE_FORMAT_INVALID;
- if (val < 255)
- break;
- }
+ ++pos; // consume frame count
+ assert(pos <= stop);
- Frame& f = *pf++;
- assert(pf < pf_end);
+ m_frame_count = int(biased_count) + 1;
- f.pos = 0; //patch later
+ m_frames = new Frame[m_frame_count];
+ assert(m_frames);
- f.len = frame_size;
- size += frame_size; //contribution of this frame
+ if (lacing == 1) { // Xiph
+ Frame* pf = m_frames;
+ Frame* const pf_end = pf + m_frame_count;
- --frame_count;
- }
+ long size = 0;
+ int frame_count = m_frame_count;
- assert(pf < pf_end);
- assert(pos <= stop);
+ while (frame_count > 1) {
+ long frame_size = 0;
- {
- Frame& f = *pf++;
-
- if (pf != pf_end)
- return E_FILE_FORMAT_INVALID;
-
- f.pos = 0; //patch later
-
- const long long total_size = stop - pos;
-
- if (total_size < size)
- return E_FILE_FORMAT_INVALID;
-
- const long long frame_size = total_size - size;
-
- if (frame_size > LONG_MAX)
- return E_FILE_FORMAT_INVALID;
-
- f.len = static_cast<long>(frame_size);
- }
-
- pf = m_frames;
- while (pf != pf_end)
- {
- Frame& f = *pf++;
- assert((pos + f.len) <= stop);
-
- f.pos = pos;
- pos += f.len;
- }
-
- assert(pos == stop);
- }
- else if (lacing == 2) //fixed-size lacing
- {
- const long long total_size = stop - pos;
-
- if ((total_size % m_frame_count) != 0)
- return E_FILE_FORMAT_INVALID;
-
- const long long frame_size = total_size / m_frame_count;
-
- if (frame_size > LONG_MAX)
- return E_FILE_FORMAT_INVALID;
-
- Frame* pf = m_frames;
- Frame* const pf_end = pf + m_frame_count;
-
- while (pf != pf_end)
- {
- assert((pos + frame_size) <= stop);
-
- Frame& f = *pf++;
-
- f.pos = pos;
- f.len = static_cast<long>(frame_size);
-
- pos += frame_size;
- }
-
- assert(pos == stop);
- }
- else
- {
- assert(lacing == 3); //EBML lacing
+ for (;;) {
+ unsigned char val;
if (pos >= stop)
- return E_FILE_FORMAT_INVALID;
+ return E_FILE_FORMAT_INVALID;
- long size = 0;
- int frame_count = m_frame_count;
+ status = pReader->Read(pos, 1, &val);
- long long frame_size = ReadUInt(pReader, pos, len);
+ if (status)
+ return E_FILE_FORMAT_INVALID;
- if (frame_size < 0)
- return E_FILE_FORMAT_INVALID;
+ ++pos; // consume xiph size byte
- if (frame_size > LONG_MAX)
- return E_FILE_FORMAT_INVALID;
+ frame_size += val;
- if ((pos + len) > stop)
- return E_FILE_FORMAT_INVALID;
+ if (val < 255)
+ break;
+ }
- pos += len; //consume length of size of first frame
+ Frame& f = *pf++;
+ assert(pf < pf_end);
- if ((pos + frame_size) > stop)
- return E_FILE_FORMAT_INVALID;
+ f.pos = 0; // patch later
- Frame* pf = m_frames;
- Frame* const pf_end = pf + m_frame_count;
+ f.len = frame_size;
+ size += frame_size; // contribution of this frame
- {
- Frame& curr = *pf;
-
- curr.pos = 0; //patch later
-
- curr.len = static_cast<long>(frame_size);
- size += curr.len; //contribution of this frame
- }
-
- --frame_count;
-
- while (frame_count > 1)
- {
- if (pos >= stop)
- return E_FILE_FORMAT_INVALID;
-
- assert(pf < pf_end);
-
- const Frame& prev = *pf++;
- assert(prev.len == frame_size);
- if (prev.len != frame_size)
- return E_FILE_FORMAT_INVALID;
-
- assert(pf < pf_end);
-
- Frame& curr = *pf;
-
- curr.pos = 0; //patch later
-
- const long long delta_size_ = ReadUInt(pReader, pos, len);
-
- if (delta_size_ < 0)
- return E_FILE_FORMAT_INVALID;
-
- if ((pos + len) > stop)
- return E_FILE_FORMAT_INVALID;
-
- pos += len; //consume length of (delta) size
- assert(pos <= stop);
-
- const int exp = 7*len - 1;
- const long long bias = (1LL << exp) - 1LL;
- const long long delta_size = delta_size_ - bias;
-
- frame_size += delta_size;
-
- if (frame_size < 0)
- return E_FILE_FORMAT_INVALID;
-
- if (frame_size > LONG_MAX)
- return E_FILE_FORMAT_INVALID;
-
- curr.len = static_cast<long>(frame_size);
- size += curr.len; //contribution of this frame
-
- --frame_count;
- }
-
- {
- assert(pos <= stop);
- assert(pf < pf_end);
-
- const Frame& prev = *pf++;
- assert(prev.len == frame_size);
- if (prev.len != frame_size)
- return E_FILE_FORMAT_INVALID;
-
- assert(pf < pf_end);
-
- Frame& curr = *pf++;
- assert(pf == pf_end);
-
- curr.pos = 0; //patch later
-
- const long long total_size = stop - pos;
-
- if (total_size < size)
- return E_FILE_FORMAT_INVALID;
-
- frame_size = total_size - size;
-
- if (frame_size > LONG_MAX)
- return E_FILE_FORMAT_INVALID;
-
- curr.len = static_cast<long>(frame_size);
- }
-
- pf = m_frames;
- while (pf != pf_end)
- {
- Frame& f = *pf++;
- assert((pos + f.len) <= stop);
-
- f.pos = pos;
- pos += f.len;
- }
-
- assert(pos == stop);
+ --frame_count;
}
- return 0; //success
+ assert(pf < pf_end);
+ assert(pos <= stop);
+
+ {
+ Frame& f = *pf++;
+
+ if (pf != pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ f.pos = 0; // patch later
+
+ const long long total_size = stop - pos;
+
+ if (total_size < size)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long frame_size = total_size - size;
+
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ f.len = static_cast<long>(frame_size);
+ }
+
+ pf = m_frames;
+ while (pf != pf_end) {
+ Frame& f = *pf++;
+ assert((pos + f.len) <= stop);
+
+ f.pos = pos;
+ pos += f.len;
+ }
+
+ assert(pos == stop);
+ } else if (lacing == 2) { // fixed-size lacing
+ const long long total_size = stop - pos;
+
+ if ((total_size % m_frame_count) != 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long frame_size = total_size / m_frame_count;
+
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ Frame* pf = m_frames;
+ Frame* const pf_end = pf + m_frame_count;
+
+ while (pf != pf_end) {
+ assert((pos + frame_size) <= stop);
+
+ Frame& f = *pf++;
+
+ f.pos = pos;
+ f.len = static_cast<long>(frame_size);
+
+ pos += frame_size;
+ }
+
+ assert(pos == stop);
+ } else {
+ assert(lacing == 3); // EBML lacing
+
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ long size = 0;
+ int frame_count = m_frame_count;
+
+ long long frame_size = ReadUInt(pReader, pos, len);
+
+ if (frame_size < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume length of size of first frame
+
+ if ((pos + frame_size) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ Frame* pf = m_frames;
+ Frame* const pf_end = pf + m_frame_count;
+
+ {
+ Frame& curr = *pf;
+
+ curr.pos = 0; // patch later
+
+ curr.len = static_cast<long>(frame_size);
+ size += curr.len; // contribution of this frame
+ }
+
+ --frame_count;
+
+ while (frame_count > 1) {
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ assert(pf < pf_end);
+
+ const Frame& prev = *pf++;
+ assert(prev.len == frame_size);
+ if (prev.len != frame_size)
+ return E_FILE_FORMAT_INVALID;
+
+ assert(pf < pf_end);
+
+ Frame& curr = *pf;
+
+ curr.pos = 0; // patch later
+
+ const long long delta_size_ = ReadUInt(pReader, pos, len);
+
+ if (delta_size_ < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume length of (delta) size
+ assert(pos <= stop);
+
+ const int exp = 7 * len - 1;
+ const long long bias = (1LL << exp) - 1LL;
+ const long long delta_size = delta_size_ - bias;
+
+ frame_size += delta_size;
+
+ if (frame_size < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ curr.len = static_cast<long>(frame_size);
+ size += curr.len; // contribution of this frame
+
+ --frame_count;
+ }
+
+ {
+ assert(pos <= stop);
+ assert(pf < pf_end);
+
+ const Frame& prev = *pf++;
+ assert(prev.len == frame_size);
+ if (prev.len != frame_size)
+ return E_FILE_FORMAT_INVALID;
+
+ assert(pf < pf_end);
+
+ Frame& curr = *pf++;
+ assert(pf == pf_end);
+
+ curr.pos = 0; // patch later
+
+ const long long total_size = stop - pos;
+
+ if (total_size < size)
+ return E_FILE_FORMAT_INVALID;
+
+ frame_size = total_size - size;
+
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ curr.len = static_cast<long>(frame_size);
+ }
+
+ pf = m_frames;
+ while (pf != pf_end) {
+ Frame& f = *pf++;
+ assert((pos + f.len) <= stop);
+
+ f.pos = pos;
+ pos += f.len;
+ }
+
+ assert(pos == stop);
+ }
+
+ return 0; // success
}
+long long Block::GetTimeCode(const Cluster* pCluster) const {
+ if (pCluster == 0)
+ return m_timecode;
-long long Block::GetTimeCode(const Cluster* pCluster) const
-{
- if (pCluster == 0)
- return m_timecode;
+ const long long tc0 = pCluster->GetTimeCode();
+ assert(tc0 >= 0);
- const long long tc0 = pCluster->GetTimeCode();
- assert(tc0 >= 0);
+ const long long tc = tc0 + m_timecode;
- const long long tc = tc0 + m_timecode;
-
- return tc; //unscaled timecode units
+ return tc; // unscaled timecode units
}
+long long Block::GetTime(const Cluster* pCluster) const {
+ assert(pCluster);
-long long Block::GetTime(const Cluster* pCluster) const
-{
- assert(pCluster);
+ const long long tc = GetTimeCode(pCluster);
- const long long tc = GetTimeCode(pCluster);
+ const Segment* const pSegment = pCluster->m_pSegment;
+ const SegmentInfo* const pInfo = pSegment->GetInfo();
+ assert(pInfo);
- const Segment* const pSegment = pCluster->m_pSegment;
- const SegmentInfo* const pInfo = pSegment->GetInfo();
- assert(pInfo);
+ const long long scale = pInfo->GetTimeCodeScale();
+ assert(scale >= 1);
- const long long scale = pInfo->GetTimeCodeScale();
- assert(scale >= 1);
+ const long long ns = tc * scale;
- const long long ns = tc * scale;
-
- return ns;
+ return ns;
}
+long long Block::GetTrackNumber() const { return m_track; }
-long long Block::GetTrackNumber() const
-{
- return m_track;
+bool Block::IsKey() const {
+ return ((m_flags & static_cast<unsigned char>(1 << 7)) != 0);
}
-
-bool Block::IsKey() const
-{
- return ((m_flags & static_cast<unsigned char>(1 << 7)) != 0);
+void Block::SetKey(bool bKey) {
+ if (bKey)
+ m_flags |= static_cast<unsigned char>(1 << 7);
+ else
+ m_flags &= 0x7F;
}
+bool Block::IsInvisible() const { return bool(int(m_flags & 0x08) != 0); }
-void Block::SetKey(bool bKey)
-{
- if (bKey)
- m_flags |= static_cast<unsigned char>(1 << 7);
- else
- m_flags &= 0x7F;
+Block::Lacing Block::GetLacing() const {
+ const int value = int(m_flags & 0x06) >> 1;
+ return static_cast<Lacing>(value);
}
+int Block::GetFrameCount() const { return m_frame_count; }
-bool Block::IsInvisible() const
-{
- return bool(int(m_flags & 0x08) != 0);
+const Block::Frame& Block::GetFrame(int idx) const {
+ assert(idx >= 0);
+ assert(idx < m_frame_count);
+
+ const Frame& f = m_frames[idx];
+ assert(f.pos > 0);
+ assert(f.len > 0);
+
+ return f;
}
+long Block::Frame::Read(IMkvReader* pReader, unsigned char* buf) const {
+ assert(pReader);
+ assert(buf);
-Block::Lacing Block::GetLacing() const
-{
- const int value = int(m_flags & 0x06) >> 1;
- return static_cast<Lacing>(value);
+ const long status = pReader->Read(pos, len, buf);
+ return status;
}
+long long Block::GetDiscardPadding() const { return m_discard_padding; }
-int Block::GetFrameCount() const
-{
- return m_frame_count;
-}
-
-
-const Block::Frame& Block::GetFrame(int idx) const
-{
- assert(idx >= 0);
- assert(idx < m_frame_count);
-
- const Frame& f = m_frames[idx];
- assert(f.pos > 0);
- assert(f.len > 0);
-
- return f;
-}
-
-
-long Block::Frame::Read(IMkvReader* pReader, unsigned char* buf) const
-{
- assert(pReader);
- assert(buf);
-
- const long status = pReader->Read(pos, len, buf);
- return status;
-}
-
-long long Block::GetDiscardPadding() const
-{
- return m_discard_padding;
-}
-
-} //end namespace mkvparser
+} // end namespace mkvparser
|
CWE-119
|
{
Track** i = m_trackEntries;
Track** const j = m_trackEntriesEnd;
while (i != j)
{
Track* const pTrack = *i++;
delete pTrack;
}
delete[] m_trackEntries;
}
| null |
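A note on the EBML-lacing branch of Block::Parse in the patch above: frame sizes after the first are stored as unsigned integers that must be re-biased into signed deltas against the previous frame's size. The helper below is a hypothetical illustration of that single step (decode_ebml_delta is not part of mkvparser); it mirrors the exp/bias arithmetic visible in the diff.

#include <assert.h>

/* Recover the signed size delta from the raw value returned by ReadUInt;
   len is the encoded byte length of that value (1..8). */
long long decode_ebml_delta(long long raw, long len) {
  assert(len >= 1 && len <= 8);
  const int exp = 7 * (int)len - 1;
  const long long bias = (1LL << exp) - 1LL; /* len == 1 gives bias 63 */
  return raw - bias; /* added to the previous frame's size in Block::Parse */
}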
150,801 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
VpxVideoReader *reader = NULL;
const VpxVideoInfo *info = NULL;
const VpxInterface *decoder = NULL;
exec_name = argv[0];
if (argc != 3)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing.", argv[2]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
die_codec(&codec, "Failed to decode frame");
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
unsigned char digest[16];
get_image_md5(img, digest);
print_md5(outfile, digest);
fprintf(outfile, " img-%dx%d-%04d.i420\n",
img->d_w, img->d_h, ++frame_cnt);
}
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
VpxVideoReader *reader = NULL;
const VpxVideoInfo *info = NULL;
const VpxInterface *decoder = NULL;
exec_name = argv[0];
if (argc != 3)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing.", argv[2]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
die_codec(&codec, "Failed to decode frame");
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
unsigned char digest[16];
get_image_md5(img, digest);
print_md5(outfile, digest);
fprintf(outfile, " img-%dx%d-%04d.i420\n",
img->d_w, img->d_h, ++frame_cnt);
}
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
@@ -33,14 +33,12 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
-
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
-#include "./md5_utils.h"
-#include "./tools_common.h"
-#include "./video_reader.h"
+#include "../md5_utils.h"
+#include "../tools_common.h"
+#include "../video_reader.h"
#include "./vpx_config.h"
static void get_image_md5(const vpx_image_t *img, unsigned char digest[16]) {
@@ -73,7 +71,7 @@
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <infile> <outfile>\n", exec_name);
exit(EXIT_FAILURE);
}
@@ -104,9 +102,9 @@
if (!decoder)
die("Unknown input codec.");
- printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
- if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
+ if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder");
while (vpx_video_reader_read_frame(reader)) {
|
CWE-119
|
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
|
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
|
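The only behavioral change in this row is the rename of the decoder-table accessor from interface() to codec_interface(). A likely motive (an assumption here; the patch does not state one) is that "interface" is defined as a macro by Windows SDK headers, so a struct member of that name breaks MSVC builds. The lookup table implied by the calls above is sketched below; the exact field set is an assumption.

#include <stdint.h>
#include "vpx/vpx_codec.h"

typedef struct VpxInterface {
  const char *const name;                            /* e.g. "vp8" */
  const uint32_t fourcc;                             /* e.g. VP8_FOURCC */
  vpx_codec_iface_t *(*const codec_interface)(void); /* was interface() */
} VpxInterface;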
150,802 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
const VpxInterface *decoder = NULL;
VpxVideoReader *reader = NULL;
const VpxVideoInfo *info = NULL;
int n = 0;
int m = 0;
int is_range = 0;
char *nptr = NULL;
exec_name = argv[0];
if (argc != 4)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing.", argv[2]);
n = strtol(argv[3], &nptr, 0);
m = strtol(nptr + 1, NULL, 0);
is_range = (*nptr == '-');
if (!n || !m || (*nptr != '-' && *nptr != '/'))
die("Couldn't parse pattern %s.\n", argv[3]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
int skip;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
die_codec(&codec, "Failed to decode frame.");
++frame_cnt;
skip = (is_range && frame_cnt >= n && frame_cnt <= m) ||
(!is_range && m - (frame_cnt - 1) % m <= n);
if (!skip) {
putc('.', stdout);
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL)
vpx_img_write(img, outfile);
} else {
putc('X', stdout);
}
fflush(stdout);
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
info->frame_width, info->frame_height, argv[2]);
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
const VpxInterface *decoder = NULL;
VpxVideoReader *reader = NULL;
const VpxVideoInfo *info = NULL;
int n = 0;
int m = 0;
int is_range = 0;
char *nptr = NULL;
exec_name = argv[0];
if (argc != 4)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing.", argv[2]);
n = strtol(argv[3], &nptr, 0);
m = strtol(nptr + 1, NULL, 0);
is_range = (*nptr == '-');
if (!n || !m || (*nptr != '-' && *nptr != '/'))
die("Couldn't parse pattern %s.\n", argv[3]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
int skip;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
die_codec(&codec, "Failed to decode frame.");
++frame_cnt;
skip = (is_range && frame_cnt >= n && frame_cnt <= m) ||
(!is_range && m - (frame_cnt - 1) % m <= n);
if (!skip) {
putc('.', stdout);
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL)
vpx_img_write(img, outfile);
} else {
putc('X', stdout);
}
fflush(stdout);
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
info->frame_width, info->frame_height, argv[2]);
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
@@ -56,18 +56,16 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
-
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
-#include "./tools_common.h"
-#include "./video_reader.h"
+#include "../tools_common.h"
+#include "../video_reader.h"
#include "./vpx_config.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <infile> <outfile> <N-M|N/M>\n", exec_name);
exit(EXIT_FAILURE);
}
@@ -108,9 +106,9 @@
if (!decoder)
die("Unknown input codec.");
- printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
- if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
+ if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
|
CWE-119
|
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
|
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
|
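The pattern argument in this row is parsed into n and m, and the skip expression then selects frames two ways: "N-M" drops the inclusive range N..M, while "N/M" drops the last N frames of every group of M. A standalone sketch of that predicate, verifiable by inspection against the loop above:

#include <stdio.h>

static int drop_frame(int frame_cnt, int n, int m, int is_range) {
  if (is_range)
    return frame_cnt >= n && frame_cnt <= m; /* "N-M": drop range */
  return m - (frame_cnt - 1) % m <= n;       /* "N/M": last n of each m */
}

int main(void) {
  int f;
  for (f = 1; f <= 9; ++f) /* pattern "1/3" drops frames 3, 6 and 9 */
    printf("%d:%c ", f, drop_frame(f, 1, 3, 0) ? 'X' : '.');
  printf("\n");
  return 0;
}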
150,803 |
void usage_exit() {
fprintf(stderr, "Usage: %s <infile> <outfile> <N-M|N/M>\n", exec_name);
exit(EXIT_FAILURE);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void usage_exit(void) {
fprintf(stderr, "Usage: %s <infile> <outfile> <N-M|N/M>\n", exec_name);
exit(EXIT_FAILURE);
}
|
@@ -56,18 +56,16 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
-
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
-#include "./tools_common.h"
-#include "./video_reader.h"
+#include "../tools_common.h"
+#include "../video_reader.h"
#include "./vpx_config.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <infile> <outfile> <N-M|N/M>\n", exec_name);
exit(EXIT_FAILURE);
}
@@ -108,9 +106,9 @@
if (!decoder)
die("Unknown input codec.");
- printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
- if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
+ if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
|
CWE-119
| null |
void usage_exit(void) {
|
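The one-line fix in this row recurs throughout these examples: in C, an empty parameter list leaves a function's arguments unspecified, whereas (void) declares a true prototype taking no arguments. The two declarations below are therefore not equivalent in C (they are in C++), and strict-prototype builds reject the first form.

void usage_exit();     /* old-style declaration: arguments unchecked */
void usage_exit(void); /* prototype: takes no arguments, calls checked */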
150,804 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
vpx_codec_err_t res;
VpxVideoReader *reader = NULL;
const VpxInterface *decoder = NULL;
const VpxVideoInfo *info = NULL;
exec_name = argv[0];
if (argc != 3)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing", argv[2]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
res = vpx_codec_dec_init(&codec, decoder->interface(), NULL,
VPX_CODEC_USE_POSTPROC);
if (res == VPX_CODEC_INCAPABLE)
die_codec(&codec, "Postproc not supported by this decoder.");
if (res)
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
++frame_cnt;
if (frame_cnt % 30 == 1) {
vp8_postproc_cfg_t pp = {0, 0, 0};
if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp))
die_codec(&codec, "Failed to turn off postproc.");
} else if (frame_cnt % 30 == 16) {
vp8_postproc_cfg_t pp = {VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE,
4, 0};
if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp))
die_codec(&codec, "Failed to turn on postproc.");
};
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 15000))
die_codec(&codec, "Failed to decode frame");
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
vpx_img_write(img, outfile);
}
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec");
printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
info->frame_width, info->frame_height, argv[2]);
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
vpx_codec_err_t res;
VpxVideoReader *reader = NULL;
const VpxInterface *decoder = NULL;
const VpxVideoInfo *info = NULL;
exec_name = argv[0];
if (argc != 3)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing", argv[2]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
res = vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL,
VPX_CODEC_USE_POSTPROC);
if (res == VPX_CODEC_INCAPABLE)
die_codec(&codec, "Postproc not supported by this decoder.");
if (res)
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
++frame_cnt;
if (frame_cnt % 30 == 1) {
vp8_postproc_cfg_t pp = {0, 0, 0};
if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp))
die_codec(&codec, "Failed to turn off postproc.");
} else if (frame_cnt % 30 == 16) {
vp8_postproc_cfg_t pp = {VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE,
4, 0};
if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp))
die_codec(&codec, "Failed to turn on postproc.");
};
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 15000))
die_codec(&codec, "Failed to decode frame");
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
vpx_img_write(img, outfile);
}
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec");
printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
info->frame_width, info->frame_height, argv[2]);
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
@@ -43,18 +43,16 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
-
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
-#include "./tools_common.h"
-#include "./video_reader.h"
+#include "../tools_common.h"
+#include "../video_reader.h"
#include "./vpx_config.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <infile> <outfile>\n", exec_name);
exit(EXIT_FAILURE);
}
@@ -86,9 +84,9 @@
if (!decoder)
die("Unknown input codec.");
- printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
- res = vpx_codec_dec_init(&codec, decoder->interface(), NULL,
+ res = vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL,
VPX_CODEC_USE_POSTPROC);
if (res == VPX_CODEC_INCAPABLE)
die_codec(&codec, "Postproc not supported by this decoder.");
|
CWE-119
|
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
res = vpx_codec_dec_init(&codec, decoder->interface(), NULL,
|
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
res = vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL,
|
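For reference, the postprocessing toggle exercised every 30 frames above can be isolated as below. enable_postproc is a hypothetical helper; the vp8_postproc_cfg_t field meanings (flag bits, deblocking level, noise level) are read off the initializer order used in the example itself.

#include "vpx/vp8dx.h"

static void enable_postproc(vpx_codec_ctx_t *codec) {
  /* deblock + demacroblock + MFQE, deblocking level 4, no added noise */
  vp8_postproc_cfg_t pp = {VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE, 4, 0};
  if (vpx_codec_control(codec, VP8_SET_POSTPROC, &pp))
    die_codec(codec, "Failed to turn on postproc.");
}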
150,805 |
int main(int argc, char *argv[]) {
char *fin, *fout;
FILE *fpin, *fpout;
uint8_t *inbuf, *outbuf;
uint8_t *inbuf_u, *outbuf_u;
uint8_t *inbuf_v, *outbuf_v;
int f, frames;
int width, height, target_width, target_height;
if (argc < 5) {
printf("Incorrect parameters:\n");
usage(argv[0]);
return 1;
}
fin = argv[1];
fout = argv[4];
if (!parse_dim(argv[2], &width, &height)) {
printf("Incorrect parameters: %s\n", argv[2]);
usage(argv[0]);
return 1;
}
if (!parse_dim(argv[3], &target_width, &target_height)) {
printf("Incorrect parameters: %s\n", argv[3]);
usage(argv[0]);
return 1;
}
fpin = fopen(fin, "rb");
if (fpin == NULL) {
printf("Can't open file %s to read\n", fin);
usage(argv[0]);
return 1;
}
fpout = fopen(fout, "wb");
if (fpout == NULL) {
printf("Can't open file %s to write\n", fout);
usage(argv[0]);
return 1;
}
if (argc >= 6)
frames = atoi(argv[5]);
else
frames = INT_MAX;
printf("Input size: %dx%d\n",
width, height);
printf("Target size: %dx%d, Frames: ",
target_width, target_height);
if (frames == INT_MAX)
printf("All\n");
else
printf("%d\n", frames);
inbuf = (uint8_t*)malloc(width * height * 3 / 2);
outbuf = (uint8_t*)malloc(target_width * target_height * 3 / 2);
inbuf_u = inbuf + width * height;
inbuf_v = inbuf_u + width * height / 4;
outbuf_u = outbuf + target_width * target_height;
outbuf_v = outbuf_u + target_width * target_height / 4;
f = 0;
while (f < frames) {
if (fread(inbuf, width * height * 3 / 2, 1, fpin) != 1)
break;
vp9_resize_frame420(inbuf, width, inbuf_u, inbuf_v, width / 2,
height, width,
outbuf, target_width, outbuf_u, outbuf_v,
target_width / 2,
target_height, target_width);
fwrite(outbuf, target_width * target_height * 3 / 2, 1, fpout);
f++;
}
printf("%d frames processed\n", f);
fclose(fpin);
fclose(fpout);
free(inbuf);
free(outbuf);
return 0;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char *argv[]) {
char *fin, *fout;
FILE *fpin, *fpout;
uint8_t *inbuf, *outbuf;
uint8_t *inbuf_u, *outbuf_u;
uint8_t *inbuf_v, *outbuf_v;
int f, frames;
int width, height, target_width, target_height;
exec_name = argv[0];
if (argc < 5) {
printf("Incorrect parameters:\n");
usage();
return 1;
}
fin = argv[1];
fout = argv[4];
if (!parse_dim(argv[2], &width, &height)) {
printf("Incorrect parameters: %s\n", argv[2]);
usage();
return 1;
}
if (!parse_dim(argv[3], &target_width, &target_height)) {
printf("Incorrect parameters: %s\n", argv[3]);
usage();
return 1;
}
fpin = fopen(fin, "rb");
if (fpin == NULL) {
printf("Can't open file %s to read\n", fin);
usage();
return 1;
}
fpout = fopen(fout, "wb");
if (fpout == NULL) {
printf("Can't open file %s to write\n", fout);
usage();
return 1;
}
if (argc >= 6)
frames = atoi(argv[5]);
else
frames = INT_MAX;
printf("Input size: %dx%d\n",
width, height);
printf("Target size: %dx%d, Frames: ",
target_width, target_height);
if (frames == INT_MAX)
printf("All\n");
else
printf("%d\n", frames);
inbuf = (uint8_t*)malloc(width * height * 3 / 2);
outbuf = (uint8_t*)malloc(target_width * target_height * 3 / 2);
inbuf_u = inbuf + width * height;
inbuf_v = inbuf_u + width * height / 4;
outbuf_u = outbuf + target_width * target_height;
outbuf_v = outbuf_u + target_width * target_height / 4;
f = 0;
while (f < frames) {
if (fread(inbuf, width * height * 3 / 2, 1, fpin) != 1)
break;
vp9_resize_frame420(inbuf, width, inbuf_u, inbuf_v, width / 2,
height, width,
outbuf, target_width, outbuf_u, outbuf_v,
target_width / 2,
target_height, target_width);
fwrite(outbuf, target_width * target_height * 3 / 2, 1, fpout);
f++;
}
printf("%d frames processed\n", f);
fclose(fpin);
fclose(fpout);
free(inbuf);
free(outbuf);
return 0;
}
|
@@ -15,15 +15,23 @@
#include <stdlib.h>
#include <string.h>
-#include "./vp9/encoder/vp9_resize.h"
+#include "../tools_common.h"
+#include "../vp9/encoder/vp9_resize.h"
-static void usage(char *progname) {
+static const char *exec_name = NULL;
+
+static void usage() {
printf("Usage:\n");
printf("%s <input_yuv> <width>x<height> <target_width>x<target_height> ",
- progname);
+ exec_name);
printf("<output_yuv> [<frames>]\n");
}
+void usage_exit(void) {
+ usage();
+ exit(EXIT_FAILURE);
+}
+
static int parse_dim(char *v, int *width, int *height) {
char *x = strchr(v, 'x');
if (x == NULL)
@@ -47,9 +55,11 @@
int f, frames;
int width, height, target_width, target_height;
+ exec_name = argv[0];
+
if (argc < 5) {
printf("Incorrect parameters:\n");
- usage(argv[0]);
+ usage();
return 1;
}
@@ -57,25 +67,25 @@
fout = argv[4];
if (!parse_dim(argv[2], &width, &height)) {
printf("Incorrect parameters: %s\n", argv[2]);
- usage(argv[0]);
+ usage();
return 1;
}
if (!parse_dim(argv[3], &target_width, &target_height)) {
printf("Incorrect parameters: %s\n", argv[3]);
- usage(argv[0]);
+ usage();
return 1;
}
fpin = fopen(fin, "rb");
if (fpin == NULL) {
printf("Can't open file %s to read\n", fin);
- usage(argv[0]);
+ usage();
return 1;
}
fpout = fopen(fout, "wb");
if (fpout == NULL) {
printf("Can't open file %s to write\n", fout);
- usage(argv[0]);
+ usage();
return 1;
}
if (argc >= 6)
|
CWE-119
|
usage(argv[0]);
usage(argv[0]);
usage(argv[0]);
usage(argv[0]);
usage(argv[0]);
|
exec_name = argv[0];
usage();
usage();
usage();
usage();
usage();
|
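The pointer arithmetic in this row assumes the planar I420 layout: a WxH frame is W*H luma bytes followed by two (W/2)x(H/2) chroma planes, hence the 3/2 sizing factor. A self-contained sketch of that layout (i420_planes is a hypothetical helper; dimensions are assumed even, which the example itself never checks):

#include <stdint.h>

typedef struct { uint8_t *y, *u, *v; } I420Planes;

static I420Planes i420_planes(uint8_t *buf, int w, int h) {
  I420Planes p;
  p.y = buf;                     /* w * h bytes */
  p.u = buf + w * h;             /* (w/2) * (h/2) bytes */
  p.v = p.u + (w / 2) * (h / 2); /* (w/2) * (h/2) bytes */
  return p;
}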
150,806 |
int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec = {0};
vpx_codec_enc_cfg_t cfg = {0};
int frame_count = 0;
vpx_image_t raw = {0};
vpx_codec_err_t res;
VpxVideoInfo info = {0};
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 2; // TODO(dkovalev) add command line argument
const double bits_per_pixel_per_frame = 0.067;
exec_name = argv[0];
if (argc != 6)
die("Invalid number of arguments");
encoder = get_vpx_encoder_by_name(argv[1]);
if (!encoder)
die("Unsupported codec.");
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[2], NULL, 0);
info.frame_height = strtol(argv[3], NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = (unsigned int)(bits_per_pixel_per_frame * cfg.g_w *
cfg.g_h * fps / 1000);
cfg.g_lag_in_frames = 0;
writer = vpx_video_writer_open(argv[5], kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", argv[5]);
if (!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading.", argv[4]);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
while (vpx_img_read(&raw, infile)) {
++frame_count;
if (frame_count == 22 && encoder->fourcc == VP8_FOURCC) {
set_roi_map(&cfg, &codec);
} else if (frame_count == 33) {
set_active_map(&cfg, &codec);
} else if (frame_count == 44) {
unset_active_map(&cfg, &codec);
}
encode_frame(&codec, &raw, frame_count, writer);
}
encode_frame(&codec, NULL, -1, writer);
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
vpx_image_t raw;
vpx_codec_err_t res;
VpxVideoInfo info;
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 2; // TODO(dkovalev) add command line argument
const double bits_per_pixel_per_frame = 0.067;
exec_name = argv[0];
if (argc != 6)
die("Invalid number of arguments");
memset(&info, 0, sizeof(info));
encoder = get_vpx_encoder_by_name(argv[1]);
if (encoder == NULL) {
die("Unsupported codec.");
}
assert(encoder != NULL);
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[2], NULL, 0);
info.frame_height = strtol(argv[3], NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = (unsigned int)(bits_per_pixel_per_frame * cfg.g_w *
cfg.g_h * fps / 1000);
cfg.g_lag_in_frames = 0;
writer = vpx_video_writer_open(argv[5], kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", argv[5]);
if (!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading.", argv[4]);
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
// Encode frames.
while (vpx_img_read(&raw, infile)) {
++frame_count;
if (frame_count == 22 && encoder->fourcc == VP8_FOURCC) {
set_roi_map(&cfg, &codec);
} else if (frame_count == 33) {
set_active_map(&cfg, &codec);
} else if (frame_count == 44) {
unset_active_map(&cfg, &codec);
}
encode_frame(&codec, &raw, frame_count, writer);
}
// Flush encoder.
while (encode_frame(&codec, NULL, -1, writer)) {}
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
|
@@ -42,20 +42,20 @@
// Use the `simple_decoder` example to decode this sample, and observe
// the change in the image at frames 22, 33, and 44.
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
@@ -64,7 +64,8 @@
static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_roi_map_t roi = {0};
+ vpx_roi_map_t roi;
+ memset(&roi, 0, sizeof(roi));
roi.rows = (cfg->g_h + 15) / 16;
roi.cols = (cfg->g_w + 15) / 16;
@@ -97,7 +98,7 @@
static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -114,7 +115,7 @@
static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -124,10 +125,11 @@
die_codec(codec, "Failed to set active map");
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0,
@@ -136,6 +138,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -149,30 +153,34 @@
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
FILE *infile = NULL;
- vpx_codec_ctx_t codec = {0};
- vpx_codec_enc_cfg_t cfg = {0};
+ vpx_codec_ctx_t codec;
+ vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
- vpx_image_t raw = {0};
+ vpx_image_t raw;
vpx_codec_err_t res;
- VpxVideoInfo info = {0};
+ VpxVideoInfo info;
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 2; // TODO(dkovalev) add command line argument
const double bits_per_pixel_per_frame = 0.067;
exec_name = argv[0];
-
if (argc != 6)
die("Invalid number of arguments");
- encoder = get_vpx_encoder_by_name(argv[1]);
- if (!encoder)
- die("Unsupported codec.");
+ memset(&info, 0, sizeof(info));
+ encoder = get_vpx_encoder_by_name(argv[1]);
+ if (encoder == NULL) {
+ die("Unsupported codec.");
+ }
+ assert(encoder != NULL);
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[2], NULL, 0);
info.frame_height = strtol(argv[3], NULL, 0);
@@ -191,9 +199,9 @@
die("Failed to allocate image.");
}
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -212,9 +220,10 @@
if (!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading.", argv[4]);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
++frame_count;
@@ -228,7 +237,10 @@
encode_frame(&codec, &raw, frame_count, writer);
}
- encode_frame(&codec, NULL, -1, writer);
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, writer)) {}
+
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
|
CWE-119
|
vpx_codec_ctx_t codec = {0};
vpx_codec_enc_cfg_t cfg = {0};
vpx_image_t raw = {0};
VpxVideoInfo info = {0};
encoder = get_vpx_encoder_by_name(argv[1]);
if (!encoder)
die("Unsupported codec.");
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
encode_frame(&codec, NULL, -1, writer);
|
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
vpx_image_t raw;
VpxVideoInfo info;
memset(&info, 0, sizeof(info));
encoder = get_vpx_encoder_by_name(argv[1]);
if (encoder == NULL) {
die("Unsupported codec.");
}
assert(encoder != NULL);
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
// Encode frames.
// Flush encoder.
while (encode_frame(&codec, NULL, -1, writer)) {}
|
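The substantive fix in this row is the flush handling: an encoder may buffer frames internally (e.g. with lag-in-frames enabled), so one flush call is not guaranteed to drain it. Passing a NULL image signals end-of-stream, and the patched encode_frame() now reports whether a pass produced packets so the caller can loop until empty. A hypothetical wrapper around the example's encode_frame():

static void flush_encoder(vpx_codec_ctx_t *codec, VpxVideoWriter *writer) {
  /* NULL image = flush request; repeat until a pass yields no packets */
  while (encode_frame(codec, NULL, -1, writer)) {
  }
}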
150,807 |
static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
vpx_active_map_t map = {0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
map.active_map = (uint8_t *)malloc(map.rows * map.cols);
for (i = 0; i < map.rows * map.cols; ++i)
map.active_map[i] = i % 2;
if (vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map))
die_codec(codec, "Failed to set active map");
free(map.active_map);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
map.active_map = (uint8_t *)malloc(map.rows * map.cols);
for (i = 0; i < map.rows * map.cols; ++i)
map.active_map[i] = i % 2;
if (vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map))
die_codec(codec, "Failed to set active map");
free(map.active_map);
}
|
@@ -42,20 +42,20 @@
// Use the `simple_decoder` example to decode this sample, and observe
// the change in the image at frames 22, 33, and 44.
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
@@ -64,7 +64,8 @@
static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_roi_map_t roi = {0};
+ vpx_roi_map_t roi;
+ memset(&roi, 0, sizeof(roi));
roi.rows = (cfg->g_h + 15) / 16;
roi.cols = (cfg->g_w + 15) / 16;
@@ -97,7 +98,7 @@
static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -114,7 +115,7 @@
static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -124,10 +125,11 @@
die_codec(codec, "Failed to set active map");
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0,
@@ -136,6 +138,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -149,30 +153,34 @@
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
FILE *infile = NULL;
- vpx_codec_ctx_t codec = {0};
- vpx_codec_enc_cfg_t cfg = {0};
+ vpx_codec_ctx_t codec;
+ vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
- vpx_image_t raw = {0};
+ vpx_image_t raw;
vpx_codec_err_t res;
- VpxVideoInfo info = {0};
+ VpxVideoInfo info;
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 2; // TODO(dkovalev) add command line argument
const double bits_per_pixel_per_frame = 0.067;
exec_name = argv[0];
-
if (argc != 6)
die("Invalid number of arguments");
- encoder = get_vpx_encoder_by_name(argv[1]);
- if (!encoder)
- die("Unsupported codec.");
+ memset(&info, 0, sizeof(info));
+ encoder = get_vpx_encoder_by_name(argv[1]);
+ if (encoder == NULL) {
+ die("Unsupported codec.");
+ }
+ assert(encoder != NULL);
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[2], NULL, 0);
info.frame_height = strtol(argv[3], NULL, 0);
@@ -191,9 +199,9 @@
die("Failed to allocate image.");
}
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -212,9 +220,10 @@
if (!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading.", argv[4]);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
++frame_count;
@@ -228,7 +237,10 @@
encode_frame(&codec, &raw, frame_count, writer);
}
- encode_frame(&codec, NULL, -1, writer);
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, writer)) {}
+
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
|
CWE-119
|
vpx_active_map_t map = {0};
|
vpx_active_map_t map = {0, 0, 0};
|
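Two details of this row are worth noting. The {0} to {0, 0, 0} change is pure zero-initialization cleanup (both forms zero the struct; silencing missing-field-initializer warnings is the presumed motive, not one the patch states). Functionally, the map flags each 16x16 macroblock as encoded (1) or skipped (0), and unset_active_map() above lifts the restriction by passing a NULL map:

vpx_active_map_t map = {0, 0, 0}; /* same effect as = {0} */
map.rows = (cfg->g_h + 15) / 16;  /* frame height in macroblocks */
map.cols = (cfg->g_w + 15) / 16;  /* frame width in macroblocks */
map.active_map = NULL;            /* NULL re-activates every block */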
150,808 |
static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
vpx_roi_map_t roi = {0};
roi.rows = (cfg->g_h + 15) / 16;
roi.cols = (cfg->g_w + 15) / 16;
roi.delta_q[0] = 0;
roi.delta_q[1] = -2;
roi.delta_q[2] = -4;
roi.delta_q[3] = -6;
roi.delta_lf[0] = 0;
roi.delta_lf[1] = 1;
roi.delta_lf[2] = 2;
roi.delta_lf[3] = 3;
roi.static_threshold[0] = 1500;
roi.static_threshold[1] = 1000;
roi.static_threshold[2] = 500;
roi.static_threshold[3] = 0;
roi.roi_map = (uint8_t *)malloc(roi.rows * roi.cols);
for (i = 0; i < roi.rows * roi.cols; ++i)
roi.roi_map[i] = i % 4;
if (vpx_codec_control(codec, VP8E_SET_ROI_MAP, &roi))
die_codec(codec, "Failed to set ROI map");
free(roi.roi_map);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
vpx_roi_map_t roi;
memset(&roi, 0, sizeof(roi));
roi.rows = (cfg->g_h + 15) / 16;
roi.cols = (cfg->g_w + 15) / 16;
roi.delta_q[0] = 0;
roi.delta_q[1] = -2;
roi.delta_q[2] = -4;
roi.delta_q[3] = -6;
roi.delta_lf[0] = 0;
roi.delta_lf[1] = 1;
roi.delta_lf[2] = 2;
roi.delta_lf[3] = 3;
roi.static_threshold[0] = 1500;
roi.static_threshold[1] = 1000;
roi.static_threshold[2] = 500;
roi.static_threshold[3] = 0;
roi.roi_map = (uint8_t *)malloc(roi.rows * roi.cols);
for (i = 0; i < roi.rows * roi.cols; ++i)
roi.roi_map[i] = i % 4;
if (vpx_codec_control(codec, VP8E_SET_ROI_MAP, &roi))
die_codec(codec, "Failed to set ROI map");
free(roi.roi_map);
}
|
@@ -42,20 +42,20 @@
// Use the `simple_decoder` example to decode this sample, and observe
// the change in the image at frames 22, 33, and 44.
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
@@ -64,7 +64,8 @@
static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_roi_map_t roi = {0};
+ vpx_roi_map_t roi;
+ memset(&roi, 0, sizeof(roi));
roi.rows = (cfg->g_h + 15) / 16;
roi.cols = (cfg->g_w + 15) / 16;
@@ -97,7 +98,7 @@
static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -114,7 +115,7 @@
static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -124,10 +125,11 @@
die_codec(codec, "Failed to set active map");
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0,
@@ -136,6 +138,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -149,30 +153,34 @@
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
FILE *infile = NULL;
- vpx_codec_ctx_t codec = {0};
- vpx_codec_enc_cfg_t cfg = {0};
+ vpx_codec_ctx_t codec;
+ vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
- vpx_image_t raw = {0};
+ vpx_image_t raw;
vpx_codec_err_t res;
- VpxVideoInfo info = {0};
+ VpxVideoInfo info;
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 2; // TODO(dkovalev) add command line argument
const double bits_per_pixel_per_frame = 0.067;
exec_name = argv[0];
-
if (argc != 6)
die("Invalid number of arguments");
- encoder = get_vpx_encoder_by_name(argv[1]);
- if (!encoder)
- die("Unsupported codec.");
+ memset(&info, 0, sizeof(info));
+ encoder = get_vpx_encoder_by_name(argv[1]);
+ if (encoder == NULL) {
+ die("Unsupported codec.");
+ }
+ assert(encoder != NULL);
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[2], NULL, 0);
info.frame_height = strtol(argv[3], NULL, 0);
@@ -191,9 +199,9 @@
die("Failed to allocate image.");
}
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -212,9 +220,10 @@
if (!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading.", argv[4]);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
++frame_count;
@@ -228,7 +237,10 @@
encode_frame(&codec, &raw, frame_count, writer);
}
- encode_frame(&codec, NULL, -1, writer);
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, writer)) {}
+
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
|
CWE-119
|
vpx_roi_map_t roi = {0};
|
vpx_roi_map_t roi;
memset(&roi, 0, sizeof(roi));
|
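Editor's note on record 150,808: both the vulnerable and patched `set_roi_map()` call `malloc(roi.rows * roi.cols)` without checking the result or guarding the multiplication. The following helper is hypothetical, not part of the patch; it is a sketch of how that allocation could be hardened:

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical helper: allocate a zeroed rows x cols byte map with
   overflow and NULL checks; returns NULL on failure. */
static uint8_t *alloc_block_map(unsigned int rows, unsigned int cols) {
  if (rows == 0 || cols == 0 || rows > UINT_MAX / cols)
    return NULL;  /* rows * cols would overflow or be empty */
  return (uint8_t *)calloc((size_t)rows * cols, 1);  /* zero-filled */
}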
150,809 |
static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
vpx_active_map_t map = {0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
map.active_map = NULL;
if (vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map))
die_codec(codec, "Failed to set active map");
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
map.active_map = NULL;
if (vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map))
die_codec(codec, "Failed to set active map");
}
|
@@ -42,20 +42,20 @@
// Use the `simple_decoder` example to decode this sample, and observe
// the change in the image at frames 22, 33, and 44.
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
@@ -64,7 +64,8 @@
static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_roi_map_t roi = {0};
+ vpx_roi_map_t roi;
+ memset(&roi, 0, sizeof(roi));
roi.rows = (cfg->g_h + 15) / 16;
roi.cols = (cfg->g_w + 15) / 16;
@@ -97,7 +98,7 @@
static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -114,7 +115,7 @@
static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
@@ -124,10 +125,11 @@
die_codec(codec, "Failed to set active map");
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0,
@@ -136,6 +138,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -149,30 +153,34 @@
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
FILE *infile = NULL;
- vpx_codec_ctx_t codec = {0};
- vpx_codec_enc_cfg_t cfg = {0};
+ vpx_codec_ctx_t codec;
+ vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
- vpx_image_t raw = {0};
+ vpx_image_t raw;
vpx_codec_err_t res;
- VpxVideoInfo info = {0};
+ VpxVideoInfo info;
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 2; // TODO(dkovalev) add command line argument
const double bits_per_pixel_per_frame = 0.067;
exec_name = argv[0];
-
if (argc != 6)
die("Invalid number of arguments");
- encoder = get_vpx_encoder_by_name(argv[1]);
- if (!encoder)
- die("Unsupported codec.");
+ memset(&info, 0, sizeof(info));
+ encoder = get_vpx_encoder_by_name(argv[1]);
+ if (encoder == NULL) {
+ die("Unsupported codec.");
+ }
+ assert(encoder != NULL);
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[2], NULL, 0);
info.frame_height = strtol(argv[3], NULL, 0);
@@ -191,9 +199,9 @@
die("Failed to allocate image.");
}
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -212,9 +220,10 @@
if (!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading.", argv[4]);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
++frame_count;
@@ -228,7 +237,10 @@
encode_frame(&codec, &raw, frame_count, writer);
}
- encode_frame(&codec, NULL, -1, writer);
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, writer)) {}
+
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
|
CWE-119
|
vpx_active_map_t map = {0};
|
vpx_active_map_t map = {0, 0, 0};
|
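Editor's note on record 150,809: `unset_active_map()` deliberately passes a NULL `active_map` with the frame's macroblock dimensions, which the example treats as "no blocks filtered". A minimal sketch of the same pattern, assuming the libvpx headers used by the record:

#include <string.h>
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"

/* Sketch mirroring unset_active_map(): clear any previously set active
   map by handing the encoder a NULL map of the right dimensions. */
static int clear_active_map(vpx_codec_ctx_t *codec,
                            unsigned int frame_w, unsigned int frame_h) {
  vpx_active_map_t map;
  memset(&map, 0, sizeof(map));
  map.rows = (frame_h + 15) / 16;  /* 16x16 macroblock grid */
  map.cols = (frame_w + 15) / 16;
  map.active_map = NULL;           /* NULL: treat every block as active */
  return vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map) == VPX_CODEC_OK;
}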
150,810 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
VpxVideoReader *reader = NULL;
const VpxInterface *decoder = NULL;
const VpxVideoInfo *info = NULL;
exec_name = argv[0];
if (argc != 3)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing.", argv[2]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
die_codec(&codec, "Failed to decode frame.");
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
vpx_img_write(img, outfile);
++frame_cnt;
}
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec");
printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
info->frame_width, info->frame_height, argv[2]);
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
int frame_cnt = 0;
FILE *outfile = NULL;
vpx_codec_ctx_t codec;
VpxVideoReader *reader = NULL;
const VpxInterface *decoder = NULL;
const VpxVideoInfo *info = NULL;
exec_name = argv[0];
if (argc != 3)
die("Invalid number of arguments.");
reader = vpx_video_reader_open(argv[1]);
if (!reader)
die("Failed to open %s for reading.", argv[1]);
if (!(outfile = fopen(argv[2], "wb")))
die("Failed to open %s for writing.", argv[2]);
info = vpx_video_reader_get_info(reader);
decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc);
if (!decoder)
die("Unknown input codec.");
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = NULL;
size_t frame_size = 0;
const unsigned char *frame = vpx_video_reader_get_frame(reader,
&frame_size);
if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0))
die_codec(&codec, "Failed to decode frame.");
while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
vpx_img_write(img, outfile);
++frame_cnt;
}
}
printf("Processed %d frames.\n", frame_cnt);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec");
printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n",
info->frame_width, info->frame_height, argv[2]);
vpx_video_reader_close(reader);
fclose(outfile);
return EXIT_SUCCESS;
}
|
@@ -29,30 +29,29 @@
// -----------------
// For decoders, you only have to include `vpx_decoder.h` and then any
// header files for the specific codecs you use. In this case, we're using
-// vp8. The `VPX_CODEC_DISABLE_COMPAT` macro can be defined to ensure
-// strict compliance with the latest SDK by disabling some backwards
-// compatibility features. Defining this macro is encouraged.
+// vp8.
//
// Initializing The Codec
// ----------------------
-// The decoder is initialized by the following code. This is an example for
-// the VP8 decoder, but the code is analogous for all algorithms. Replace
-// `vpx_codec_vp8_dx()` with a pointer to the interface exposed by the
-// algorithm you want to use. The `cfg` argument is left as NULL in this
-// example, because we want the algorithm to determine the stream
-// configuration (width/height) and allocate memory automatically. This
-// parameter is generally only used if you need to preallocate memory,
-// particularly in External Memory Allocation mode.
+// The libvpx decoder is initialized by the call to vpx_codec_dec_init().
+// Determining the codec interface to use is handled by VpxVideoReader and the
+// functions prefixed with vpx_video_reader_. Discussion of those functions is
+// beyond the scope of this example, but the main gist is to open the input file
+// and parse just enough of it to determine if it's a VPx file and which VPx
+// codec is contained within the file.
+// Note the NULL pointer passed to vpx_codec_dec_init(). We do that in this
+// example because we want the algorithm to determine the stream configuration
+// (width/height) and allocate memory automatically.
//
// Decoding A Frame
// ----------------
// Once the frame has been read into memory, it is decoded using the
// `vpx_codec_decode` function. The call takes a pointer to the data
-// (`frame`) and the length of the data (`frame_sz`). No application data
+// (`frame`) and the length of the data (`frame_size`). No application data
// is associated with the frame in this example, so the `user_priv`
// parameter is NULL. The `deadline` parameter is left at zero for this
-// example. This parameter is generally only used when doing adaptive
-// postprocessing.
+// example. This parameter is generally only used when doing adaptive post
+// processing.
//
// Codecs may produce a variable number of output frames for every call to
// `vpx_codec_decode`. These frames are retrieved by the
@@ -74,25 +73,22 @@
// --------------
// This example does not special case any error return codes. If there was
// an error, a descriptive message is printed and the program exits. With
-// few exeptions, vpx_codec functions return an enumerated error status,
+// few exceptions, vpx_codec functions return an enumerated error status,
// with the value `0` indicating success.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
-
-#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
-#include "./tools_common.h"
-#include "./video_reader.h"
+#include "../tools_common.h"
+#include "../video_reader.h"
#include "./vpx_config.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <infile> <outfile>\n", exec_name);
exit(EXIT_FAILURE);
}
@@ -123,9 +119,9 @@
if (!decoder)
die("Unknown input codec.");
- printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
- if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
+ if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
die_codec(&codec, "Failed to initialize decoder.");
while (vpx_video_reader_read_frame(reader)) {
|
CWE-119
|
printf("Using %s\n", vpx_codec_iface_name(decoder->interface()));
if (vpx_codec_dec_init(&codec, decoder->interface(), NULL, 0))
|
printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface()));
if (vpx_codec_dec_init(&codec, decoder->codec_interface(), NULL, 0))
|
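Editor's note on record 150,810: the patch renames the `VpxInterface` accessor from `interface()` to `codec_interface()`. One likely motive (an assumption, not stated in the patch) is that `interface` is defined as a macro by some Windows SDK headers, which breaks the member name. A sketch of the decode loop using the renamed accessor, assuming the example helpers from tools_common.h:

#include <stdio.h>
#include "vpx/vpx_decoder.h"
#include "../tools_common.h"  /* VpxInterface, vpx_img_write (example helpers) */

/* Sketch: initialize the decoder via codec_interface(), then drain all
   images produced by one compressed frame into outfile. */
static int init_and_decode(const VpxInterface *decoder, vpx_codec_ctx_t *codec,
                           const unsigned char *frame, size_t frame_size,
                           FILE *outfile) {
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img = NULL;
  int images = 0;

  if (vpx_codec_dec_init(codec, decoder->codec_interface(), NULL, 0))
    return -1;
  if (vpx_codec_decode(codec, frame, (unsigned int)frame_size, NULL, 0))
    return -1;
  while ((img = vpx_codec_get_frame(codec, &iter)) != NULL) {
    vpx_img_write(img, outfile);
    ++images;
  }
  return images;  /* number of images one frame produced */
}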
150,811 |
int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
vpx_image_t raw;
vpx_codec_err_t res;
VpxVideoInfo info = {0};
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
int keyframe_interval = 0;
const char *codec_arg = NULL;
const char *width_arg = NULL;
const char *height_arg = NULL;
const char *infile_arg = NULL;
const char *outfile_arg = NULL;
const char *keyframe_interval_arg = NULL;
exec_name = argv[0];
if (argc < 7)
die("Invalid number of arguments");
codec_arg = argv[1];
width_arg = argv[2];
height_arg = argv[3];
infile_arg = argv[4];
outfile_arg = argv[5];
keyframe_interval_arg = argv[6];
encoder = get_vpx_encoder_by_name(codec_arg);
if (!encoder)
die("Unsupported codec.");
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(width_arg, NULL, 0);
info.frame_height = strtol(height_arg, NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
keyframe_interval = strtol(keyframe_interval_arg, NULL, 0);
if (keyframe_interval < 0)
die("Invalid keyframe interval value.");
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = bitrate;
cfg.g_error_resilient = argc > 7 ? strtol(argv[7], NULL, 0) : 0;
writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", outfile_arg);
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading.", infile_arg);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
while (vpx_img_read(&raw, infile)) {
int flags = 0;
if (keyframe_interval > 0 && frame_count % keyframe_interval == 0)
flags |= VPX_EFLAG_FORCE_KF;
encode_frame(&codec, &raw, frame_count++, flags, writer);
}
encode_frame(&codec, NULL, -1, 0, writer); // flush the encoder
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
vpx_image_t raw;
vpx_codec_err_t res;
VpxVideoInfo info = {0};
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
int keyframe_interval = 0;
const char *codec_arg = NULL;
const char *width_arg = NULL;
const char *height_arg = NULL;
const char *infile_arg = NULL;
const char *outfile_arg = NULL;
const char *keyframe_interval_arg = NULL;
exec_name = argv[0];
if (argc < 7)
die("Invalid number of arguments");
codec_arg = argv[1];
width_arg = argv[2];
height_arg = argv[3];
infile_arg = argv[4];
outfile_arg = argv[5];
keyframe_interval_arg = argv[6];
encoder = get_vpx_encoder_by_name(codec_arg);
if (!encoder)
die("Unsupported codec.");
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(width_arg, NULL, 0);
info.frame_height = strtol(height_arg, NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
keyframe_interval = strtol(keyframe_interval_arg, NULL, 0);
if (keyframe_interval < 0)
die("Invalid keyframe interval value.");
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = bitrate;
cfg.g_error_resilient = argc > 7 ? strtol(argv[7], NULL, 0) : 0;
writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", outfile_arg);
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading.", infile_arg);
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
// Encode frames.
while (vpx_img_read(&raw, infile)) {
int flags = 0;
if (keyframe_interval > 0 && frame_count % keyframe_interval == 0)
flags |= VPX_EFLAG_FORCE_KF;
encode_frame(&codec, &raw, frame_count++, flags, writer);
}
// Flush encoder.
while (encode_frame(&codec, NULL, -1, 0, writer)) {};
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
|
@@ -28,9 +28,7 @@
// -----------------
// For encoders, you only have to include `vpx_encoder.h` and then any
// header files for the specific codecs you use. In this case, we're using
-// vp8. The `VPX_CODEC_DISABLE_COMPAT` macro can be defined to ensure
-// strict compliance with the latest SDK by disabling some backwards
-// compatibility features. Defining this macro is encouraged.
+// vp8.
//
// Getting The Default Configuration
// ---------------------------------
@@ -101,15 +99,14 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr,
"Usage: %s <codec> <width> <height> <infile> <outfile> "
"<keyframe-interval> [<error-resilient>]\nSee comments in "
@@ -118,11 +115,12 @@
exit(EXIT_FAILURE);
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- int flags,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ int flags,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1,
@@ -131,6 +129,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -139,11 +139,12 @@
pkt->data.frame.pts)) {
die_codec(codec, "Failed to write compressed frame");
}
-
printf(keyframe ? "K" : ".");
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
@@ -207,9 +208,9 @@
if (keyframe_interval < 0)
die("Invalid keyframe interval value.");
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -227,16 +228,19 @@
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading.", infile_arg);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
int flags = 0;
if (keyframe_interval > 0 && frame_count % keyframe_interval == 0)
flags |= VPX_EFLAG_FORCE_KF;
encode_frame(&codec, &raw, frame_count++, flags, writer);
}
- encode_frame(&codec, NULL, -1, 0, writer); // flush the encoder
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, 0, writer)) {};
printf("\n");
fclose(infile);
|
CWE-119
|
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
encode_frame(&codec, NULL, -1, 0, writer); // flush the encoder
|
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
// Encode frames.
// Flush encoder.
while (encode_frame(&codec, NULL, -1, 0, writer)) {};
|
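Editor's note on record 150,811: the patch changes `encode_frame()` to report whether any packets came out, so the flush can loop until the encoder is drained; a single NULL-frame call is not enough when the encoder buffers frames (for example with lag-in-frames). A minimal standalone sketch of that drain loop, assuming only the libvpx encoder API:

#include "vpx/vpx_encoder.h"

/* Sketch: call vpx_codec_encode() with a NULL image until
   vpx_codec_get_cx_data() stops yielding packets. */
static void flush_encoder(vpx_codec_ctx_t *codec) {
  int got_pkts;
  do {
    vpx_codec_iter_t iter = NULL;
    const vpx_codec_cx_pkt_t *pkt;
    got_pkts = 0;
    if (vpx_codec_encode(codec, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY))
      break;  /* encode error; a real program would report it */
    while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
      got_pkts = 1;
      (void)pkt;  /* a real program would write VPX_CODEC_CX_FRAME_PKT data */
    }
  } while (got_pkts);
}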
150,812 |
void usage_exit() {
fprintf(stderr,
"Usage: %s <codec> <width> <height> <infile> <outfile> "
"<keyframe-interval> [<error-resilient>]\nSee comments in "
"simple_encoder.c for more information.\n",
exec_name);
exit(EXIT_FAILURE);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void usage_exit(void) {
fprintf(stderr,
"Usage: %s <codec> <width> <height> <infile> <outfile> "
"<keyframe-interval> [<error-resilient>]\nSee comments in "
"simple_encoder.c for more information.\n",
exec_name);
exit(EXIT_FAILURE);
}
|
@@ -28,9 +28,7 @@
// -----------------
// For encoders, you only have to include `vpx_encoder.h` and then any
// header files for the specific codecs you use. In this case, we're using
-// vp8. The `VPX_CODEC_DISABLE_COMPAT` macro can be defined to ensure
-// strict compliance with the latest SDK by disabling some backwards
-// compatibility features. Defining this macro is encouraged.
+// vp8.
//
// Getting The Default Configuration
// ---------------------------------
@@ -101,15 +99,14 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr,
"Usage: %s <codec> <width> <height> <infile> <outfile> "
"<keyframe-interval> [<error-resilient>]\nSee comments in "
@@ -118,11 +115,12 @@
exit(EXIT_FAILURE);
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- int flags,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ int flags,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1,
@@ -131,6 +129,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -139,11 +139,12 @@
pkt->data.frame.pts)) {
die_codec(codec, "Failed to write compressed frame");
}
-
printf(keyframe ? "K" : ".");
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
@@ -207,9 +208,9 @@
if (keyframe_interval < 0)
die("Invalid keyframe interval value.");
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -227,16 +228,19 @@
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading.", infile_arg);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
int flags = 0;
if (keyframe_interval > 0 && frame_count % keyframe_interval == 0)
flags |= VPX_EFLAG_FORCE_KF;
encode_frame(&codec, &raw, frame_count++, flags, writer);
}
- encode_frame(&codec, NULL, -1, 0, writer); // flush the encoder
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, 0, writer)) {};
printf("\n");
fclose(infile);
|
CWE-119
| null |
void usage_exit(void) {
|
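Editor's note on record 150,812: the whole fix is `()` versus `(void)`. In C (unlike C++), an empty parameter list declares a function with unspecified arguments, so the compiler cannot reject a bad call; `(void)` makes "takes no arguments" explicit. A two-line sketch of the difference:

void usage_exit(void);   /* no arguments: usage_exit(1) is a compile error */
void usage_exit_old();   /* unspecified arguments: usage_exit_old(1) compiles */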
150,813 |
static void get_frame_stats(vpx_codec_ctx_t *ctx,
const vpx_image_t *img,
vpx_codec_pts_t pts,
unsigned int duration,
vpx_enc_frame_flags_t flags,
unsigned int deadline,
vpx_fixed_buf_t *stats) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
deadline);
if (res != VPX_CODEC_OK)
die_codec(ctx, "Failed to get frame stats.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
if (pkt->kind == VPX_CODEC_STATS_PKT) {
const uint8_t *const pkt_buf = pkt->data.twopass_stats.buf;
const size_t pkt_size = pkt->data.twopass_stats.sz;
stats->buf = realloc(stats->buf, stats->sz + pkt_size);
memcpy((uint8_t *)stats->buf + stats->sz, pkt_buf, pkt_size);
stats->sz += pkt_size;
}
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static int get_frame_stats(vpx_codec_ctx_t *ctx,
const vpx_image_t *img,
vpx_codec_pts_t pts,
unsigned int duration,
vpx_enc_frame_flags_t flags,
unsigned int deadline,
vpx_fixed_buf_t *stats) {
int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
deadline);
if (res != VPX_CODEC_OK)
die_codec(ctx, "Failed to get frame stats.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
got_pkts = 1;
if (pkt->kind == VPX_CODEC_STATS_PKT) {
const uint8_t *const pkt_buf = pkt->data.twopass_stats.buf;
const size_t pkt_size = pkt->data.twopass_stats.sz;
stats->buf = realloc(stats->buf, stats->sz + pkt_size);
memcpy((uint8_t *)stats->buf + stats->sz, pkt_buf, pkt_size);
stats->sz += pkt_size;
}
}
return got_pkts;
}
|
@@ -28,9 +28,8 @@
// Encoding A Frame
// ----------------
// Encoding a frame in two pass mode is identical to the simple encoder
-// example, except the deadline is set to VPX_DL_BEST_QUALITY to get the
-// best quality possible. VPX_DL_GOOD_QUALITY could also be used.
-//
+// example. To increase the quality while sacrificing encoding speed,
+// VPX_DL_BEST_QUALITY can be used in place of VPX_DL_GOOD_QUALITY.
//
// Processing Statistics Packets
// -----------------------------
@@ -52,27 +51,27 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
}
-static void get_frame_stats(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned int duration,
- vpx_enc_frame_flags_t flags,
- unsigned int deadline,
- vpx_fixed_buf_t *stats) {
+static int get_frame_stats(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned int duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned int deadline,
+ vpx_fixed_buf_t *stats) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
@@ -81,6 +80,8 @@
die_codec(ctx, "Failed to get frame stats.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_STATS_PKT) {
const uint8_t *const pkt_buf = pkt->data.twopass_stats.buf;
const size_t pkt_size = pkt->data.twopass_stats.sz;
@@ -89,15 +90,18 @@
stats->sz += pkt_size;
}
}
+
+ return got_pkts;
}
-static void encode_frame(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned int duration,
- vpx_enc_frame_flags_t flags,
- unsigned int deadline,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned int duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned int deadline,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
@@ -106,6 +110,7 @@
die_codec(ctx, "Failed to encode frame.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+ got_pkts = 1;
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
@@ -117,19 +122,90 @@
fflush(stdout);
}
}
+
+ return got_pkts;
+}
+
+static vpx_fixed_buf_t pass0(vpx_image_t *raw,
+ FILE *infile,
+ const VpxInterface *encoder,
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_ctx_t codec;
+ int frame_count = 0;
+ vpx_fixed_buf_t stats = {NULL, 0};
+
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+ die_codec(&codec, "Failed to initialize encoder");
+
+ // Calculate frame statistics.
+ while (vpx_img_read(raw, infile)) {
+ ++frame_count;
+ get_frame_stats(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY,
+ &stats);
+ }
+
+ // Flush encoder.
+ while (get_frame_stats(&codec, NULL, frame_count, 1, 0,
+ VPX_DL_GOOD_QUALITY, &stats)) {}
+
+ printf("Pass 0 complete. Processed %d frames.\n", frame_count);
+ if (vpx_codec_destroy(&codec))
+ die_codec(&codec, "Failed to destroy codec.");
+
+ return stats;
+}
+
+static void pass1(vpx_image_t *raw,
+ FILE *infile,
+ const char *outfile_name,
+ const VpxInterface *encoder,
+ const vpx_codec_enc_cfg_t *cfg) {
+ VpxVideoInfo info = {
+ encoder->fourcc,
+ cfg->g_w,
+ cfg->g_h,
+ {cfg->g_timebase.num, cfg->g_timebase.den}
+ };
+ VpxVideoWriter *writer = NULL;
+ vpx_codec_ctx_t codec;
+ int frame_count = 0;
+
+ writer = vpx_video_writer_open(outfile_name, kContainerIVF, &info);
+ if (!writer)
+ die("Failed to open %s for writing", outfile_name);
+
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+ die_codec(&codec, "Failed to initialize encoder");
+
+ // Encode frames.
+ while (vpx_img_read(raw, infile)) {
+ ++frame_count;
+ encode_frame(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY, writer);
+ }
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY, writer)) {}
+
+ printf("\n");
+
+ if (vpx_codec_destroy(&codec))
+ die_codec(&codec, "Failed to destroy codec.");
+
+ vpx_video_writer_close(writer);
+
+ printf("Pass 1 complete. Processed %d frames.\n", frame_count);
}
int main(int argc, char **argv) {
FILE *infile = NULL;
- VpxVideoWriter *writer = NULL;
+ int w, h;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
vpx_image_t raw;
vpx_codec_err_t res;
- vpx_fixed_buf_t stats = {0};
- VpxVideoInfo info = {0};
+ vpx_fixed_buf_t stats;
+
const VpxInterface *encoder = NULL;
- int pass;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
const char *const codec_arg = argv[1];
@@ -146,85 +222,44 @@
if (!encoder)
die("Unsupported codec.");
- info.codec_fourcc = encoder->fourcc;
- info.time_base.numerator = 1;
- info.time_base.denominator = fps;
- info.frame_width = strtol(width_arg, NULL, 0);
- info.frame_height = strtol(height_arg, NULL, 0);
+ w = strtol(width_arg, NULL, 0);
+ h = strtol(height_arg, NULL, 0);
- if (info.frame_width <= 0 ||
- info.frame_height <= 0 ||
- (info.frame_width % 2) != 0 ||
- (info.frame_height % 2) != 0) {
- die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
- }
+ if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0)
+ die("Invalid frame size: %dx%d", w, h);
- if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
- info.frame_height, 1)) {
- die("Failed to allocate image", info.frame_width, info.frame_height);
- }
+ if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, w, h, 1))
+ die("Failed to allocate image", w, h);
- writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
- if (!writer)
- die("Failed to open %s for writing", outfile_arg);
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
-
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ // Configuration
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
- cfg.g_w = info.frame_width;
- cfg.g_h = info.frame_height;
- cfg.g_timebase.num = info.time_base.numerator;
- cfg.g_timebase.den = info.time_base.denominator;
+ cfg.g_w = w;
+ cfg.g_h = h;
+ cfg.g_timebase.num = 1;
+ cfg.g_timebase.den = fps;
cfg.rc_target_bitrate = bitrate;
- for (pass = 0; pass < 2; ++pass) {
- int frame_count = 0;
+ if (!(infile = fopen(infile_arg, "rb")))
+ die("Failed to open %s for reading", infile_arg);
- if (pass == 0) {
- cfg.g_pass = VPX_RC_FIRST_PASS;
- } else {
- cfg.g_pass = VPX_RC_LAST_PASS;
- cfg.rc_twopass_stats_in = stats;
- }
+ // Pass 0
+ cfg.g_pass = VPX_RC_FIRST_PASS;
+ stats = pass0(&raw, infile, encoder, &cfg);
- if (!(infile = fopen(infile_arg, "rb")))
- die("Failed to open %s for reading", infile_arg);
-
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
- die_codec(&codec, "Failed to initialize encoder");
-
- while (vpx_img_read(&raw, infile)) {
- ++frame_count;
-
- if (pass == 0) {
- get_frame_stats(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- &stats);
- } else {
- encode_frame(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- writer);
- }
- }
-
- if (pass == 0) {
- get_frame_stats(&codec, NULL, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- &stats);
- } else {
- printf("\n");
- }
-
- fclose(infile);
- printf("Pass %d complete. Processed %d frames.\n", pass + 1, frame_count);
- if (vpx_codec_destroy(&codec))
- die_codec(&codec, "Failed to destroy codec.");
- }
-
- vpx_img_free(&raw);
+ // Pass 1
+ rewind(infile);
+ cfg.g_pass = VPX_RC_LAST_PASS;
+ cfg.rc_twopass_stats_in = stats;
+ pass1(&raw, infile, outfile_arg, encoder, &cfg);
free(stats.buf);
- vpx_video_writer_close(writer);
+ vpx_img_free(&raw);
+ fclose(infile);
return EXIT_SUCCESS;
}
|
CWE-119
|
const vpx_image_t *img,
vpx_codec_pts_t pts,
unsigned int duration,
vpx_enc_frame_flags_t flags,
unsigned int deadline,
vpx_fixed_buf_t *stats) {
|
static int get_frame_stats(vpx_codec_ctx_t *ctx,
const vpx_image_t *img,
vpx_codec_pts_t pts,
unsigned int duration,
vpx_enc_frame_flags_t flags,
unsigned int deadline,
vpx_fixed_buf_t *stats) {
int got_pkts = 0;
got_pkts = 1;
return got_pkts;
|
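Editor's note on record 150,813: in both versions, `get_frame_stats()` assigns the `realloc` result straight to `stats->buf`, which leaks the old buffer on failure and then passes NULL to `memcpy`. The following checked append is hypothetical, not part of the patch:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "vpx/vpx_encoder.h"  /* vpx_fixed_buf_t */

/* Hypothetical checked version of the stats append: keep the old buffer
   on realloc failure and report the error instead of writing through
   a NULL pointer. Returns 1 on success, 0 on allocation failure. */
static int stats_append(vpx_fixed_buf_t *stats, const void *pkt_buf,
                        size_t pkt_size) {
  void *p = realloc(stats->buf, stats->sz + pkt_size);
  if (p == NULL)
    return 0;  /* old stats->buf is still valid and owned by the caller */
  stats->buf = p;
  memcpy((uint8_t *)stats->buf + stats->sz, pkt_buf, pkt_size);
  stats->sz += pkt_size;
  return 1;
}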
150,814 |
int main(int argc, char **argv) {
FILE *infile = NULL;
VpxVideoWriter *writer = NULL;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
vpx_image_t raw;
vpx_codec_err_t res;
vpx_fixed_buf_t stats = {0};
VpxVideoInfo info = {0};
const VpxInterface *encoder = NULL;
int pass;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
const char *const codec_arg = argv[1];
const char *const width_arg = argv[2];
const char *const height_arg = argv[3];
const char *const infile_arg = argv[4];
const char *const outfile_arg = argv[5];
exec_name = argv[0];
if (argc != 6)
die("Invalid number of arguments.");
encoder = get_vpx_encoder_by_name(codec_arg);
if (!encoder)
die("Unsupported codec.");
info.codec_fourcc = encoder->fourcc;
info.time_base.numerator = 1;
info.time_base.denominator = fps;
info.frame_width = strtol(width_arg, NULL, 0);
info.frame_height = strtol(height_arg, NULL, 0);
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image", info.frame_width, info.frame_height);
}
writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing", outfile_arg);
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = bitrate;
for (pass = 0; pass < 2; ++pass) {
int frame_count = 0;
if (pass == 0) {
cfg.g_pass = VPX_RC_FIRST_PASS;
} else {
cfg.g_pass = VPX_RC_LAST_PASS;
cfg.rc_twopass_stats_in = stats;
}
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading", infile_arg);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
while (vpx_img_read(&raw, infile)) {
++frame_count;
if (pass == 0) {
get_frame_stats(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
&stats);
} else {
encode_frame(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
writer);
}
}
if (pass == 0) {
get_frame_stats(&codec, NULL, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
&stats);
} else {
printf("\n");
}
fclose(infile);
printf("Pass %d complete. Processed %d frames.\n", pass + 1, frame_count);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
}
vpx_img_free(&raw);
free(stats.buf);
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
FILE *infile = NULL;
int w, h;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
vpx_image_t raw;
vpx_codec_err_t res;
vpx_fixed_buf_t stats;
const VpxInterface *encoder = NULL;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
const char *const codec_arg = argv[1];
const char *const width_arg = argv[2];
const char *const height_arg = argv[3];
const char *const infile_arg = argv[4];
const char *const outfile_arg = argv[5];
exec_name = argv[0];
if (argc != 6)
die("Invalid number of arguments.");
encoder = get_vpx_encoder_by_name(codec_arg);
if (!encoder)
die("Unsupported codec.");
w = strtol(width_arg, NULL, 0);
h = strtol(height_arg, NULL, 0);
if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0)
die("Invalid frame size: %dx%d", w, h);
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, w, h, 1))
die("Failed to allocate image", w, h);
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
// Configuration
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = w;
cfg.g_h = h;
cfg.g_timebase.num = 1;
cfg.g_timebase.den = fps;
cfg.rc_target_bitrate = bitrate;
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading", infile_arg);
// Pass 0
cfg.g_pass = VPX_RC_FIRST_PASS;
stats = pass0(&raw, infile, encoder, &cfg);
// Pass 1
rewind(infile);
cfg.g_pass = VPX_RC_LAST_PASS;
cfg.rc_twopass_stats_in = stats;
pass1(&raw, infile, outfile_arg, encoder, &cfg);
free(stats.buf);
vpx_img_free(&raw);
fclose(infile);
return EXIT_SUCCESS;
}
|
@@ -28,9 +28,8 @@
// Encoding A Frame
// ----------------
// Encoding a frame in two pass mode is identical to the simple encoder
-// example, except the deadline is set to VPX_DL_BEST_QUALITY to get the
-// best quality possible. VPX_DL_GOOD_QUALITY could also be used.
-//
+// example. To increase the quality while sacrificing encoding speed,
+// VPX_DL_BEST_QUALITY can be used in place of VPX_DL_GOOD_QUALITY.
//
// Processing Statistics Packets
// -----------------------------
@@ -52,27 +51,27 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
}
-static void get_frame_stats(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned int duration,
- vpx_enc_frame_flags_t flags,
- unsigned int deadline,
- vpx_fixed_buf_t *stats) {
+static int get_frame_stats(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned int duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned int deadline,
+ vpx_fixed_buf_t *stats) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
@@ -81,6 +80,8 @@
die_codec(ctx, "Failed to get frame stats.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_STATS_PKT) {
const uint8_t *const pkt_buf = pkt->data.twopass_stats.buf;
const size_t pkt_size = pkt->data.twopass_stats.sz;
@@ -89,15 +90,18 @@
stats->sz += pkt_size;
}
}
+
+ return got_pkts;
}
-static void encode_frame(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned int duration,
- vpx_enc_frame_flags_t flags,
- unsigned int deadline,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned int duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned int deadline,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
@@ -106,6 +110,7 @@
die_codec(ctx, "Failed to encode frame.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+ got_pkts = 1;
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
@@ -117,19 +122,90 @@
fflush(stdout);
}
}
+
+ return got_pkts;
+}
+
+static vpx_fixed_buf_t pass0(vpx_image_t *raw,
+ FILE *infile,
+ const VpxInterface *encoder,
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_ctx_t codec;
+ int frame_count = 0;
+ vpx_fixed_buf_t stats = {NULL, 0};
+
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+ die_codec(&codec, "Failed to initialize encoder");
+
+ // Calculate frame statistics.
+ while (vpx_img_read(raw, infile)) {
+ ++frame_count;
+ get_frame_stats(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY,
+ &stats);
+ }
+
+ // Flush encoder.
+ while (get_frame_stats(&codec, NULL, frame_count, 1, 0,
+ VPX_DL_GOOD_QUALITY, &stats)) {}
+
+ printf("Pass 0 complete. Processed %d frames.\n", frame_count);
+ if (vpx_codec_destroy(&codec))
+ die_codec(&codec, "Failed to destroy codec.");
+
+ return stats;
+}
+
+static void pass1(vpx_image_t *raw,
+ FILE *infile,
+ const char *outfile_name,
+ const VpxInterface *encoder,
+ const vpx_codec_enc_cfg_t *cfg) {
+ VpxVideoInfo info = {
+ encoder->fourcc,
+ cfg->g_w,
+ cfg->g_h,
+ {cfg->g_timebase.num, cfg->g_timebase.den}
+ };
+ VpxVideoWriter *writer = NULL;
+ vpx_codec_ctx_t codec;
+ int frame_count = 0;
+
+ writer = vpx_video_writer_open(outfile_name, kContainerIVF, &info);
+ if (!writer)
+ die("Failed to open %s for writing", outfile_name);
+
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+ die_codec(&codec, "Failed to initialize encoder");
+
+ // Encode frames.
+ while (vpx_img_read(raw, infile)) {
+ ++frame_count;
+ encode_frame(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY, writer);
+ }
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY, writer)) {}
+
+ printf("\n");
+
+ if (vpx_codec_destroy(&codec))
+ die_codec(&codec, "Failed to destroy codec.");
+
+ vpx_video_writer_close(writer);
+
+ printf("Pass 1 complete. Processed %d frames.\n", frame_count);
}
int main(int argc, char **argv) {
FILE *infile = NULL;
- VpxVideoWriter *writer = NULL;
+ int w, h;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
vpx_image_t raw;
vpx_codec_err_t res;
- vpx_fixed_buf_t stats = {0};
- VpxVideoInfo info = {0};
+ vpx_fixed_buf_t stats;
+
const VpxInterface *encoder = NULL;
- int pass;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
const char *const codec_arg = argv[1];
@@ -146,85 +222,44 @@
if (!encoder)
die("Unsupported codec.");
- info.codec_fourcc = encoder->fourcc;
- info.time_base.numerator = 1;
- info.time_base.denominator = fps;
- info.frame_width = strtol(width_arg, NULL, 0);
- info.frame_height = strtol(height_arg, NULL, 0);
+ w = strtol(width_arg, NULL, 0);
+ h = strtol(height_arg, NULL, 0);
- if (info.frame_width <= 0 ||
- info.frame_height <= 0 ||
- (info.frame_width % 2) != 0 ||
- (info.frame_height % 2) != 0) {
- die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
- }
+ if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0)
+ die("Invalid frame size: %dx%d", w, h);
- if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
- info.frame_height, 1)) {
- die("Failed to allocate image", info.frame_width, info.frame_height);
- }
+ if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, w, h, 1))
+ die("Failed to allocate image", w, h);
- writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
- if (!writer)
- die("Failed to open %s for writing", outfile_arg);
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
-
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ // Configuration
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
- cfg.g_w = info.frame_width;
- cfg.g_h = info.frame_height;
- cfg.g_timebase.num = info.time_base.numerator;
- cfg.g_timebase.den = info.time_base.denominator;
+ cfg.g_w = w;
+ cfg.g_h = h;
+ cfg.g_timebase.num = 1;
+ cfg.g_timebase.den = fps;
cfg.rc_target_bitrate = bitrate;
- for (pass = 0; pass < 2; ++pass) {
- int frame_count = 0;
+ if (!(infile = fopen(infile_arg, "rb")))
+ die("Failed to open %s for reading", infile_arg);
- if (pass == 0) {
- cfg.g_pass = VPX_RC_FIRST_PASS;
- } else {
- cfg.g_pass = VPX_RC_LAST_PASS;
- cfg.rc_twopass_stats_in = stats;
- }
+ // Pass 0
+ cfg.g_pass = VPX_RC_FIRST_PASS;
+ stats = pass0(&raw, infile, encoder, &cfg);
- if (!(infile = fopen(infile_arg, "rb")))
- die("Failed to open %s for reading", infile_arg);
-
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
- die_codec(&codec, "Failed to initialize encoder");
-
- while (vpx_img_read(&raw, infile)) {
- ++frame_count;
-
- if (pass == 0) {
- get_frame_stats(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- &stats);
- } else {
- encode_frame(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- writer);
- }
- }
-
- if (pass == 0) {
- get_frame_stats(&codec, NULL, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- &stats);
- } else {
- printf("\n");
- }
-
- fclose(infile);
- printf("Pass %d complete. Processed %d frames.\n", pass + 1, frame_count);
- if (vpx_codec_destroy(&codec))
- die_codec(&codec, "Failed to destroy codec.");
- }
-
- vpx_img_free(&raw);
+ // Pass 1
+ rewind(infile);
+ cfg.g_pass = VPX_RC_LAST_PASS;
+ cfg.rc_twopass_stats_in = stats;
+ pass1(&raw, infile, outfile_arg, encoder, &cfg);
free(stats.buf);
- vpx_video_writer_close(writer);
+ vpx_img_free(&raw);
+ fclose(infile);
return EXIT_SUCCESS;
}
|
CWE-119
|
VpxVideoWriter *writer = NULL;
vpx_fixed_buf_t stats = {0};
VpxVideoInfo info = {0};
int pass;
info.codec_fourcc = encoder->fourcc;
info.time_base.numerator = 1;
info.time_base.denominator = fps;
info.frame_width = strtol(width_arg, NULL, 0);
info.frame_height = strtol(height_arg, NULL, 0);
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image", info.frame_width, info.frame_height);
}
writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing", outfile_arg);
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
for (pass = 0; pass < 2; ++pass) {
int frame_count = 0;
if (pass == 0) {
cfg.g_pass = VPX_RC_FIRST_PASS;
} else {
cfg.g_pass = VPX_RC_LAST_PASS;
cfg.rc_twopass_stats_in = stats;
}
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading", infile_arg);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
while (vpx_img_read(&raw, infile)) {
++frame_count;
if (pass == 0) {
get_frame_stats(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
&stats);
} else {
encode_frame(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
writer);
}
}
if (pass == 0) {
get_frame_stats(&codec, NULL, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
&stats);
} else {
printf("\n");
}
fclose(infile);
printf("Pass %d complete. Processed %d frames.\n", pass + 1, frame_count);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
}
vpx_img_free(&raw);
vpx_video_writer_close(writer);
|
int w, h;
vpx_fixed_buf_t stats;
w = strtol(width_arg, NULL, 0);
h = strtol(height_arg, NULL, 0);
if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0)
die("Invalid frame size: %dx%d", w, h);
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, w, h, 1))
die("Failed to allocate image", w, h);
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
// Configuration
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
cfg.g_w = w;
cfg.g_h = h;
cfg.g_timebase.num = 1;
cfg.g_timebase.den = fps;
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading", infile_arg);
// Pass 0
cfg.g_pass = VPX_RC_FIRST_PASS;
stats = pass0(&raw, infile, encoder, &cfg);
// Pass 1
rewind(infile);
cfg.g_pass = VPX_RC_LAST_PASS;
cfg.rc_twopass_stats_in = stats;
pass1(&raw, infile, outfile_arg, encoder, &cfg);
vpx_img_free(&raw);
fclose(infile);
|
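Editor's note on record 150,814: the patch splits the two-pass loop into `pass0()` and `pass1()` helpers. An annotated sketch of the resulting control flow, reusing the record's own names; the comments on data handoff and buffer ownership are our reading of the patch:

cfg.g_pass = VPX_RC_FIRST_PASS;
stats = pass0(&raw, infile, encoder, &cfg);  /* collect stats, no output file */

rewind(infile);                    /* reread the same raw frames for pass 1 */
cfg.g_pass = VPX_RC_LAST_PASS;
cfg.rc_twopass_stats_in = stats;   /* feed pass-0 output into pass 1 */
pass1(&raw, infile, outfile_arg, encoder, &cfg);
free(stats.buf);                   /* pass0 allocated the stats buffer */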
150,815 |
void usage_exit() {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
}
|
@@ -28,9 +28,8 @@
// Encoding A Frame
// ----------------
// Encoding a frame in two pass mode is identical to the simple encoder
-// example, except the deadline is set to VPX_DL_BEST_QUALITY to get the
-// best quality possible. VPX_DL_GOOD_QUALITY could also be used.
-//
+// example. To increase the quality while sacrificing encoding speed,
+// VPX_DL_BEST_QUALITY can be used in place of VPX_DL_GOOD_QUALITY.
//
// Processing Statistics Packets
// -----------------------------
@@ -52,27 +51,27 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <codec> <width> <height> <infile> <outfile>\n",
exec_name);
exit(EXIT_FAILURE);
}
-static void get_frame_stats(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned int duration,
- vpx_enc_frame_flags_t flags,
- unsigned int deadline,
- vpx_fixed_buf_t *stats) {
+static int get_frame_stats(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned int duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned int deadline,
+ vpx_fixed_buf_t *stats) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
@@ -81,6 +80,8 @@
die_codec(ctx, "Failed to get frame stats.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_STATS_PKT) {
const uint8_t *const pkt_buf = pkt->data.twopass_stats.buf;
const size_t pkt_size = pkt->data.twopass_stats.sz;
@@ -89,15 +90,18 @@
stats->sz += pkt_size;
}
}
+
+ return got_pkts;
}
-static void encode_frame(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned int duration,
- vpx_enc_frame_flags_t flags,
- unsigned int deadline,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned int duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned int deadline,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags,
@@ -106,6 +110,7 @@
die_codec(ctx, "Failed to encode frame.");
while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
+ got_pkts = 1;
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
@@ -117,19 +122,90 @@
fflush(stdout);
}
}
+
+ return got_pkts;
+}
+
+static vpx_fixed_buf_t pass0(vpx_image_t *raw,
+ FILE *infile,
+ const VpxInterface *encoder,
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_ctx_t codec;
+ int frame_count = 0;
+ vpx_fixed_buf_t stats = {NULL, 0};
+
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+ die_codec(&codec, "Failed to initialize encoder");
+
+ // Calculate frame statistics.
+ while (vpx_img_read(raw, infile)) {
+ ++frame_count;
+ get_frame_stats(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY,
+ &stats);
+ }
+
+ // Flush encoder.
+ while (get_frame_stats(&codec, NULL, frame_count, 1, 0,
+ VPX_DL_GOOD_QUALITY, &stats)) {}
+
+ printf("Pass 0 complete. Processed %d frames.\n", frame_count);
+ if (vpx_codec_destroy(&codec))
+ die_codec(&codec, "Failed to destroy codec.");
+
+ return stats;
+}
+
+static void pass1(vpx_image_t *raw,
+ FILE *infile,
+ const char *outfile_name,
+ const VpxInterface *encoder,
+ const vpx_codec_enc_cfg_t *cfg) {
+ VpxVideoInfo info = {
+ encoder->fourcc,
+ cfg->g_w,
+ cfg->g_h,
+ {cfg->g_timebase.num, cfg->g_timebase.den}
+ };
+ VpxVideoWriter *writer = NULL;
+ vpx_codec_ctx_t codec;
+ int frame_count = 0;
+
+ writer = vpx_video_writer_open(outfile_name, kContainerIVF, &info);
+ if (!writer)
+ die("Failed to open %s for writing", outfile_name);
+
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0))
+ die_codec(&codec, "Failed to initialize encoder");
+
+ // Encode frames.
+ while (vpx_img_read(raw, infile)) {
+ ++frame_count;
+ encode_frame(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY, writer);
+ }
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY, writer)) {}
+
+ printf("\n");
+
+ if (vpx_codec_destroy(&codec))
+ die_codec(&codec, "Failed to destroy codec.");
+
+ vpx_video_writer_close(writer);
+
+ printf("Pass 1 complete. Processed %d frames.\n", frame_count);
}
int main(int argc, char **argv) {
FILE *infile = NULL;
- VpxVideoWriter *writer = NULL;
+ int w, h;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
vpx_image_t raw;
vpx_codec_err_t res;
- vpx_fixed_buf_t stats = {0};
- VpxVideoInfo info = {0};
+ vpx_fixed_buf_t stats;
+
const VpxInterface *encoder = NULL;
- int pass;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
const char *const codec_arg = argv[1];
@@ -146,85 +222,44 @@
if (!encoder)
die("Unsupported codec.");
- info.codec_fourcc = encoder->fourcc;
- info.time_base.numerator = 1;
- info.time_base.denominator = fps;
- info.frame_width = strtol(width_arg, NULL, 0);
- info.frame_height = strtol(height_arg, NULL, 0);
+ w = strtol(width_arg, NULL, 0);
+ h = strtol(height_arg, NULL, 0);
- if (info.frame_width <= 0 ||
- info.frame_height <= 0 ||
- (info.frame_width % 2) != 0 ||
- (info.frame_height % 2) != 0) {
- die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
- }
+ if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0)
+ die("Invalid frame size: %dx%d", w, h);
- if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
- info.frame_height, 1)) {
- die("Failed to allocate image", info.frame_width, info.frame_height);
- }
+ if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, w, h, 1))
+ die("Failed to allocate image", w, h);
- writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
- if (!writer)
- die("Failed to open %s for writing", outfile_arg);
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
-
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ // Configuration
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
- cfg.g_w = info.frame_width;
- cfg.g_h = info.frame_height;
- cfg.g_timebase.num = info.time_base.numerator;
- cfg.g_timebase.den = info.time_base.denominator;
+ cfg.g_w = w;
+ cfg.g_h = h;
+ cfg.g_timebase.num = 1;
+ cfg.g_timebase.den = fps;
cfg.rc_target_bitrate = bitrate;
- for (pass = 0; pass < 2; ++pass) {
- int frame_count = 0;
+ if (!(infile = fopen(infile_arg, "rb")))
+ die("Failed to open %s for reading", infile_arg);
- if (pass == 0) {
- cfg.g_pass = VPX_RC_FIRST_PASS;
- } else {
- cfg.g_pass = VPX_RC_LAST_PASS;
- cfg.rc_twopass_stats_in = stats;
- }
+ // Pass 0
+ cfg.g_pass = VPX_RC_FIRST_PASS;
+ stats = pass0(&raw, infile, encoder, &cfg);
- if (!(infile = fopen(infile_arg, "rb")))
- die("Failed to open %s for reading", infile_arg);
-
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
- die_codec(&codec, "Failed to initialize encoder");
-
- while (vpx_img_read(&raw, infile)) {
- ++frame_count;
-
- if (pass == 0) {
- get_frame_stats(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- &stats);
- } else {
- encode_frame(&codec, &raw, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- writer);
- }
- }
-
- if (pass == 0) {
- get_frame_stats(&codec, NULL, frame_count, 1, 0, VPX_DL_BEST_QUALITY,
- &stats);
- } else {
- printf("\n");
- }
-
- fclose(infile);
- printf("Pass %d complete. Processed %d frames.\n", pass + 1, frame_count);
- if (vpx_codec_destroy(&codec))
- die_codec(&codec, "Failed to destroy codec.");
- }
-
- vpx_img_free(&raw);
+ // Pass 1
+ rewind(infile);
+ cfg.g_pass = VPX_RC_LAST_PASS;
+ cfg.rc_twopass_stats_in = stats;
+ pass1(&raw, infile, outfile_arg, encoder, &cfg);
free(stats.buf);
- vpx_video_writer_close(writer);
+ vpx_img_free(&raw);
+ fclose(infile);
return EXIT_SUCCESS;
}
|
CWE-119
| null |
void usage_exit(void) {
|
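The only change in this record is the empty parameter list becoming (void). In C before C23, void usage_exit() declares the function with unspecified parameters, so a bad call such as usage_exit(42) compiles silently; void usage_exit(void) is a full prototype that makes such a call a compile-time error. A minimal sketch of the difference (hypothetical names, not from the record):

#include <stdlib.h>

static void old_style_exit();      /* no prototype: argument types unchecked */
static void new_style_exit(void);  /* prototype: must be called with no args */

int main(void) {
  /* old_style_exit(42);  accepted before C23 despite the mismatch --
                          the call would be undefined behavior */
  /* new_style_exit(42);  rejected at compile time thanks to (void) */
  old_style_exit();
  new_style_exit();
}

static void old_style_exit() { /* ... */ }
static void new_style_exit(void) { exit(EXIT_FAILURE); }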
150,816 |
static void die(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
if(fmt[strlen(fmt)-1] != '\n')
printf("\n");
exit(EXIT_FAILURE);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static void die(const char *fmt, ...) {
|
@@ -13,22 +13,40 @@
* High-resolution input video is down-sampled to lower-resolutions. The
* encoder then encodes the video and outputs multiple bitstreams with
* different resolutions.
+ *
+ * This test also allows for settings temporal layers for each spatial layer.
+ * Different number of temporal layers per spatial stream may be used.
+ * Currently up to 3 temporal layers per spatial stream (encoder) are supported
+ * in this test.
*/
+
+#include "./vpx_config.h"
+
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <math.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
+#include <assert.h>
+#include <sys/time.h>
+#if USE_POSIX_MMAP
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+#include "vpx_ports/vpx_timer.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "vpx_ports/mem_ops.h"
-#include "./tools_common.h"
+#include "../tools_common.h"
#define interface (vpx_codec_vp8_cx())
#define fourcc 0x30385056
-#define IVF_FILE_HDR_SZ (32)
-#define IVF_FRAME_HDR_SZ (12)
+void usage_exit(void) {
+ exit(EXIT_FAILURE);
+}
/*
* The input video frame is downsampled several times to generate a multi-level
@@ -38,32 +56,18 @@
* bitstreams with resolution of 1280x720(level 0), 640x360(level 1), and
* 320x180(level 2) respectively.
*/
+
+/* Number of encoders (spatial resolutions) used in this test. */
#define NUM_ENCODERS 3
+/* Maximum number of temporal layers allowed for this test. */
+#define MAX_NUM_TEMPORAL_LAYERS 3
+
/* This example uses the scaler function in libyuv. */
#include "third_party/libyuv/include/libyuv/basic_types.h"
#include "third_party/libyuv/include/libyuv/scale.h"
#include "third_party/libyuv/include/libyuv/cpu_id.h"
-static void die(const char *fmt, ...) {
- va_list ap;
-
- va_start(ap, fmt);
- vprintf(fmt, ap);
- if(fmt[strlen(fmt)-1] != '\n')
- printf("\n");
- exit(EXIT_FAILURE);
-}
-
-static void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
- const char *detail = vpx_codec_error_detail(ctx);
-
- printf("%s: %s\n", s, vpx_codec_error(ctx));
- if(detail)
- printf(" %s\n",detail);
- exit(EXIT_FAILURE);
-}
-
int (*read_frame_p)(FILE *f, vpx_image_t *img);
static int read_frame(FILE *f, vpx_image_t *img) {
@@ -170,21 +174,172 @@
(void) fwrite(header, 1, 12, outfile);
}
+/* Temporal scaling parameters */
+/* This sets all the temporal layer parameters given |num_temporal_layers|,
+ * including the target bit allocation across temporal layers. Bit allocation
+ * parameters will be passed in as user parameters in another version.
+ */
+static void set_temporal_layer_pattern(int num_temporal_layers,
+ vpx_codec_enc_cfg_t *cfg,
+ int bitrate,
+ int *layer_flags)
+{
+ assert(num_temporal_layers <= MAX_NUM_TEMPORAL_LAYERS);
+ switch (num_temporal_layers)
+ {
+ case 1:
+ {
+ /* 1-layer */
+ cfg->ts_number_layers = 1;
+ cfg->ts_periodicity = 1;
+ cfg->ts_rate_decimator[0] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_target_bitrate[0] = bitrate;
+
+ // Update L only.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+ break;
+ }
+
+ case 2:
+ {
+ /* 2-layers, with sync point at first frame of layer 1. */
+ cfg->ts_number_layers = 2;
+ cfg->ts_periodicity = 2;
+ cfg->ts_rate_decimator[0] = 2;
+ cfg->ts_rate_decimator[1] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 1;
+ // Use 60/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[1] = bitrate;
+
+ /* 0=L, 1=GF */
+ // ARF is used as predictor for all frames, and is only updated on
+ // key frame. Sync point every 8 frames.
+
+ // Layer 0: predict from L and ARF, update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: sync point: predict from L and ARF, and update G.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 0, predict from L and ARF, update L.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: predict from L, G and ARF, and update G.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0
+ layer_flags[4] = layer_flags[2];
+
+ // Layer 1
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 0
+ layer_flags[6] = layer_flags[4];
+
+ // Layer 1
+ layer_flags[7] = layer_flags[5];
+ break;
+ }
+
+ case 3:
+ default:
+ {
+ // 3-layers structure where ARF is used as predictor for all frames,
+ // and is only updated on key frame.
+ // Sync points for layer 1 and 2 every 8 frames.
+ cfg->ts_number_layers = 3;
+ cfg->ts_periodicity = 4;
+ cfg->ts_rate_decimator[0] = 4;
+ cfg->ts_rate_decimator[1] = 2;
+ cfg->ts_rate_decimator[2] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 2;
+ cfg->ts_layer_id[2] = 1;
+ cfg->ts_layer_id[3] = 2;
+ // Use 40/20/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.4f * bitrate;
+ cfg->ts_target_bitrate[1] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[2] = bitrate;
+
+ /* 0=L, 1=GF, 2=ARF */
+
+ // Layer 0: predict from L and ARF; update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: sync point: predict from L and ARF; update none.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 1: sync point: predict from L and ARF; update G.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0: predict from L and ARF; update L.
+ layer_flags[4] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 1: predict from L, G, ARF; update G.
+ layer_flags[6] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[7] = layer_flags[3];
+ break;
+ }
+ }
+}
+
+/* The periodicity of the pattern given the number of temporal layers. */
+static int periodicity_to_num_layers[MAX_NUM_TEMPORAL_LAYERS] = {1, 8, 8};
+
int main(int argc, char **argv)
{
- FILE *infile, *outfile[NUM_ENCODERS];
+ FILE *infile, *outfile[NUM_ENCODERS];
+ FILE *downsampled_input[NUM_ENCODERS - 1];
+ char filename[50];
vpx_codec_ctx_t codec[NUM_ENCODERS];
vpx_codec_enc_cfg_t cfg[NUM_ENCODERS];
- vpx_codec_pts_t frame_cnt = 0;
+ int frame_cnt = 0;
vpx_image_t raw[NUM_ENCODERS];
vpx_codec_err_t res[NUM_ENCODERS];
int i;
long width;
long height;
+ int length_frame;
int frame_avail;
int got_data;
int flags = 0;
+ int layer_id = 0;
+
+ int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS]
+ = {0};
+ int flag_periodicity;
/*Currently, only realtime mode is supported in multi-resolution encoding.*/
int arg_deadline = VPX_DL_REALTIME;
@@ -193,39 +348,51 @@
don't need to know PSNR, which will skip PSNR calculation and save
encoding time. */
int show_psnr = 0;
+ int key_frame_insert = 0;
uint64_t psnr_sse_total[NUM_ENCODERS] = {0};
uint64_t psnr_samples_total[NUM_ENCODERS] = {0};
double psnr_totals[NUM_ENCODERS][4] = {{0,0}};
int psnr_count[NUM_ENCODERS] = {0};
+ double cx_time = 0;
+ struct timeval tv1, tv2, difftv;
+
/* Set the required target bitrates for each resolution level.
* If target bitrate for highest-resolution level is set to 0,
* (i.e. target_bitrate[0]=0), we skip encoding at that level.
*/
unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
+
/* Enter the frame rate of the input video */
int framerate = 30;
+
/* Set down-sampling factor for each resolution level.
dsf[0] controls down sampling from level 0 to level 1;
dsf[1] controls down sampling from level 1 to level 2;
dsf[2] is not used. */
vpx_rational_t dsf[NUM_ENCODERS] = {{2, 1}, {2, 1}, {1, 1}};
- if(argc!= (5+NUM_ENCODERS))
- die("Usage: %s <width> <height> <infile> <outfile(s)> <output psnr?>\n",
+ /* Set the number of temporal layers for each encoder/resolution level,
+ * starting from highest resoln down to lowest resoln. */
+ unsigned int num_temporal_layers[NUM_ENCODERS] = {3, 3, 3};
+
+ if(argc!= (7 + 3 * NUM_ENCODERS))
+ die("Usage: %s <width> <height> <frame_rate> <infile> <outfile(s)> "
+ "<rate_encoder(s)> <temporal_layer(s)> <key_frame_insert> <output psnr?> \n",
argv[0]);
printf("Using %s\n",vpx_codec_iface_name(interface));
width = strtol(argv[1], NULL, 0);
height = strtol(argv[2], NULL, 0);
+ framerate = strtol(argv[3], NULL, 0);
if(width < 16 || width%2 || height <16 || height%2)
die("Invalid resolution: %ldx%ld", width, height);
/* Open input video file for encoding */
- if(!(infile = fopen(argv[3], "rb")))
- die("Failed to open %s for reading", argv[3]);
+ if(!(infile = fopen(argv[4], "rb")))
+ die("Failed to open %s for reading", argv[4]);
/* Open output file for each encoder to output bitstreams */
for (i=0; i< NUM_ENCODERS; i++)
@@ -236,11 +403,40 @@
continue;
}
- if(!(outfile[i] = fopen(argv[i+4], "wb")))
+ if(!(outfile[i] = fopen(argv[i+5], "wb")))
die("Failed to open %s for writing", argv[i+4]);
}
- show_psnr = strtol(argv[NUM_ENCODERS + 4], NULL, 0);
+ // Bitrates per spatial layer: overwrite default rates above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0);
+ }
+
+ // Temporal layers per spatial layers: overwrite default settings above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0);
+ if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3)
+ die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n",
+ num_temporal_layers);
+ }
+
+ /* Open file to write out each spatially downsampled input stream. */
+ for (i=0; i< NUM_ENCODERS - 1; i++)
+ {
+ // Highest resoln is encoder 0.
+ if (sprintf(filename,"ds%d.yuv",NUM_ENCODERS - i) < 0)
+ {
+ return EXIT_FAILURE;
+ }
+ downsampled_input[i] = fopen(filename,"wb");
+ }
+
+ key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0);
+
+ show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0);
+
/* Populate default encoder configuration */
for (i=0; i< NUM_ENCODERS; i++)
@@ -258,14 +454,13 @@
/* Highest-resolution encoder settings */
cfg[0].g_w = width;
cfg[0].g_h = height;
- cfg[0].g_threads = 1; /* number of threads used */
- cfg[0].rc_dropframe_thresh = 30;
+ cfg[0].rc_dropframe_thresh = 0;
cfg[0].rc_end_usage = VPX_CBR;
cfg[0].rc_resize_allowed = 0;
- cfg[0].rc_min_quantizer = 4;
+ cfg[0].rc_min_quantizer = 2;
cfg[0].rc_max_quantizer = 56;
- cfg[0].rc_undershoot_pct = 98;
- cfg[0].rc_overshoot_pct = 100;
+ cfg[0].rc_undershoot_pct = 100;
+ cfg[0].rc_overshoot_pct = 15;
cfg[0].rc_buf_initial_sz = 500;
cfg[0].rc_buf_optimal_sz = 600;
cfg[0].rc_buf_sz = 1000;
@@ -276,7 +471,6 @@
/* Note: These 3 settings are copied to all levels. But, except the lowest
* resolution level, all other levels are set to VPX_KF_DISABLED internally.
*/
- //cfg[0].kf_mode = VPX_KF_DISABLED;
cfg[0].kf_mode = VPX_KF_AUTO;
cfg[0].kf_min_dist = 3000;
cfg[0].kf_max_dist = 3000;
@@ -290,7 +484,6 @@
{
memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t));
- cfg[i].g_threads = 1; /* number of threads used */
cfg[i].rc_target_bitrate = target_bitrate[i];
/* Note: Width & height of other-resolution encoders are calculated
@@ -310,6 +503,13 @@
if((cfg[i].g_h)%2)cfg[i].g_h++;
}
+
+ // Set the number of threads per encode/spatial layer.
+ // (1, 1, 1) means no encoder threading.
+ cfg[0].g_threads = 2;
+ cfg[1].g_threads = 1;
+ cfg[2].g_threads = 1;
+
/* Allocate image for each encoder */
for (i=0; i< NUM_ENCODERS; i++)
if(!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32))
@@ -324,6 +524,15 @@
if(outfile[i])
write_ivf_file_header(outfile[i], &cfg[i], 0);
+ /* Temporal layers settings */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ set_temporal_layer_pattern(num_temporal_layers[i],
+ &cfg[i],
+ cfg[i].rc_target_bitrate,
+ &layer_flags[i * VPX_TS_MAX_PERIODICITY]);
+ }
+
/* Initialize multi-encoder */
if(vpx_codec_enc_init_multi(&codec[0], interface, &cfg[0], NUM_ENCODERS,
(show_psnr ? VPX_CODEC_USE_PSNR : 0), &dsf[0]))
@@ -334,15 +543,16 @@
for ( i=0; i<NUM_ENCODERS; i++)
{
int speed = -6;
+ /* Lower speed for the lowest resolution. */
+ if (i == NUM_ENCODERS - 1) speed = -4;
if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
die_codec(&codec[i], "Failed to set cpu_used");
}
- /* Set static threshold. */
+ /* Set static threshold = 1 for all encoders */
for ( i=0; i<NUM_ENCODERS; i++)
{
- unsigned int static_thresh = 1;
- if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, static_thresh))
+ if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1))
die_codec(&codec[i], "Failed to set static threshold");
}
@@ -356,6 +566,23 @@
die_codec(&codec[i], "Failed to set noise_sensitivity");
}
+ /* Set the number of token partitions */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ if(vpx_codec_control(&codec[i], VP8E_SET_TOKEN_PARTITIONS, 1))
+ die_codec(&codec[i], "Failed to set static threshold");
+ }
+
+ /* Set the max intra target bitrate */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ unsigned int max_intra_size_pct =
+ (int)(((double)cfg[0].rc_buf_optimal_sz * 0.5) * framerate / 10);
+ if(vpx_codec_control(&codec[i], VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ max_intra_size_pct))
+ die_codec(&codec[i], "Failed to set static threshold");
+ //printf("%d %d \n",i,max_intra_size_pct);
+ }
frame_avail = 1;
got_data = 0;
@@ -382,18 +609,55 @@
raw[i].planes[VPX_PLANE_U], raw[i].stride[VPX_PLANE_U],
raw[i].planes[VPX_PLANE_V], raw[i].stride[VPX_PLANE_V],
raw[i].d_w, raw[i].d_h, 1);
+ /* Write out down-sampled input. */
+ length_frame = cfg[i].g_w * cfg[i].g_h *3/2;
+ if (fwrite(raw[i].planes[0], 1, length_frame,
+ downsampled_input[NUM_ENCODERS - i - 1]) !=
+ length_frame)
+ {
+ return EXIT_FAILURE;
+ }
}
}
- /* Encode each frame at multi-levels */
- if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
- frame_cnt, 1, flags, arg_deadline))
- die_codec(&codec[0], "Failed to encode frame");
+ /* Set the flags (reference and update) for all the encoders.*/
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ layer_id = cfg[i].ts_layer_id[frame_cnt % cfg[i].ts_periodicity];
+ flags = 0;
+ flag_periodicity = periodicity_to_num_layers
+ [num_temporal_layers[i] - 1];
+ flags = layer_flags[i * VPX_TS_MAX_PERIODICITY +
+ frame_cnt % flag_periodicity];
+ // Key frame flag for first frame.
+ if (frame_cnt == 0)
+ {
+ flags |= VPX_EFLAG_FORCE_KF;
+ }
+ if (frame_cnt > 0 && frame_cnt == key_frame_insert)
+ {
+ flags = VPX_EFLAG_FORCE_KF;
+ }
+ vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags);
+ vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+ }
+
+ gettimeofday(&tv1, NULL);
+ /* Encode each frame at multi-levels */
+ /* Note the flags must be set to 0 in the encode call if they are set
+ for each frame with the vpx_codec_control(), as done above. */
+ if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
+ frame_cnt, 1, 0, arg_deadline))
+ {
+ die_codec(&codec[0], "Failed to encode frame");
+ }
+ gettimeofday(&tv2, NULL);
+ timersub(&tv2, &tv1, &difftv);
+ cx_time += (double)(difftv.tv_sec * 1000000 + difftv.tv_usec);
for (i=NUM_ENCODERS-1; i>=0 ; i--)
{
got_data = 0;
-
while( (pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i])) )
{
got_data = 1;
@@ -412,7 +676,6 @@
psnr_samples_total[i] += pkt[i]->data.psnr.samples[0];
for (j = 0; j < 4; j++)
{
- //fprintf(stderr, "%.3lf ", pkt[i]->data.psnr.psnr[j]);
psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j];
}
psnr_count[i]++;
@@ -423,13 +686,15 @@
break;
}
printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT
- && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":".");
+ && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":"");
fflush(stdout);
}
}
frame_cnt++;
}
printf("\n");
+ printf("FPS for encoding %d %f %f \n", frame_cnt, (float)cx_time / 1000000,
+ 1000000 * (double)frame_cnt / (double)cx_time);
fclose(infile);
|
CWE-119
|
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
if(fmt[strlen(fmt)-1] != '\n')
printf("\n");
exit(EXIT_FAILURE);
}
| null |
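This record and the next one (150,817) come from the same commit, which deletes these local helpers in favor of the shared versions in ../tools_common.h. Worth noting about the deleted die(): if fmt is ever the empty string, strlen(fmt) - 1 underflows and fmt[strlen(fmt)-1] indexes out of bounds, and the function never calls va_end. A hardened sketch based on those observations (die_safe is a hypothetical name, not part of the patch):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void die_safe(const char *fmt, ...) {
  va_list ap;
  const size_t n = strlen(fmt);

  va_start(ap, fmt);
  vprintf(fmt, ap);
  va_end(ap);                        /* the original omits va_end */

  if (n == 0 || fmt[n - 1] != '\n')  /* guard the empty-format case */
    printf("\n");
  exit(EXIT_FAILURE);
}

int main(void) { die_safe("fatal: %s", "example"); }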
150,817 |
static void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
const char *detail = vpx_codec_error_detail(ctx);
printf("%s: %s\n", s, vpx_codec_error(ctx));
if(detail)
printf(" %s\n",detail);
exit(EXIT_FAILURE);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
|
@@ -13,22 +13,40 @@
* High-resolution input video is down-sampled to lower-resolutions. The
* encoder then encodes the video and outputs multiple bitstreams with
* different resolutions.
+ *
+ * This test also allows for settings temporal layers for each spatial layer.
+ * Different number of temporal layers per spatial stream may be used.
+ * Currently up to 3 temporal layers per spatial stream (encoder) are supported
+ * in this test.
*/
+
+#include "./vpx_config.h"
+
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <math.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
+#include <assert.h>
+#include <sys/time.h>
+#if USE_POSIX_MMAP
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+#include "vpx_ports/vpx_timer.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "vpx_ports/mem_ops.h"
-#include "./tools_common.h"
+#include "../tools_common.h"
#define interface (vpx_codec_vp8_cx())
#define fourcc 0x30385056
-#define IVF_FILE_HDR_SZ (32)
-#define IVF_FRAME_HDR_SZ (12)
+void usage_exit(void) {
+ exit(EXIT_FAILURE);
+}
/*
* The input video frame is downsampled several times to generate a multi-level
@@ -38,32 +56,18 @@
* bitstreams with resolution of 1280x720(level 0), 640x360(level 1), and
* 320x180(level 2) respectively.
*/
+
+/* Number of encoders (spatial resolutions) used in this test. */
#define NUM_ENCODERS 3
+/* Maximum number of temporal layers allowed for this test. */
+#define MAX_NUM_TEMPORAL_LAYERS 3
+
/* This example uses the scaler function in libyuv. */
#include "third_party/libyuv/include/libyuv/basic_types.h"
#include "third_party/libyuv/include/libyuv/scale.h"
#include "third_party/libyuv/include/libyuv/cpu_id.h"
-static void die(const char *fmt, ...) {
- va_list ap;
-
- va_start(ap, fmt);
- vprintf(fmt, ap);
- if(fmt[strlen(fmt)-1] != '\n')
- printf("\n");
- exit(EXIT_FAILURE);
-}
-
-static void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
- const char *detail = vpx_codec_error_detail(ctx);
-
- printf("%s: %s\n", s, vpx_codec_error(ctx));
- if(detail)
- printf(" %s\n",detail);
- exit(EXIT_FAILURE);
-}
-
int (*read_frame_p)(FILE *f, vpx_image_t *img);
static int read_frame(FILE *f, vpx_image_t *img) {
@@ -170,21 +174,172 @@
(void) fwrite(header, 1, 12, outfile);
}
+/* Temporal scaling parameters */
+/* This sets all the temporal layer parameters given |num_temporal_layers|,
+ * including the target bit allocation across temporal layers. Bit allocation
+ * parameters will be passed in as user parameters in another version.
+ */
+static void set_temporal_layer_pattern(int num_temporal_layers,
+ vpx_codec_enc_cfg_t *cfg,
+ int bitrate,
+ int *layer_flags)
+{
+ assert(num_temporal_layers <= MAX_NUM_TEMPORAL_LAYERS);
+ switch (num_temporal_layers)
+ {
+ case 1:
+ {
+ /* 1-layer */
+ cfg->ts_number_layers = 1;
+ cfg->ts_periodicity = 1;
+ cfg->ts_rate_decimator[0] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_target_bitrate[0] = bitrate;
+
+ // Update L only.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+ break;
+ }
+
+ case 2:
+ {
+ /* 2-layers, with sync point at first frame of layer 1. */
+ cfg->ts_number_layers = 2;
+ cfg->ts_periodicity = 2;
+ cfg->ts_rate_decimator[0] = 2;
+ cfg->ts_rate_decimator[1] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 1;
+ // Use 60/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[1] = bitrate;
+
+ /* 0=L, 1=GF */
+ // ARF is used as predictor for all frames, and is only updated on
+ // key frame. Sync point every 8 frames.
+
+ // Layer 0: predict from L and ARF, update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: sync point: predict from L and ARF, and update G.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 0, predict from L and ARF, update L.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: predict from L, G and ARF, and update G.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0
+ layer_flags[4] = layer_flags[2];
+
+ // Layer 1
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 0
+ layer_flags[6] = layer_flags[4];
+
+ // Layer 1
+ layer_flags[7] = layer_flags[5];
+ break;
+ }
+
+ case 3:
+ default:
+ {
+ // 3-layers structure where ARF is used as predictor for all frames,
+ // and is only updated on key frame.
+ // Sync points for layer 1 and 2 every 8 frames.
+ cfg->ts_number_layers = 3;
+ cfg->ts_periodicity = 4;
+ cfg->ts_rate_decimator[0] = 4;
+ cfg->ts_rate_decimator[1] = 2;
+ cfg->ts_rate_decimator[2] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 2;
+ cfg->ts_layer_id[2] = 1;
+ cfg->ts_layer_id[3] = 2;
+ // Use 40/20/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.4f * bitrate;
+ cfg->ts_target_bitrate[1] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[2] = bitrate;
+
+ /* 0=L, 1=GF, 2=ARF */
+
+ // Layer 0: predict from L and ARF; update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: sync point: predict from L and ARF; update none.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 1: sync point: predict from L and ARF; update G.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0: predict from L and ARF; update L.
+ layer_flags[4] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 1: predict from L, G, ARF; update G.
+ layer_flags[6] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[7] = layer_flags[3];
+ break;
+ }
+ }
+}
+
+/* The periodicity of the pattern given the number of temporal layers. */
+static int periodicity_to_num_layers[MAX_NUM_TEMPORAL_LAYERS] = {1, 8, 8};
+
int main(int argc, char **argv)
{
- FILE *infile, *outfile[NUM_ENCODERS];
+ FILE *infile, *outfile[NUM_ENCODERS];
+ FILE *downsampled_input[NUM_ENCODERS - 1];
+ char filename[50];
vpx_codec_ctx_t codec[NUM_ENCODERS];
vpx_codec_enc_cfg_t cfg[NUM_ENCODERS];
- vpx_codec_pts_t frame_cnt = 0;
+ int frame_cnt = 0;
vpx_image_t raw[NUM_ENCODERS];
vpx_codec_err_t res[NUM_ENCODERS];
int i;
long width;
long height;
+ int length_frame;
int frame_avail;
int got_data;
int flags = 0;
+ int layer_id = 0;
+
+ int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS]
+ = {0};
+ int flag_periodicity;
/*Currently, only realtime mode is supported in multi-resolution encoding.*/
int arg_deadline = VPX_DL_REALTIME;
@@ -193,39 +348,51 @@
don't need to know PSNR, which will skip PSNR calculation and save
encoding time. */
int show_psnr = 0;
+ int key_frame_insert = 0;
uint64_t psnr_sse_total[NUM_ENCODERS] = {0};
uint64_t psnr_samples_total[NUM_ENCODERS] = {0};
double psnr_totals[NUM_ENCODERS][4] = {{0,0}};
int psnr_count[NUM_ENCODERS] = {0};
+ double cx_time = 0;
+ struct timeval tv1, tv2, difftv;
+
/* Set the required target bitrates for each resolution level.
* If target bitrate for highest-resolution level is set to 0,
* (i.e. target_bitrate[0]=0), we skip encoding at that level.
*/
unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
+
/* Enter the frame rate of the input video */
int framerate = 30;
+
/* Set down-sampling factor for each resolution level.
dsf[0] controls down sampling from level 0 to level 1;
dsf[1] controls down sampling from level 1 to level 2;
dsf[2] is not used. */
vpx_rational_t dsf[NUM_ENCODERS] = {{2, 1}, {2, 1}, {1, 1}};
- if(argc!= (5+NUM_ENCODERS))
- die("Usage: %s <width> <height> <infile> <outfile(s)> <output psnr?>\n",
+ /* Set the number of temporal layers for each encoder/resolution level,
+ * starting from highest resoln down to lowest resoln. */
+ unsigned int num_temporal_layers[NUM_ENCODERS] = {3, 3, 3};
+
+ if(argc!= (7 + 3 * NUM_ENCODERS))
+ die("Usage: %s <width> <height> <frame_rate> <infile> <outfile(s)> "
+ "<rate_encoder(s)> <temporal_layer(s)> <key_frame_insert> <output psnr?> \n",
argv[0]);
printf("Using %s\n",vpx_codec_iface_name(interface));
width = strtol(argv[1], NULL, 0);
height = strtol(argv[2], NULL, 0);
+ framerate = strtol(argv[3], NULL, 0);
if(width < 16 || width%2 || height <16 || height%2)
die("Invalid resolution: %ldx%ld", width, height);
/* Open input video file for encoding */
- if(!(infile = fopen(argv[3], "rb")))
- die("Failed to open %s for reading", argv[3]);
+ if(!(infile = fopen(argv[4], "rb")))
+ die("Failed to open %s for reading", argv[4]);
/* Open output file for each encoder to output bitstreams */
for (i=0; i< NUM_ENCODERS; i++)
@@ -236,11 +403,40 @@
continue;
}
- if(!(outfile[i] = fopen(argv[i+4], "wb")))
+ if(!(outfile[i] = fopen(argv[i+5], "wb")))
die("Failed to open %s for writing", argv[i+4]);
}
- show_psnr = strtol(argv[NUM_ENCODERS + 4], NULL, 0);
+ // Bitrates per spatial layer: overwrite default rates above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0);
+ }
+
+ // Temporal layers per spatial layers: overwrite default settings above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0);
+ if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3)
+ die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n",
+ num_temporal_layers);
+ }
+
+ /* Open file to write out each spatially downsampled input stream. */
+ for (i=0; i< NUM_ENCODERS - 1; i++)
+ {
+ // Highest resoln is encoder 0.
+ if (sprintf(filename,"ds%d.yuv",NUM_ENCODERS - i) < 0)
+ {
+ return EXIT_FAILURE;
+ }
+ downsampled_input[i] = fopen(filename,"wb");
+ }
+
+ key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0);
+
+ show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0);
+
/* Populate default encoder configuration */
for (i=0; i< NUM_ENCODERS; i++)
@@ -258,14 +454,13 @@
/* Highest-resolution encoder settings */
cfg[0].g_w = width;
cfg[0].g_h = height;
- cfg[0].g_threads = 1; /* number of threads used */
- cfg[0].rc_dropframe_thresh = 30;
+ cfg[0].rc_dropframe_thresh = 0;
cfg[0].rc_end_usage = VPX_CBR;
cfg[0].rc_resize_allowed = 0;
- cfg[0].rc_min_quantizer = 4;
+ cfg[0].rc_min_quantizer = 2;
cfg[0].rc_max_quantizer = 56;
- cfg[0].rc_undershoot_pct = 98;
- cfg[0].rc_overshoot_pct = 100;
+ cfg[0].rc_undershoot_pct = 100;
+ cfg[0].rc_overshoot_pct = 15;
cfg[0].rc_buf_initial_sz = 500;
cfg[0].rc_buf_optimal_sz = 600;
cfg[0].rc_buf_sz = 1000;
@@ -276,7 +471,6 @@
/* Note: These 3 settings are copied to all levels. But, except the lowest
* resolution level, all other levels are set to VPX_KF_DISABLED internally.
*/
- //cfg[0].kf_mode = VPX_KF_DISABLED;
cfg[0].kf_mode = VPX_KF_AUTO;
cfg[0].kf_min_dist = 3000;
cfg[0].kf_max_dist = 3000;
@@ -290,7 +484,6 @@
{
memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t));
- cfg[i].g_threads = 1; /* number of threads used */
cfg[i].rc_target_bitrate = target_bitrate[i];
/* Note: Width & height of other-resolution encoders are calculated
@@ -310,6 +503,13 @@
if((cfg[i].g_h)%2)cfg[i].g_h++;
}
+
+ // Set the number of threads per encode/spatial layer.
+ // (1, 1, 1) means no encoder threading.
+ cfg[0].g_threads = 2;
+ cfg[1].g_threads = 1;
+ cfg[2].g_threads = 1;
+
/* Allocate image for each encoder */
for (i=0; i< NUM_ENCODERS; i++)
if(!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32))
@@ -324,6 +524,15 @@
if(outfile[i])
write_ivf_file_header(outfile[i], &cfg[i], 0);
+ /* Temporal layers settings */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ set_temporal_layer_pattern(num_temporal_layers[i],
+ &cfg[i],
+ cfg[i].rc_target_bitrate,
+ &layer_flags[i * VPX_TS_MAX_PERIODICITY]);
+ }
+
/* Initialize multi-encoder */
if(vpx_codec_enc_init_multi(&codec[0], interface, &cfg[0], NUM_ENCODERS,
(show_psnr ? VPX_CODEC_USE_PSNR : 0), &dsf[0]))
@@ -334,15 +543,16 @@
for ( i=0; i<NUM_ENCODERS; i++)
{
int speed = -6;
+ /* Lower speed for the lowest resolution. */
+ if (i == NUM_ENCODERS - 1) speed = -4;
if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
die_codec(&codec[i], "Failed to set cpu_used");
}
- /* Set static threshold. */
+ /* Set static threshold = 1 for all encoders */
for ( i=0; i<NUM_ENCODERS; i++)
{
- unsigned int static_thresh = 1;
- if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, static_thresh))
+ if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1))
die_codec(&codec[i], "Failed to set static threshold");
}
@@ -356,6 +566,23 @@
die_codec(&codec[i], "Failed to set noise_sensitivity");
}
+ /* Set the number of token partitions */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ if(vpx_codec_control(&codec[i], VP8E_SET_TOKEN_PARTITIONS, 1))
+ die_codec(&codec[i], "Failed to set static threshold");
+ }
+
+ /* Set the max intra target bitrate */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ unsigned int max_intra_size_pct =
+ (int)(((double)cfg[0].rc_buf_optimal_sz * 0.5) * framerate / 10);
+ if(vpx_codec_control(&codec[i], VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ max_intra_size_pct))
+ die_codec(&codec[i], "Failed to set static threshold");
+ //printf("%d %d \n",i,max_intra_size_pct);
+ }
frame_avail = 1;
got_data = 0;
@@ -382,18 +609,55 @@
raw[i].planes[VPX_PLANE_U], raw[i].stride[VPX_PLANE_U],
raw[i].planes[VPX_PLANE_V], raw[i].stride[VPX_PLANE_V],
raw[i].d_w, raw[i].d_h, 1);
+ /* Write out down-sampled input. */
+ length_frame = cfg[i].g_w * cfg[i].g_h *3/2;
+ if (fwrite(raw[i].planes[0], 1, length_frame,
+ downsampled_input[NUM_ENCODERS - i - 1]) !=
+ length_frame)
+ {
+ return EXIT_FAILURE;
+ }
}
}
- /* Encode each frame at multi-levels */
- if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
- frame_cnt, 1, flags, arg_deadline))
- die_codec(&codec[0], "Failed to encode frame");
+ /* Set the flags (reference and update) for all the encoders.*/
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ layer_id = cfg[i].ts_layer_id[frame_cnt % cfg[i].ts_periodicity];
+ flags = 0;
+ flag_periodicity = periodicity_to_num_layers
+ [num_temporal_layers[i] - 1];
+ flags = layer_flags[i * VPX_TS_MAX_PERIODICITY +
+ frame_cnt % flag_periodicity];
+ // Key frame flag for first frame.
+ if (frame_cnt == 0)
+ {
+ flags |= VPX_EFLAG_FORCE_KF;
+ }
+ if (frame_cnt > 0 && frame_cnt == key_frame_insert)
+ {
+ flags = VPX_EFLAG_FORCE_KF;
+ }
+ vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags);
+ vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+ }
+
+ gettimeofday(&tv1, NULL);
+ /* Encode each frame at multi-levels */
+ /* Note the flags must be set to 0 in the encode call if they are set
+ for each frame with the vpx_codec_control(), as done above. */
+ if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
+ frame_cnt, 1, 0, arg_deadline))
+ {
+ die_codec(&codec[0], "Failed to encode frame");
+ }
+ gettimeofday(&tv2, NULL);
+ timersub(&tv2, &tv1, &difftv);
+ cx_time += (double)(difftv.tv_sec * 1000000 + difftv.tv_usec);
for (i=NUM_ENCODERS-1; i>=0 ; i--)
{
got_data = 0;
-
while( (pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i])) )
{
got_data = 1;
@@ -412,7 +676,6 @@
psnr_samples_total[i] += pkt[i]->data.psnr.samples[0];
for (j = 0; j < 4; j++)
{
- //fprintf(stderr, "%.3lf ", pkt[i]->data.psnr.psnr[j]);
psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j];
}
psnr_count[i]++;
@@ -423,13 +686,15 @@
break;
}
printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT
- && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":".");
+ && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":"");
fflush(stdout);
}
}
frame_cnt++;
}
printf("\n");
+ printf("FPS for encoding %d %f %f \n", frame_cnt, (float)cx_time / 1000000,
+ 1000000 * (double)frame_cnt / (double)cx_time);
fclose(infile);
|
CWE-119
|
const char *detail = vpx_codec_error_detail(ctx);
printf("%s: %s\n", s, vpx_codec_error(ctx));
if(detail)
printf(" %s\n",detail);
exit(EXIT_FAILURE);
}
| null |
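The next record (150,818) adds a write-out of each downsampled input frame sized as length_frame = cfg[i].g_w * cfg[i].g_h * 3/2, which is the byte count of one I420 frame: a full-resolution Y plane plus half-by-half U and V planes. A small sketch of that arithmetic (the helper name is ours, not libvpx's); note that the contiguous fwrite from raw[i].planes[0] in the patch matches this size only if the allocated Y stride equals the frame width:

#include <stddef.h>
#include <stdio.h>

/* Bytes in one I420 frame, assuming even w and h (the example rounds
   g_w and g_h up to even). Equals w * h * 3 / 2. */
static size_t i420_frame_size(size_t w, size_t h) {
  const size_t y  = w * h;             /* luma plane       */
  const size_t uv = (w / 2) * (h / 2); /* one chroma plane */
  return y + 2 * uv;                   /* Y + U + V        */
}

int main(void) {
  printf("%zu\n", i420_frame_size(640, 360)); /* 345600 */
  return 0;
}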
150,818 |
int main(int argc, char **argv)
{
FILE *infile, *outfile[NUM_ENCODERS];
vpx_codec_ctx_t codec[NUM_ENCODERS];
vpx_codec_enc_cfg_t cfg[NUM_ENCODERS];
vpx_codec_pts_t frame_cnt = 0;
vpx_image_t raw[NUM_ENCODERS];
vpx_codec_err_t res[NUM_ENCODERS];
int i;
long width;
long height;
int frame_avail;
int got_data;
int flags = 0;
/*Currently, only realtime mode is supported in multi-resolution encoding.*/
int arg_deadline = VPX_DL_REALTIME;
/* Set show_psnr to 1/0 to show/not show PSNR. Choose show_psnr=0 if you
don't need to know PSNR, which will skip PSNR calculation and save
encoding time. */
int show_psnr = 0;
uint64_t psnr_sse_total[NUM_ENCODERS] = {0};
uint64_t psnr_samples_total[NUM_ENCODERS] = {0};
double psnr_totals[NUM_ENCODERS][4] = {{0,0}};
int psnr_count[NUM_ENCODERS] = {0};
/* Set the required target bitrates for each resolution level.
* If target bitrate for highest-resolution level is set to 0,
* (i.e. target_bitrate[0]=0), we skip encoding at that level.
*/
unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
/* Enter the frame rate of the input video */
int framerate = 30;
/* Set down-sampling factor for each resolution level.
dsf[0] controls down sampling from level 0 to level 1;
dsf[1] controls down sampling from level 1 to level 2;
dsf[2] is not used. */
vpx_rational_t dsf[NUM_ENCODERS] = {{2, 1}, {2, 1}, {1, 1}};
if(argc!= (5+NUM_ENCODERS))
die("Usage: %s <width> <height> <infile> <outfile(s)> <output psnr?>\n",
argv[0]);
printf("Using %s\n",vpx_codec_iface_name(interface));
width = strtol(argv[1], NULL, 0);
height = strtol(argv[2], NULL, 0);
if(width < 16 || width%2 || height <16 || height%2)
die("Invalid resolution: %ldx%ld", width, height);
/* Open input video file for encoding */
if(!(infile = fopen(argv[3], "rb")))
die("Failed to open %s for reading", argv[3]);
/* Open output file for each encoder to output bitstreams */
for (i=0; i< NUM_ENCODERS; i++)
{
if(!target_bitrate[i])
{
outfile[i] = NULL;
continue;
}
if(!(outfile[i] = fopen(argv[i+4], "wb")))
die("Failed to open %s for writing", argv[i+4]);
}
show_psnr = strtol(argv[NUM_ENCODERS + 4], NULL, 0);
/* Populate default encoder configuration */
for (i=0; i< NUM_ENCODERS; i++)
{
res[i] = vpx_codec_enc_config_default(interface, &cfg[i], 0);
if(res[i]) {
printf("Failed to get config: %s\n", vpx_codec_err_to_string(res[i]));
return EXIT_FAILURE;
}
}
/*
* Update the default configuration according to needs of the application.
*/
/* Highest-resolution encoder settings */
cfg[0].g_w = width;
cfg[0].g_h = height;
cfg[0].g_threads = 1; /* number of threads used */
cfg[0].rc_dropframe_thresh = 30;
cfg[0].rc_end_usage = VPX_CBR;
cfg[0].rc_resize_allowed = 0;
cfg[0].rc_min_quantizer = 4;
cfg[0].rc_max_quantizer = 56;
cfg[0].rc_undershoot_pct = 98;
cfg[0].rc_overshoot_pct = 100;
cfg[0].rc_buf_initial_sz = 500;
cfg[0].rc_buf_optimal_sz = 600;
cfg[0].rc_buf_sz = 1000;
cfg[0].g_error_resilient = 1; /* Enable error resilient mode */
cfg[0].g_lag_in_frames = 0;
/* Disable automatic keyframe placement */
/* Note: These 3 settings are copied to all levels. But, except the lowest
* resolution level, all other levels are set to VPX_KF_DISABLED internally.
*/
cfg[0].kf_min_dist = 3000;
cfg[0].kf_max_dist = 3000;
cfg[0].rc_target_bitrate = target_bitrate[0]; /* Set target bitrate */
cfg[0].g_timebase.num = 1; /* Set fps */
cfg[0].g_timebase.den = framerate;
/* Other-resolution encoder settings */
for (i=1; i< NUM_ENCODERS; i++)
{
memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t));
cfg[i].g_threads = 1; /* number of threads used */
cfg[i].rc_target_bitrate = target_bitrate[i];
/* Note: Width & height of other-resolution encoders are calculated
* from the highest-resolution encoder's size and the corresponding
* down_sampling_factor.
*/
{
unsigned int iw = cfg[i-1].g_w*dsf[i-1].den + dsf[i-1].num - 1;
unsigned int ih = cfg[i-1].g_h*dsf[i-1].den + dsf[i-1].num - 1;
cfg[i].g_w = iw/dsf[i-1].num;
cfg[i].g_h = ih/dsf[i-1].num;
}
/* Make width & height to be multiplier of 2. */
if((cfg[i].g_w)%2)cfg[i].g_w++;
if((cfg[i].g_h)%2)cfg[i].g_h++;
}
/* Allocate image for each encoder */
for (i=0; i< NUM_ENCODERS; i++)
if(!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32))
die("Failed to allocate image", cfg[i].g_w, cfg[i].g_h);
if (raw[0].stride[VPX_PLANE_Y] == raw[0].d_w)
read_frame_p = read_frame;
else
read_frame_p = read_frame_by_row;
for (i=0; i< NUM_ENCODERS; i++)
if(outfile[i])
write_ivf_file_header(outfile[i], &cfg[i], 0);
/* Initialize multi-encoder */
if(vpx_codec_enc_init_multi(&codec[0], interface, &cfg[0], NUM_ENCODERS,
(show_psnr ? VPX_CODEC_USE_PSNR : 0), &dsf[0]))
die_codec(&codec[0], "Failed to initialize encoder");
/* The extra encoding configuration parameters can be set as follows. */
/* Set encoding speed */
for ( i=0; i<NUM_ENCODERS; i++)
{
int speed = -6;
if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
die_codec(&codec[i], "Failed to set cpu_used");
}
/* Set static threshold. */
for ( i=0; i<NUM_ENCODERS; i++)
{
unsigned int static_thresh = 1;
if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, static_thresh))
die_codec(&codec[i], "Failed to set static threshold");
}
/* Set NOISE_SENSITIVITY to do TEMPORAL_DENOISING */
/* Enable denoising for the highest-resolution encoder. */
if(vpx_codec_control(&codec[0], VP8E_SET_NOISE_SENSITIVITY, 1))
die_codec(&codec[0], "Failed to set noise_sensitivity");
for ( i=1; i< NUM_ENCODERS; i++)
{
if(vpx_codec_control(&codec[i], VP8E_SET_NOISE_SENSITIVITY, 0))
die_codec(&codec[i], "Failed to set noise_sensitivity");
}
frame_avail = 1;
got_data = 0;
while(frame_avail || got_data)
{
vpx_codec_iter_t iter[NUM_ENCODERS]={NULL};
const vpx_codec_cx_pkt_t *pkt[NUM_ENCODERS];
flags = 0;
frame_avail = read_frame_p(infile, &raw[0]);
if(frame_avail)
{
for ( i=1; i<NUM_ENCODERS; i++)
{
/*Scale the image down a number of times by downsampling factor*/
/* FilterMode 1 or 2 give better psnr than FilterMode 0. */
I420Scale(raw[i-1].planes[VPX_PLANE_Y], raw[i-1].stride[VPX_PLANE_Y],
raw[i-1].planes[VPX_PLANE_U], raw[i-1].stride[VPX_PLANE_U],
raw[i-1].planes[VPX_PLANE_V], raw[i-1].stride[VPX_PLANE_V],
raw[i-1].d_w, raw[i-1].d_h,
raw[i].planes[VPX_PLANE_Y], raw[i].stride[VPX_PLANE_Y],
raw[i].planes[VPX_PLANE_U], raw[i].stride[VPX_PLANE_U],
raw[i].planes[VPX_PLANE_V], raw[i].stride[VPX_PLANE_V],
raw[i].d_w, raw[i].d_h, 1);
}
}
/* Encode each frame at multi-levels */
if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
frame_cnt, 1, flags, arg_deadline))
die_codec(&codec[0], "Failed to encode frame");
for (i=NUM_ENCODERS-1; i>=0 ; i--)
{
got_data = 0;
while( (pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i])) )
{
got_data = 1;
switch(pkt[i]->kind) {
case VPX_CODEC_CX_FRAME_PKT:
write_ivf_frame_header(outfile[i], pkt[i]);
(void) fwrite(pkt[i]->data.frame.buf, 1,
pkt[i]->data.frame.sz, outfile[i]);
break;
case VPX_CODEC_PSNR_PKT:
if (show_psnr)
{
int j;
psnr_sse_total[i] += pkt[i]->data.psnr.sse[0];
psnr_samples_total[i] += pkt[i]->data.psnr.samples[0];
for (j = 0; j < 4; j++)
{
}
psnr_count[i]++;
}
break;
default:
break;
}
printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT
&& (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":".");
fflush(stdout);
}
}
frame_cnt++;
}
printf("\n");
fclose(infile);
printf("Processed %ld frames.\n",(long int)frame_cnt-1);
for (i=0; i< NUM_ENCODERS; i++)
{
/* Calculate PSNR and print it out */
if ( (show_psnr) && (psnr_count[i]>0) )
{
int j;
double ovpsnr = sse_to_psnr(psnr_samples_total[i], 255.0,
psnr_sse_total[i]);
fprintf(stderr, "\n ENC%d PSNR (Overall/Avg/Y/U/V)", i);
fprintf(stderr, " %.3lf", ovpsnr);
for (j = 0; j < 4; j++)
{
fprintf(stderr, " %.3lf", psnr_totals[i][j]/psnr_count[i]);
}
}
if(vpx_codec_destroy(&codec[i]))
die_codec(&codec[i], "Failed to destroy codec");
vpx_img_free(&raw[i]);
if(!outfile[i])
continue;
/* Try to rewrite the file header with the actual frame count */
if(!fseek(outfile[i], 0, SEEK_SET))
write_ivf_file_header(outfile[i], &cfg[i], frame_cnt-1);
fclose(outfile[i]);
}
printf("\n");
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv)
{
FILE *infile, *outfile[NUM_ENCODERS];
FILE *downsampled_input[NUM_ENCODERS - 1];
char filename[50];
vpx_codec_ctx_t codec[NUM_ENCODERS];
vpx_codec_enc_cfg_t cfg[NUM_ENCODERS];
int frame_cnt = 0;
vpx_image_t raw[NUM_ENCODERS];
vpx_codec_err_t res[NUM_ENCODERS];
int i;
long width;
long height;
int length_frame;
int frame_avail;
int got_data;
int flags = 0;
int layer_id = 0;
int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS]
= {0};
int flag_periodicity;
/*Currently, only realtime mode is supported in multi-resolution encoding.*/
int arg_deadline = VPX_DL_REALTIME;
/* Set show_psnr to 1/0 to show/not show PSNR. Choose show_psnr=0 if you
don't need to know PSNR, which will skip PSNR calculation and save
encoding time. */
int show_psnr = 0;
int key_frame_insert = 0;
uint64_t psnr_sse_total[NUM_ENCODERS] = {0};
uint64_t psnr_samples_total[NUM_ENCODERS] = {0};
double psnr_totals[NUM_ENCODERS][4] = {{0,0}};
int psnr_count[NUM_ENCODERS] = {0};
double cx_time = 0;
struct timeval tv1, tv2, difftv;
/* Set the required target bitrates for each resolution level.
* If target bitrate for highest-resolution level is set to 0,
* (i.e. target_bitrate[0]=0), we skip encoding at that level.
*/
unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
/* Enter the frame rate of the input video */
int framerate = 30;
/* Set down-sampling factor for each resolution level.
dsf[0] controls down sampling from level 0 to level 1;
dsf[1] controls down sampling from level 1 to level 2;
dsf[2] is not used. */
vpx_rational_t dsf[NUM_ENCODERS] = {{2, 1}, {2, 1}, {1, 1}};
/* Set the number of temporal layers for each encoder/resolution level,
* starting from highest resoln down to lowest resoln. */
unsigned int num_temporal_layers[NUM_ENCODERS] = {3, 3, 3};
if(argc!= (7 + 3 * NUM_ENCODERS))
die("Usage: %s <width> <height> <frame_rate> <infile> <outfile(s)> "
"<rate_encoder(s)> <temporal_layer(s)> <key_frame_insert> <output psnr?> \n",
argv[0]);
printf("Using %s\n",vpx_codec_iface_name(interface));
width = strtol(argv[1], NULL, 0);
height = strtol(argv[2], NULL, 0);
framerate = strtol(argv[3], NULL, 0);
if(width < 16 || width%2 || height <16 || height%2)
die("Invalid resolution: %ldx%ld", width, height);
/* Open input video file for encoding */
if(!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading", argv[4]);
/* Open output file for each encoder to output bitstreams */
for (i=0; i< NUM_ENCODERS; i++)
{
if(!target_bitrate[i])
{
outfile[i] = NULL;
continue;
}
if(!(outfile[i] = fopen(argv[i+5], "wb")))
die("Failed to open %s for writing", argv[i+4]);
}
// Bitrates per spatial layer: overwrite default rates above.
for (i=0; i< NUM_ENCODERS; i++)
{
target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0);
}
// Temporal layers per spatial layers: overwrite default settings above.
for (i=0; i< NUM_ENCODERS; i++)
{
num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0);
if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3)
die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n",
num_temporal_layers);
}
/* Open file to write out each spatially downsampled input stream. */
for (i=0; i< NUM_ENCODERS - 1; i++)
{
// Highest resoln is encoder 0.
if (sprintf(filename,"ds%d.yuv",NUM_ENCODERS - i) < 0)
{
return EXIT_FAILURE;
}
downsampled_input[i] = fopen(filename,"wb");
}
key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0);
show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0);
/* Populate default encoder configuration */
for (i=0; i< NUM_ENCODERS; i++)
{
res[i] = vpx_codec_enc_config_default(interface, &cfg[i], 0);
if(res[i]) {
printf("Failed to get config: %s\n", vpx_codec_err_to_string(res[i]));
return EXIT_FAILURE;
}
}
/*
* Update the default configuration according to needs of the application.
*/
/* Highest-resolution encoder settings */
cfg[0].g_w = width;
cfg[0].g_h = height;
cfg[0].rc_dropframe_thresh = 0;
cfg[0].rc_end_usage = VPX_CBR;
cfg[0].rc_resize_allowed = 0;
cfg[0].rc_min_quantizer = 2;
cfg[0].rc_max_quantizer = 56;
cfg[0].rc_undershoot_pct = 100;
cfg[0].rc_overshoot_pct = 15;
cfg[0].rc_buf_initial_sz = 500;
cfg[0].rc_buf_optimal_sz = 600;
cfg[0].rc_buf_sz = 1000;
cfg[0].g_error_resilient = 1; /* Enable error resilient mode */
cfg[0].g_lag_in_frames = 0;
/* Disable automatic keyframe placement */
/* Note: These 3 settings are copied to all levels. But, except the lowest
* resolution level, all other levels are set to VPX_KF_DISABLED internally.
*/
cfg[0].kf_min_dist = 3000;
cfg[0].kf_max_dist = 3000;
cfg[0].rc_target_bitrate = target_bitrate[0]; /* Set target bitrate */
cfg[0].g_timebase.num = 1; /* Set fps */
cfg[0].g_timebase.den = framerate;
/* Other-resolution encoder settings */
for (i=1; i< NUM_ENCODERS; i++)
{
memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t));
cfg[i].rc_target_bitrate = target_bitrate[i];
/* Note: Width & height of other-resolution encoders are calculated
* from the highest-resolution encoder's size and the corresponding
* down_sampling_factor.
*/
{
unsigned int iw = cfg[i-1].g_w*dsf[i-1].den + dsf[i-1].num - 1;
unsigned int ih = cfg[i-1].g_h*dsf[i-1].den + dsf[i-1].num - 1;
cfg[i].g_w = iw/dsf[i-1].num;
cfg[i].g_h = ih/dsf[i-1].num;
}
/* Make width & height to be multiplier of 2. */
if((cfg[i].g_w)%2)cfg[i].g_w++;
if((cfg[i].g_h)%2)cfg[i].g_h++;
}
// Set the number of threads per encode/spatial layer.
// (1, 1, 1) means no encoder threading.
cfg[0].g_threads = 2;
cfg[1].g_threads = 1;
cfg[2].g_threads = 1;
/* Allocate image for each encoder */
for (i=0; i< NUM_ENCODERS; i++)
if(!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32))
die("Failed to allocate image", cfg[i].g_w, cfg[i].g_h);
if (raw[0].stride[VPX_PLANE_Y] == raw[0].d_w)
read_frame_p = read_frame;
else
read_frame_p = read_frame_by_row;
for (i=0; i< NUM_ENCODERS; i++)
if(outfile[i])
write_ivf_file_header(outfile[i], &cfg[i], 0);
/* Temporal layers settings */
for ( i=0; i<NUM_ENCODERS; i++)
{
set_temporal_layer_pattern(num_temporal_layers[i],
&cfg[i],
cfg[i].rc_target_bitrate,
&layer_flags[i * VPX_TS_MAX_PERIODICITY]);
}
/* Initialize multi-encoder */
if(vpx_codec_enc_init_multi(&codec[0], interface, &cfg[0], NUM_ENCODERS,
(show_psnr ? VPX_CODEC_USE_PSNR : 0), &dsf[0]))
die_codec(&codec[0], "Failed to initialize encoder");
/* The extra encoding configuration parameters can be set as follows. */
/* Set encoding speed */
for ( i=0; i<NUM_ENCODERS; i++)
{
int speed = -6;
/* Lower speed for the lowest resolution. */
if (i == NUM_ENCODERS - 1) speed = -4;
if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
die_codec(&codec[i], "Failed to set cpu_used");
}
/* Set static threshold = 1 for all encoders */
for ( i=0; i<NUM_ENCODERS; i++)
{
if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1))
die_codec(&codec[i], "Failed to set static threshold");
}
/* Set NOISE_SENSITIVITY to do TEMPORAL_DENOISING */
/* Enable denoising for the highest-resolution encoder. */
if(vpx_codec_control(&codec[0], VP8E_SET_NOISE_SENSITIVITY, 1))
die_codec(&codec[0], "Failed to set noise_sensitivity");
for ( i=1; i< NUM_ENCODERS; i++)
{
if(vpx_codec_control(&codec[i], VP8E_SET_NOISE_SENSITIVITY, 0))
die_codec(&codec[i], "Failed to set noise_sensitivity");
}
/* Set the number of token partitions */
for ( i=0; i<NUM_ENCODERS; i++)
{
if(vpx_codec_control(&codec[i], VP8E_SET_TOKEN_PARTITIONS, 1))
die_codec(&codec[i], "Failed to set static threshold");
}
/* Set the max intra target bitrate */
for ( i=0; i<NUM_ENCODERS; i++)
{
unsigned int max_intra_size_pct =
(int)(((double)cfg[0].rc_buf_optimal_sz * 0.5) * framerate / 10);
if(vpx_codec_control(&codec[i], VP8E_SET_MAX_INTRA_BITRATE_PCT,
max_intra_size_pct))
die_codec(&codec[i], "Failed to set static threshold");
//printf("%d %d \n",i,max_intra_size_pct);
}
frame_avail = 1;
got_data = 0;
while(frame_avail || got_data)
{
vpx_codec_iter_t iter[NUM_ENCODERS]={NULL};
const vpx_codec_cx_pkt_t *pkt[NUM_ENCODERS];
flags = 0;
frame_avail = read_frame_p(infile, &raw[0]);
if(frame_avail)
{
for ( i=1; i<NUM_ENCODERS; i++)
{
/*Scale the image down a number of times by downsampling factor*/
/* FilterMode 1 or 2 give better psnr than FilterMode 0. */
I420Scale(raw[i-1].planes[VPX_PLANE_Y], raw[i-1].stride[VPX_PLANE_Y],
raw[i-1].planes[VPX_PLANE_U], raw[i-1].stride[VPX_PLANE_U],
raw[i-1].planes[VPX_PLANE_V], raw[i-1].stride[VPX_PLANE_V],
raw[i-1].d_w, raw[i-1].d_h,
raw[i].planes[VPX_PLANE_Y], raw[i].stride[VPX_PLANE_Y],
raw[i].planes[VPX_PLANE_U], raw[i].stride[VPX_PLANE_U],
raw[i].planes[VPX_PLANE_V], raw[i].stride[VPX_PLANE_V],
raw[i].d_w, raw[i].d_h, 1);
/* Write out down-sampled input. */
length_frame = cfg[i].g_w * cfg[i].g_h *3/2;
if (fwrite(raw[i].planes[0], 1, length_frame,
downsampled_input[NUM_ENCODERS - i - 1]) !=
length_frame)
{
return EXIT_FAILURE;
}
}
}
/* Set the flags (reference and update) for all the encoders.*/
for ( i=0; i<NUM_ENCODERS; i++)
{
layer_id = cfg[i].ts_layer_id[frame_cnt % cfg[i].ts_periodicity];
flags = 0;
flag_periodicity = periodicity_to_num_layers
[num_temporal_layers[i] - 1];
flags = layer_flags[i * VPX_TS_MAX_PERIODICITY +
frame_cnt % flag_periodicity];
// Key frame flag for first frame.
if (frame_cnt == 0)
{
flags |= VPX_EFLAG_FORCE_KF;
}
if (frame_cnt > 0 && frame_cnt == key_frame_insert)
{
flags = VPX_EFLAG_FORCE_KF;
}
vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags);
vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
}
gettimeofday(&tv1, NULL);
/* Encode each frame at multi-levels */
/* Note the flags must be set to 0 in the encode call if they are set
for each frame with the vpx_codec_control(), as done above. */
if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
frame_cnt, 1, 0, arg_deadline))
{
die_codec(&codec[0], "Failed to encode frame");
}
gettimeofday(&tv2, NULL);
timersub(&tv2, &tv1, &difftv);
cx_time += (double)(difftv.tv_sec * 1000000 + difftv.tv_usec);
for (i=NUM_ENCODERS-1; i>=0 ; i--)
{
got_data = 0;
while( (pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i])) )
{
got_data = 1;
switch(pkt[i]->kind) {
case VPX_CODEC_CX_FRAME_PKT:
write_ivf_frame_header(outfile[i], pkt[i]);
(void) fwrite(pkt[i]->data.frame.buf, 1,
pkt[i]->data.frame.sz, outfile[i]);
break;
case VPX_CODEC_PSNR_PKT:
if (show_psnr)
{
int j;
psnr_sse_total[i] += pkt[i]->data.psnr.sse[0];
psnr_samples_total[i] += pkt[i]->data.psnr.samples[0];
                        for (j = 0; j < 4; j++)
                        {
                            psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j];
                        }
psnr_count[i]++;
}
break;
default:
break;
}
printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT
&& (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":"");
fflush(stdout);
}
}
frame_cnt++;
}
printf("\n");
printf("FPS for encoding %d %f %f \n", frame_cnt, (float)cx_time / 1000000,
1000000 * (double)frame_cnt / (double)cx_time);
fclose(infile);
printf("Processed %ld frames.\n",(long int)frame_cnt-1);
for (i=0; i< NUM_ENCODERS; i++)
{
/* Calculate PSNR and print it out */
if ( (show_psnr) && (psnr_count[i]>0) )
{
int j;
double ovpsnr = sse_to_psnr(psnr_samples_total[i], 255.0,
psnr_sse_total[i]);
fprintf(stderr, "\n ENC%d PSNR (Overall/Avg/Y/U/V)", i);
fprintf(stderr, " %.3lf", ovpsnr);
for (j = 0; j < 4; j++)
{
fprintf(stderr, " %.3lf", psnr_totals[i][j]/psnr_count[i]);
}
}
if(vpx_codec_destroy(&codec[i]))
die_codec(&codec[i], "Failed to destroy codec");
vpx_img_free(&raw[i]);
if(!outfile[i])
continue;
/* Try to rewrite the file header with the actual frame count */
if(!fseek(outfile[i], 0, SEEK_SET))
write_ivf_file_header(outfile[i], &cfg[i], frame_cnt-1);
fclose(outfile[i]);
}
printf("\n");
return EXIT_SUCCESS;
}
|
@@ -13,22 +13,40 @@
* High-resolution input video is down-sampled to lower-resolutions. The
* encoder then encodes the video and outputs multiple bitstreams with
* different resolutions.
+ *
+ * This test also allows for setting temporal layers for each spatial layer.
+ * A different number of temporal layers per spatial stream may be used.
+ * Currently up to 3 temporal layers per spatial stream (encoder) are supported
+ * in this test.
*/
+
+#include "./vpx_config.h"
+
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <math.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
+#include <assert.h>
+#include <sys/time.h>
+#if USE_POSIX_MMAP
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+#include "vpx_ports/vpx_timer.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "vpx_ports/mem_ops.h"
-#include "./tools_common.h"
+#include "../tools_common.h"
#define interface (vpx_codec_vp8_cx())
#define fourcc 0x30385056
-#define IVF_FILE_HDR_SZ (32)
-#define IVF_FRAME_HDR_SZ (12)
+void usage_exit(void) {
+ exit(EXIT_FAILURE);
+}
/*
* The input video frame is downsampled several times to generate a multi-level
@@ -38,32 +56,18 @@
* bitstreams with resolution of 1280x720(level 0), 640x360(level 1), and
* 320x180(level 2) respectively.
*/
+
+/* Number of encoders (spatial resolutions) used in this test. */
#define NUM_ENCODERS 3
+/* Maximum number of temporal layers allowed for this test. */
+#define MAX_NUM_TEMPORAL_LAYERS 3
+
/* This example uses the scaler function in libyuv. */
#include "third_party/libyuv/include/libyuv/basic_types.h"
#include "third_party/libyuv/include/libyuv/scale.h"
#include "third_party/libyuv/include/libyuv/cpu_id.h"
-static void die(const char *fmt, ...) {
- va_list ap;
-
- va_start(ap, fmt);
- vprintf(fmt, ap);
- if(fmt[strlen(fmt)-1] != '\n')
- printf("\n");
- exit(EXIT_FAILURE);
-}
-
-static void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
- const char *detail = vpx_codec_error_detail(ctx);
-
- printf("%s: %s\n", s, vpx_codec_error(ctx));
- if(detail)
- printf(" %s\n",detail);
- exit(EXIT_FAILURE);
-}
-
int (*read_frame_p)(FILE *f, vpx_image_t *img);
static int read_frame(FILE *f, vpx_image_t *img) {
@@ -170,21 +174,172 @@
(void) fwrite(header, 1, 12, outfile);
}
+/* Temporal scaling parameters */
+/* This sets all the temporal layer parameters given |num_temporal_layers|,
+ * including the target bit allocation across temporal layers. Bit allocation
+ * parameters will be passed in as user parameters in another version.
+ */
+static void set_temporal_layer_pattern(int num_temporal_layers,
+ vpx_codec_enc_cfg_t *cfg,
+ int bitrate,
+ int *layer_flags)
+{
+ assert(num_temporal_layers <= MAX_NUM_TEMPORAL_LAYERS);
+ switch (num_temporal_layers)
+ {
+ case 1:
+ {
+ /* 1-layer */
+ cfg->ts_number_layers = 1;
+ cfg->ts_periodicity = 1;
+ cfg->ts_rate_decimator[0] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_target_bitrate[0] = bitrate;
+
+ // Update L only.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+ break;
+ }
+
+ case 2:
+ {
+ /* 2-layers, with sync point at first frame of layer 1. */
+ cfg->ts_number_layers = 2;
+ cfg->ts_periodicity = 2;
+ cfg->ts_rate_decimator[0] = 2;
+ cfg->ts_rate_decimator[1] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 1;
+ // Use 60/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[1] = bitrate;
+
+ /* 0=L, 1=GF */
+ // ARF is used as predictor for all frames, and is only updated on
+ // key frame. Sync point every 8 frames.
+
+ // Layer 0: predict from L and ARF, update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: sync point: predict from L and ARF, and update G.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 0, predict from L and ARF, update L.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: predict from L, G and ARF, and update G.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0
+ layer_flags[4] = layer_flags[2];
+
+ // Layer 1
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 0
+ layer_flags[6] = layer_flags[4];
+
+ // Layer 1
+ layer_flags[7] = layer_flags[5];
+ break;
+ }
+
+ case 3:
+ default:
+ {
+ // 3-layers structure where ARF is used as predictor for all frames,
+ // and is only updated on key frame.
+ // Sync points for layer 1 and 2 every 8 frames.
+ cfg->ts_number_layers = 3;
+ cfg->ts_periodicity = 4;
+ cfg->ts_rate_decimator[0] = 4;
+ cfg->ts_rate_decimator[1] = 2;
+ cfg->ts_rate_decimator[2] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 2;
+ cfg->ts_layer_id[2] = 1;
+ cfg->ts_layer_id[3] = 2;
+ // Use 40/20/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.4f * bitrate;
+ cfg->ts_target_bitrate[1] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[2] = bitrate;
+
+ /* 0=L, 1=GF, 2=ARF */
+
+ // Layer 0: predict from L and ARF; update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: sync point: predict from L and ARF; update none.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 1: sync point: predict from L and ARF; update G.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0: predict from L and ARF; update L.
+ layer_flags[4] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 1: predict from L, G, ARF; update G.
+ layer_flags[6] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[7] = layer_flags[3];
+ break;
+ }
+ }
+}
+
+/* The periodicity of the pattern given the number of temporal layers. */
+static int periodicity_to_num_layers[MAX_NUM_TEMPORAL_LAYERS] = {1, 8, 8};
+
int main(int argc, char **argv)
{
- FILE *infile, *outfile[NUM_ENCODERS];
+ FILE *infile, *outfile[NUM_ENCODERS];
+ FILE *downsampled_input[NUM_ENCODERS - 1];
+ char filename[50];
vpx_codec_ctx_t codec[NUM_ENCODERS];
vpx_codec_enc_cfg_t cfg[NUM_ENCODERS];
- vpx_codec_pts_t frame_cnt = 0;
+ int frame_cnt = 0;
vpx_image_t raw[NUM_ENCODERS];
vpx_codec_err_t res[NUM_ENCODERS];
int i;
long width;
long height;
+ int length_frame;
int frame_avail;
int got_data;
int flags = 0;
+ int layer_id = 0;
+
+ int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS]
+ = {0};
+ int flag_periodicity;
/*Currently, only realtime mode is supported in multi-resolution encoding.*/
int arg_deadline = VPX_DL_REALTIME;
@@ -193,39 +348,51 @@
don't need to know PSNR, which will skip PSNR calculation and save
encoding time. */
int show_psnr = 0;
+ int key_frame_insert = 0;
uint64_t psnr_sse_total[NUM_ENCODERS] = {0};
uint64_t psnr_samples_total[NUM_ENCODERS] = {0};
double psnr_totals[NUM_ENCODERS][4] = {{0,0}};
int psnr_count[NUM_ENCODERS] = {0};
+ double cx_time = 0;
+ struct timeval tv1, tv2, difftv;
+
/* Set the required target bitrates for each resolution level.
* If target bitrate for highest-resolution level is set to 0,
* (i.e. target_bitrate[0]=0), we skip encoding at that level.
*/
unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
+
/* Enter the frame rate of the input video */
int framerate = 30;
+
/* Set down-sampling factor for each resolution level.
dsf[0] controls down sampling from level 0 to level 1;
dsf[1] controls down sampling from level 1 to level 2;
dsf[2] is not used. */
vpx_rational_t dsf[NUM_ENCODERS] = {{2, 1}, {2, 1}, {1, 1}};
- if(argc!= (5+NUM_ENCODERS))
- die("Usage: %s <width> <height> <infile> <outfile(s)> <output psnr?>\n",
+ /* Set the number of temporal layers for each encoder/resolution level,
+ * starting from highest resoln down to lowest resoln. */
+ unsigned int num_temporal_layers[NUM_ENCODERS] = {3, 3, 3};
+
+ if(argc!= (7 + 3 * NUM_ENCODERS))
+ die("Usage: %s <width> <height> <frame_rate> <infile> <outfile(s)> "
+ "<rate_encoder(s)> <temporal_layer(s)> <key_frame_insert> <output psnr?> \n",
argv[0]);
printf("Using %s\n",vpx_codec_iface_name(interface));
width = strtol(argv[1], NULL, 0);
height = strtol(argv[2], NULL, 0);
+ framerate = strtol(argv[3], NULL, 0);
if(width < 16 || width%2 || height <16 || height%2)
die("Invalid resolution: %ldx%ld", width, height);
/* Open input video file for encoding */
- if(!(infile = fopen(argv[3], "rb")))
- die("Failed to open %s for reading", argv[3]);
+ if(!(infile = fopen(argv[4], "rb")))
+ die("Failed to open %s for reading", argv[4]);
/* Open output file for each encoder to output bitstreams */
for (i=0; i< NUM_ENCODERS; i++)
@@ -236,11 +403,40 @@
continue;
}
- if(!(outfile[i] = fopen(argv[i+4], "wb")))
+ if(!(outfile[i] = fopen(argv[i+5], "wb")))
die("Failed to open %s for writing", argv[i+4]);
}
- show_psnr = strtol(argv[NUM_ENCODERS + 4], NULL, 0);
+ // Bitrates per spatial layer: overwrite default rates above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0);
+ }
+
+ // Temporal layers per spatial layers: overwrite default settings above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0);
+ if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3)
+ die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n",
+              num_temporal_layers[i]);
+ }
+
+ /* Open file to write out each spatially downsampled input stream. */
+ for (i=0; i< NUM_ENCODERS - 1; i++)
+ {
+ // Highest resoln is encoder 0.
+ if (sprintf(filename,"ds%d.yuv",NUM_ENCODERS - i) < 0)
+ {
+ return EXIT_FAILURE;
+ }
+ downsampled_input[i] = fopen(filename,"wb");
+ }
+
+ key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0);
+
+ show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0);
+
/* Populate default encoder configuration */
for (i=0; i< NUM_ENCODERS; i++)
@@ -258,14 +454,13 @@
/* Highest-resolution encoder settings */
cfg[0].g_w = width;
cfg[0].g_h = height;
- cfg[0].g_threads = 1; /* number of threads used */
- cfg[0].rc_dropframe_thresh = 30;
+ cfg[0].rc_dropframe_thresh = 0;
cfg[0].rc_end_usage = VPX_CBR;
cfg[0].rc_resize_allowed = 0;
- cfg[0].rc_min_quantizer = 4;
+ cfg[0].rc_min_quantizer = 2;
cfg[0].rc_max_quantizer = 56;
- cfg[0].rc_undershoot_pct = 98;
- cfg[0].rc_overshoot_pct = 100;
+ cfg[0].rc_undershoot_pct = 100;
+ cfg[0].rc_overshoot_pct = 15;
cfg[0].rc_buf_initial_sz = 500;
cfg[0].rc_buf_optimal_sz = 600;
cfg[0].rc_buf_sz = 1000;
@@ -276,7 +471,6 @@
/* Note: These 3 settings are copied to all levels. But, except the lowest
* resolution level, all other levels are set to VPX_KF_DISABLED internally.
*/
- //cfg[0].kf_mode = VPX_KF_DISABLED;
cfg[0].kf_mode = VPX_KF_AUTO;
cfg[0].kf_min_dist = 3000;
cfg[0].kf_max_dist = 3000;
@@ -290,7 +484,6 @@
{
memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t));
- cfg[i].g_threads = 1; /* number of threads used */
cfg[i].rc_target_bitrate = target_bitrate[i];
/* Note: Width & height of other-resolution encoders are calculated
@@ -310,6 +503,13 @@
if((cfg[i].g_h)%2)cfg[i].g_h++;
}
+
+ // Set the number of threads per encode/spatial layer.
+ // (1, 1, 1) means no encoder threading.
+ cfg[0].g_threads = 2;
+ cfg[1].g_threads = 1;
+ cfg[2].g_threads = 1;
+
/* Allocate image for each encoder */
for (i=0; i< NUM_ENCODERS; i++)
if(!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32))
@@ -324,6 +524,15 @@
if(outfile[i])
write_ivf_file_header(outfile[i], &cfg[i], 0);
+ /* Temporal layers settings */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ set_temporal_layer_pattern(num_temporal_layers[i],
+ &cfg[i],
+ cfg[i].rc_target_bitrate,
+ &layer_flags[i * VPX_TS_MAX_PERIODICITY]);
+ }
+
/* Initialize multi-encoder */
if(vpx_codec_enc_init_multi(&codec[0], interface, &cfg[0], NUM_ENCODERS,
(show_psnr ? VPX_CODEC_USE_PSNR : 0), &dsf[0]))
@@ -334,15 +543,16 @@
for ( i=0; i<NUM_ENCODERS; i++)
{
int speed = -6;
+ /* Lower speed for the lowest resolution. */
+ if (i == NUM_ENCODERS - 1) speed = -4;
if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
die_codec(&codec[i], "Failed to set cpu_used");
}
- /* Set static threshold. */
+ /* Set static threshold = 1 for all encoders */
for ( i=0; i<NUM_ENCODERS; i++)
{
- unsigned int static_thresh = 1;
- if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, static_thresh))
+ if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1))
die_codec(&codec[i], "Failed to set static threshold");
}
@@ -356,6 +566,23 @@
die_codec(&codec[i], "Failed to set noise_sensitivity");
}
+ /* Set the number of token partitions */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ if(vpx_codec_control(&codec[i], VP8E_SET_TOKEN_PARTITIONS, 1))
+        die_codec(&codec[i], "Failed to set token partitions");
+ }
+
+ /* Set the max intra target bitrate */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ unsigned int max_intra_size_pct =
+ (int)(((double)cfg[0].rc_buf_optimal_sz * 0.5) * framerate / 10);
+ if(vpx_codec_control(&codec[i], VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ max_intra_size_pct))
+         die_codec(&codec[i], "Failed to set max intra bitrate pct");
+ //printf("%d %d \n",i,max_intra_size_pct);
+ }
frame_avail = 1;
got_data = 0;
@@ -382,18 +609,55 @@
raw[i].planes[VPX_PLANE_U], raw[i].stride[VPX_PLANE_U],
raw[i].planes[VPX_PLANE_V], raw[i].stride[VPX_PLANE_V],
raw[i].d_w, raw[i].d_h, 1);
+ /* Write out down-sampled input. */
+ length_frame = cfg[i].g_w * cfg[i].g_h *3/2;
+ if (fwrite(raw[i].planes[0], 1, length_frame,
+ downsampled_input[NUM_ENCODERS - i - 1]) !=
+ length_frame)
+ {
+ return EXIT_FAILURE;
+ }
}
}
- /* Encode each frame at multi-levels */
- if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
- frame_cnt, 1, flags, arg_deadline))
- die_codec(&codec[0], "Failed to encode frame");
+ /* Set the flags (reference and update) for all the encoders.*/
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ layer_id = cfg[i].ts_layer_id[frame_cnt % cfg[i].ts_periodicity];
+ flags = 0;
+ flag_periodicity = periodicity_to_num_layers
+ [num_temporal_layers[i] - 1];
+ flags = layer_flags[i * VPX_TS_MAX_PERIODICITY +
+ frame_cnt % flag_periodicity];
+ // Key frame flag for first frame.
+ if (frame_cnt == 0)
+ {
+ flags |= VPX_EFLAG_FORCE_KF;
+ }
+ if (frame_cnt > 0 && frame_cnt == key_frame_insert)
+ {
+ flags = VPX_EFLAG_FORCE_KF;
+ }
+ vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags);
+ vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+ }
+
+ gettimeofday(&tv1, NULL);
+ /* Encode each frame at multi-levels */
+ /* Note the flags must be set to 0 in the encode call if they are set
+ for each frame with the vpx_codec_control(), as done above. */
+ if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
+ frame_cnt, 1, 0, arg_deadline))
+ {
+ die_codec(&codec[0], "Failed to encode frame");
+ }
+ gettimeofday(&tv2, NULL);
+ timersub(&tv2, &tv1, &difftv);
+ cx_time += (double)(difftv.tv_sec * 1000000 + difftv.tv_usec);
for (i=NUM_ENCODERS-1; i>=0 ; i--)
{
got_data = 0;
-
while( (pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i])) )
{
got_data = 1;
@@ -412,7 +676,6 @@
psnr_samples_total[i] += pkt[i]->data.psnr.samples[0];
for (j = 0; j < 4; j++)
{
- //fprintf(stderr, "%.3lf ", pkt[i]->data.psnr.psnr[j]);
psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j];
}
psnr_count[i]++;
@@ -423,13 +686,15 @@
break;
}
printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT
- && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":".");
+ && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":"");
fflush(stdout);
}
}
frame_cnt++;
}
printf("\n");
+ printf("FPS for encoding %d %f %f \n", frame_cnt, (float)cx_time / 1000000,
+ 1000000 * (double)frame_cnt / (double)cx_time);
fclose(infile);
|
CWE-119
|
FILE *infile, *outfile[NUM_ENCODERS];
vpx_codec_pts_t frame_cnt = 0;
if(argc!= (5+NUM_ENCODERS))
die("Usage: %s <width> <height> <infile> <outfile(s)> <output psnr?>\n",
if(!(infile = fopen(argv[3], "rb")))
die("Failed to open %s for reading", argv[3]);
if(!(outfile[i] = fopen(argv[i+4], "wb")))
show_psnr = strtol(argv[NUM_ENCODERS + 4], NULL, 0);
cfg[0].g_threads = 1; /* number of threads used */
cfg[0].rc_dropframe_thresh = 30;
cfg[0].rc_min_quantizer = 4;
cfg[0].rc_undershoot_pct = 98;
cfg[0].rc_overshoot_pct = 100;
cfg[i].g_threads = 1; /* number of threads used */
/* Set static threshold. */
unsigned int static_thresh = 1;
if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, static_thresh))
/* Encode each frame at multi-levels */
if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
frame_cnt, 1, flags, arg_deadline))
die_codec(&codec[0], "Failed to encode frame");
&& (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":".");
|
FILE *infile, *outfile[NUM_ENCODERS];
FILE *downsampled_input[NUM_ENCODERS - 1];
char filename[50];
int frame_cnt = 0;
int length_frame;
int layer_id = 0;
int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS]
= {0};
int flag_periodicity;
int key_frame_insert = 0;
double cx_time = 0;
struct timeval tv1, tv2, difftv;
/* Set the number of temporal layers for each encoder/resolution level,
* starting from highest resoln down to lowest resoln. */
unsigned int num_temporal_layers[NUM_ENCODERS] = {3, 3, 3};
if(argc!= (7 + 3 * NUM_ENCODERS))
die("Usage: %s <width> <height> <frame_rate> <infile> <outfile(s)> "
"<rate_encoder(s)> <temporal_layer(s)> <key_frame_insert> <output psnr?> \n",
framerate = strtol(argv[3], NULL, 0);
if(!(infile = fopen(argv[4], "rb")))
die("Failed to open %s for reading", argv[4]);
if(!(outfile[i] = fopen(argv[i+5], "wb")))
// Bitrates per spatial layer: overwrite default rates above.
for (i=0; i< NUM_ENCODERS; i++)
{
target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0);
}
// Temporal layers per spatial layers: overwrite default settings above.
for (i=0; i< NUM_ENCODERS; i++)
{
num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0);
if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3)
die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n",
num_temporal_layers[i]);
}
/* Open file to write out each spatially downsampled input stream. */
for (i=0; i< NUM_ENCODERS - 1; i++)
{
// Highest resoln is encoder 0.
if (sprintf(filename,"ds%d.yuv",NUM_ENCODERS - i) < 0)
{
return EXIT_FAILURE;
}
downsampled_input[i] = fopen(filename,"wb");
}
key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0);
show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0);
cfg[0].rc_dropframe_thresh = 0;
cfg[0].rc_min_quantizer = 2;
cfg[0].rc_undershoot_pct = 100;
cfg[0].rc_overshoot_pct = 15;
// Set the number of threads per encode/spatial layer.
// (1, 1, 1) means no encoder threading.
cfg[0].g_threads = 2;
cfg[1].g_threads = 1;
cfg[2].g_threads = 1;
/* Temporal layers settings */
for ( i=0; i<NUM_ENCODERS; i++)
{
set_temporal_layer_pattern(num_temporal_layers[i],
&cfg[i],
cfg[i].rc_target_bitrate,
&layer_flags[i * VPX_TS_MAX_PERIODICITY]);
}
/* Lower speed for the lowest resolution. */
if (i == NUM_ENCODERS - 1) speed = -4;
/* Set static threshold = 1 for all encoders */
if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1))
/* Set the number of token partitions */
for ( i=0; i<NUM_ENCODERS; i++)
{
if(vpx_codec_control(&codec[i], VP8E_SET_TOKEN_PARTITIONS, 1))
die_codec(&codec[i], "Failed to set token partitions");
}
/* Set the max intra target bitrate */
for ( i=0; i<NUM_ENCODERS; i++)
{
unsigned int max_intra_size_pct =
(int)(((double)cfg[0].rc_buf_optimal_sz * 0.5) * framerate / 10);
if(vpx_codec_control(&codec[i], VP8E_SET_MAX_INTRA_BITRATE_PCT,
max_intra_size_pct))
die_codec(&codec[i], "Failed to set max intra bitrate pct");
//printf("%d %d \n",i,max_intra_size_pct);
}
/* Write out down-sampled input. */
length_frame = cfg[i].g_w * cfg[i].g_h *3/2;
if (fwrite(raw[i].planes[0], 1, length_frame,
downsampled_input[NUM_ENCODERS - i - 1]) !=
length_frame)
{
return EXIT_FAILURE;
}
/* Set the flags (reference and update) for all the encoders.*/
for ( i=0; i<NUM_ENCODERS; i++)
{
layer_id = cfg[i].ts_layer_id[frame_cnt % cfg[i].ts_periodicity];
flags = 0;
flag_periodicity = periodicity_to_num_layers
[num_temporal_layers[i] - 1];
flags = layer_flags[i * VPX_TS_MAX_PERIODICITY +
frame_cnt % flag_periodicity];
// Key frame flag for first frame.
if (frame_cnt == 0)
{
flags |= VPX_EFLAG_FORCE_KF;
}
if (frame_cnt > 0 && frame_cnt == key_frame_insert)
{
flags = VPX_EFLAG_FORCE_KF;
}
vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags);
vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
}
gettimeofday(&tv1, NULL);
/* Encode each frame at multi-levels */
/* Note the flags must be set to 0 in the encode call if they are set
for each frame with the vpx_codec_control(), as done above. */
if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
frame_cnt, 1, 0, arg_deadline))
{
die_codec(&codec[0], "Failed to encode frame");
}
gettimeofday(&tv2, NULL);
timersub(&tv2, &tv1, &difftv);
cx_time += (double)(difftv.tv_sec * 1000000 + difftv.tv_usec);
&& (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":"");
printf("FPS for encoding %d %f %f \n", frame_cnt, (float)cx_time / 1000000,
1000000 * (double)frame_cnt / (double)cx_time);
|
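Note on the temporal-layer logic in the record above: each encoder selects its per-frame reference/update flags from a precomputed pattern, indexed by frame number modulo the pattern's periodicity (periodicity_to_num_layers maps layer count to pattern length). A minimal C sketch of that lookup, with an illustrative pattern in place of the real VP8_EFLAG_* combinations and TS_MAX_PERIODICITY standing in for VPX_TS_MAX_PERIODICITY:

#include <stdio.h>

#define MAX_NUM_TEMPORAL_LAYERS 3
#define TS_MAX_PERIODICITY 16  /* stand-in for VPX_TS_MAX_PERIODICITY */

/* Pattern length per layer count, same table as in the record. */
static const int periodicity_to_num_layers[MAX_NUM_TEMPORAL_LAYERS] = {1, 8, 8};

int main(void) {
    /* Illustrative flag pattern for one encoder with 3 temporal layers. */
    int layer_flags[TS_MAX_PERIODICITY] = {0x01, 0x02, 0x03, 0x04,
                                           0x05, 0x06, 0x07, 0x08};
    const int num_temporal_layers = 3;
    const int flag_periodicity =
        periodicity_to_num_layers[num_temporal_layers - 1];
    int frame_cnt;
    for (frame_cnt = 0; frame_cnt < 10; ++frame_cnt) {
        const int flags = layer_flags[frame_cnt % flag_periodicity];
        printf("frame %2d -> flags 0x%02x\n", frame_cnt, flags);
    }
    return 0;
}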
150,819 |
int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec = {0};
vpx_codec_enc_cfg_t cfg = {0};
int frame_count = 0;
vpx_image_t raw;
vpx_codec_err_t res;
VpxVideoInfo info = {0};
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
int update_frame_num = 0;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
exec_name = argv[0];
if (argc != 6)
die("Invalid number of arguments");
encoder = get_vpx_encoder_by_name("vp8");
if (!encoder)
die("Unsupported codec.");
update_frame_num = atoi(argv[5]);
if (!update_frame_num)
die("Couldn't parse frame number '%s'\n", argv[5]);
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[1], NULL, 0);
info.frame_height = strtol(argv[2], NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = bitrate;
writer = vpx_video_writer_open(argv[4], kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", argv[4]);
if (!(infile = fopen(argv[3], "rb")))
die("Failed to open %s for reading.", argv[3]);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
while (vpx_img_read(&raw, infile)) {
if (frame_count + 1 == update_frame_num) {
vpx_ref_frame_t ref;
ref.frame_type = VP8_LAST_FRAME;
ref.img = raw;
if (vpx_codec_control(&codec, VP8_SET_REFERENCE, &ref))
die_codec(&codec, "Failed to set reference frame");
}
encode_frame(&codec, &raw, frame_count++, writer);
}
encode_frame(&codec, NULL, -1, writer);
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec = {0};
vpx_codec_enc_cfg_t cfg = {0};
int frame_count = 0;
vpx_image_t raw;
vpx_codec_err_t res;
VpxVideoInfo info = {0};
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
int update_frame_num = 0;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
exec_name = argv[0];
if (argc != 6)
die("Invalid number of arguments");
encoder = get_vpx_encoder_by_name("vp8");
if (!encoder)
die("Unsupported codec.");
update_frame_num = atoi(argv[5]);
if (!update_frame_num)
die("Couldn't parse frame number '%s'\n", argv[5]);
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(argv[1], NULL, 0);
info.frame_height = strtol(argv[2], NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = bitrate;
writer = vpx_video_writer_open(argv[4], kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", argv[4]);
if (!(infile = fopen(argv[3], "rb")))
die("Failed to open %s for reading.", argv[3]);
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
// Encode frames.
while (vpx_img_read(&raw, infile)) {
if (frame_count + 1 == update_frame_num) {
vpx_ref_frame_t ref;
ref.frame_type = VP8_LAST_FRAME;
ref.img = raw;
if (vpx_codec_control(&codec, VP8_SET_REFERENCE, &ref))
die_codec(&codec, "Failed to set reference frame");
}
encode_frame(&codec, &raw, frame_count++, writer);
}
// Flush encoder.
while (encode_frame(&codec, NULL, -1, writer)) {}
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
|
@@ -50,25 +50,25 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <width> <height> <infile> <outfile> <frame>\n",
exec_name);
exit(EXIT_FAILURE);
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0,
@@ -77,6 +77,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -90,6 +92,8 @@
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
@@ -138,9 +142,9 @@
die("Failed to allocate image.");
}
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -157,9 +161,10 @@
if (!(infile = fopen(argv[3], "rb")))
die("Failed to open %s for reading.", argv[3]);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
if (frame_count + 1 == update_frame_num) {
vpx_ref_frame_t ref;
@@ -171,7 +176,9 @@
encode_frame(&codec, &raw, frame_count++, writer);
}
- encode_frame(&codec, NULL, -1, writer);
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, writer)) {}
printf("\n");
fclose(infile);
|
CWE-119
|
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
encode_frame(&codec, NULL, -1, writer);
|
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
// Encode frames.
// Flush encoder.
while (encode_frame(&codec, NULL, -1, writer)) {}
|
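Note on the flush change in record 150,819: passing img == NULL to vpx_codec_encode() signals end of stream, but one call does not necessarily drain every buffered packet, so the patch loops until encode_frame() reports that no packet came out. A toy, self-contained C sketch of why the loop matters; the buffering encoder here is simulated, not the libvpx API:

#include <stdio.h>

/* Toy stand-in for an encoder that buffers frames internally: after the
 * last input, several packets can still be pending and must be drained. */
static int pending_packets = 3;

static int encode_frame(const void *img) {
    int got_pkts = 0;
    if (img == NULL) {  /* flush call: emits at most one pending packet */
        if (pending_packets > 0) {
            --pending_packets;
            got_pkts = 1;
            printf("flushed a delayed packet, %d left\n", pending_packets);
        }
    } else {
        printf("encoded a frame\n");
        got_pkts = 1;
    }
    return got_pkts;
}

int main(void) {
    encode_frame("frame");  /* normal encoding */
    /* A single flush call, as in the vulnerable version, would leave
     * packets behind; looping until nothing is returned drains the codec. */
    while (encode_frame(NULL)) {}
    return 0;
}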
150,820 |
void usage_exit() {
fprintf(stderr, "Usage: %s <width> <height> <infile> <outfile> <frame>\n",
exec_name);
exit(EXIT_FAILURE);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void usage_exit(void) {
fprintf(stderr, "Usage: %s <width> <height> <infile> <outfile> <frame>\n",
exec_name);
exit(EXIT_FAILURE);
}
|
@@ -50,25 +50,25 @@
#include <stdlib.h>
#include <string.h>
-#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
-#include "./tools_common.h"
-#include "./video_writer.h"
+#include "../tools_common.h"
+#include "../video_writer.h"
static const char *exec_name;
-void usage_exit() {
+void usage_exit(void) {
fprintf(stderr, "Usage: %s <width> <height> <infile> <outfile> <frame>\n",
exec_name);
exit(EXIT_FAILURE);
}
-static void encode_frame(vpx_codec_ctx_t *codec,
- vpx_image_t *img,
- int frame_index,
- VpxVideoWriter *writer) {
+static int encode_frame(vpx_codec_ctx_t *codec,
+ vpx_image_t *img,
+ int frame_index,
+ VpxVideoWriter *writer) {
+ int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0,
@@ -77,6 +77,8 @@
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
+ got_pkts = 1;
+
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
@@ -90,6 +92,8 @@
fflush(stdout);
}
}
+
+ return got_pkts;
}
int main(int argc, char **argv) {
@@ -138,9 +142,9 @@
die("Failed to allocate image.");
}
- printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
+ printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
- res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
+ res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
@@ -157,9 +161,10 @@
if (!(infile = fopen(argv[3], "rb")))
die("Failed to open %s for reading.", argv[3]);
- if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
+ if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
+ // Encode frames.
while (vpx_img_read(&raw, infile)) {
if (frame_count + 1 == update_frame_num) {
vpx_ref_frame_t ref;
@@ -171,7 +176,9 @@
encode_frame(&codec, &raw, frame_count++, writer);
}
- encode_frame(&codec, NULL, -1, writer);
+
+ // Flush encoder.
+ while (encode_frame(&codec, NULL, -1, writer)) {}
printf("\n");
fclose(infile);
|
CWE-119
| null |
void usage_exit(void) {
|
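Note on the usage_exit() change in record 150,820: in pre-C23 C, an empty parameter list leaves the parameters unspecified, so calls with stray arguments still compile; (void) declares a true zero-parameter prototype. A minimal C sketch of the difference (in C++ the two forms are equivalent, and C23 makes () mean (void) as well):

#include <stdio.h>

void old_style();      /* pre-C23: parameters unspecified, not "none" */
void prototyped(void); /* true prototype: exactly zero parameters */

void old_style() { printf("old style\n"); }
void prototyped(void) { printf("prototyped\n"); }

int main(void) {
    old_style(42);  /* accepted by pre-C23 compilers despite the bogus
                     * argument (behavior undefined); prototyped(42)
                     * would be rejected outright */
    prototyped();
    return 0;
}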
150,821 |
void update_rate_histogram(struct rate_hist *hist,
const vpx_codec_enc_cfg_t *cfg,
const vpx_codec_cx_pkt_t *pkt) {
int i;
int64_t then = 0;
int64_t avg_bitrate = 0;
int64_t sum_sz = 0;
const int64_t now = pkt->data.frame.pts * 1000 *
(uint64_t)cfg->g_timebase.num /
(uint64_t)cfg->g_timebase.den;
int idx = hist->frames++ % hist->samples;
hist->pts[idx] = now;
hist->sz[idx] = (int)pkt->data.frame.sz;
if (now < cfg->rc_buf_initial_sz)
return;
then = now;
/* Sum the size over the past rc_buf_sz ms */
for (i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--) {
const int i_idx = (i - 1) % hist->samples;
then = hist->pts[i_idx];
if (now - then > cfg->rc_buf_sz)
break;
sum_sz += hist->sz[i_idx];
}
if (now == then)
return;
avg_bitrate = sum_sz * 8 * 1000 / (now - then);
idx = (int)(avg_bitrate * (RATE_BINS / 2) / (cfg->rc_target_bitrate * 1000));
if (idx < 0)
idx = 0;
if (idx > RATE_BINS - 1)
idx = RATE_BINS - 1;
if (hist->bucket[idx].low > avg_bitrate)
hist->bucket[idx].low = (int)avg_bitrate;
if (hist->bucket[idx].high < avg_bitrate)
hist->bucket[idx].high = (int)avg_bitrate;
hist->bucket[idx].count++;
hist->total++;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void update_rate_histogram(struct rate_hist *hist,
const vpx_codec_enc_cfg_t *cfg,
const vpx_codec_cx_pkt_t *pkt) {
int i;
int64_t then = 0;
int64_t avg_bitrate = 0;
int64_t sum_sz = 0;
const int64_t now = pkt->data.frame.pts * 1000 *
(uint64_t)cfg->g_timebase.num /
(uint64_t)cfg->g_timebase.den;
int idx = hist->frames++ % hist->samples;
hist->pts[idx] = now;
hist->sz[idx] = (int)pkt->data.frame.sz;
if (now < cfg->rc_buf_initial_sz)
return;
if (!cfg->rc_target_bitrate)
return;
then = now;
/* Sum the size over the past rc_buf_sz ms */
for (i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--) {
const int i_idx = (i - 1) % hist->samples;
then = hist->pts[i_idx];
if (now - then > cfg->rc_buf_sz)
break;
sum_sz += hist->sz[i_idx];
}
if (now == then)
return;
avg_bitrate = sum_sz * 8 * 1000 / (now - then);
idx = (int)(avg_bitrate * (RATE_BINS / 2) / (cfg->rc_target_bitrate * 1000));
if (idx < 0)
idx = 0;
if (idx > RATE_BINS - 1)
idx = RATE_BINS - 1;
if (hist->bucket[idx].low > avg_bitrate)
hist->bucket[idx].low = (int)avg_bitrate;
if (hist->bucket[idx].high < avg_bitrate)
hist->bucket[idx].high = (int)avg_bitrate;
hist->bucket[idx].count++;
hist->total++;
}
|
@@ -88,6 +88,9 @@
if (now < cfg->rc_buf_initial_sz)
return;
+ if (!cfg->rc_target_bitrate)
+ return;
+
then = now;
/* Sum the size over the past rc_buf_sz ms */
|
CWE-119
| null |
if (!cfg->rc_target_bitrate)
return;
|
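Note on the update_rate_histogram() guard in record 150,821: idx is computed by dividing by cfg->rc_target_bitrate * 1000, so a configuration with a zero target bitrate makes the bucket lookup divide by zero. A self-contained C sketch of the guarded computation; RATE_BINS and the sample inputs are chosen for illustration:

#include <stdio.h>
#include <stdint.h>

#define RATE_BINS 100

static int bitrate_bucket(int64_t avg_bitrate, unsigned int rc_target_bitrate) {
    int idx;
    if (rc_target_bitrate == 0)  /* the patched early-out: no division by zero */
        return -1;
    idx = (int)(avg_bitrate * (RATE_BINS / 2) / (rc_target_bitrate * 1000));
    if (idx < 0) idx = 0;
    if (idx > RATE_BINS - 1) idx = RATE_BINS - 1;
    return idx;
}

int main(void) {
    printf("bucket = %d\n", bitrate_bucket(450000, 500)); /* lands mid-histogram */
    printf("bucket = %d\n", bitrate_bucket(450000, 0));   /* guarded: -1 */
    return 0;
}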
150,822 |
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
} else if (video->frame() == 3) {
vpx_active_map_t map = {0};
uint8_t active_map[9 * 13] = {
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1,
0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0,
};
map.cols = (kWidth + 15) / 16;
map.rows = (kHeight + 15) / 16;
ASSERT_EQ(map.cols, 13u);
ASSERT_EQ(map.rows, 9u);
map.active_map = active_map;
encoder->Control(VP8E_SET_ACTIVEMAP, &map);
} else if (video->frame() == 15) {
vpx_active_map_t map = {0};
map.cols = (kWidth + 15) / 16;
map.rows = (kHeight + 15) / 16;
map.active_map = NULL;
encoder->Control(VP8E_SET_ACTIVEMAP, &map);
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
} else if (video->frame() == 3) {
vpx_active_map_t map = vpx_active_map_t();
uint8_t active_map[9 * 13] = {
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1,
0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0,
};
map.cols = (kWidth + 15) / 16;
map.rows = (kHeight + 15) / 16;
ASSERT_EQ(map.cols, 13u);
ASSERT_EQ(map.rows, 9u);
map.active_map = active_map;
encoder->Control(VP8E_SET_ACTIVEMAP, &map);
} else if (video->frame() == 15) {
vpx_active_map_t map = vpx_active_map_t();
map.cols = (kWidth + 15) / 16;
map.rows = (kHeight + 15) / 16;
map.active_map = NULL;
encoder->Control(VP8E_SET_ACTIVEMAP, &map);
}
}
|
@@ -38,7 +38,7 @@
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
} else if (video->frame() == 3) {
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = vpx_active_map_t();
uint8_t active_map[9 * 13] = {
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
@@ -57,7 +57,7 @@
map.active_map = active_map;
encoder->Control(VP8E_SET_ACTIVEMAP, &map);
} else if (video->frame() == 15) {
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = vpx_active_map_t();
map.cols = (kWidth + 15) / 16;
map.rows = (kHeight + 15) / 16;
map.active_map = NULL;
@@ -83,9 +83,6 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-#define VP9_FACTORY \
- static_cast<const libvpx_test::CodecFactory *>(&libvpx_test::kVP9)
-
VP9_INSTANTIATE_TEST_CASE(ActiveMapTest,
::testing::Values(::libvpx_test::kRealTime),
::testing::Range(0, 6));
|
CWE-119
|
vpx_active_map_t map = {0};
vpx_active_map_t map = {0};
|
vpx_active_map_t map = vpx_active_map_t();
vpx_active_map_t map = vpx_active_map_t();
|
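Note on the active-map change in record 150,822: the patch replaces brace initialization with C++ value-initialization. Both leave the struct all-zero; the likely motivation (assumed here, the record does not state it) is that = {0} explicitly initializes only the first member and can trigger missing-field-initializer warnings, while vpx_active_map_t() value-initializes every member. A C sketch with a stand-in struct whose layout mirrors the assumed vpx_active_map_t:

#include <stdio.h>

/* Stand-in with the assumed member order of vpx_active_map_t. */
struct active_map {
    unsigned char *active_map;
    unsigned int rows;
    unsigned int cols;
};

int main(void) {
    /* "{0}" explicitly initializes only the first member; the remaining
     * members are still zero-initialized by the language, but some
     * compilers warn about the missing initializers. */
    struct active_map map = {0};
    printf("active_map=%p rows=%u cols=%u\n",
           (void *)map.active_map, map.rows, map.cols);
    return 0;
}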
150,823 |
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
|
@@ -7,8 +7,6 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <climits>
-#include <vector>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
@@ -17,11 +15,12 @@
namespace {
-class AqSegmentTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith2Params<
- libvpx_test::TestMode, int> {
+class AqSegmentTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
AqSegmentTest() : EncoderTest(GET_PARAM(0)) {}
+ virtual ~AqSegmentTest() {}
virtual void SetUp() {
InitializeConfig();
@@ -39,10 +38,6 @@
}
}
- virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- }
- }
int set_cpu_used_;
int aq_mode_;
};
@@ -107,13 +102,8 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-using std::tr1::make_tuple;
-
-#define VP9_FACTORY \
- static_cast<const libvpx_test::CodecFactory*> (&libvpx_test::kVP9)
-
VP9_INSTANTIATE_TEST_CASE(AqSegmentTest,
::testing::Values(::libvpx_test::kRealTime,
::libvpx_test::kOnePassGood),
- ::testing::Range(3, 9));
+ ::testing::Range(3, 9));
} // namespace
|
CWE-119
|
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
}
}
| null |
150,824 |
ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
convolve_fn_t v8, convolve_fn_t v8_avg,
convolve_fn_t hv8, convolve_fn_t hv8_avg)
: h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
hv8_avg_(hv8_avg) {}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
|
@@ -9,40 +9,65 @@
*/
#include <string.h>
-#include "test/acm_random.h"
-#include "test/register_state_check.h"
-#include "test/util.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_filter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
namespace {
-typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h);
+
+static const unsigned int kMaxDimension = 64;
+
+typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
- convolve_fn_t v8, convolve_fn_t v8_avg,
- convolve_fn_t hv8, convolve_fn_t hv8_avg)
- : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
- hv8_avg_(hv8_avg) {}
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
+ ConvolveFunc h8, ConvolveFunc h8_avg,
+ ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg,
+ ConvolveFunc sh8, ConvolveFunc sh8_avg,
+ ConvolveFunc sv8, ConvolveFunc sv8_avg,
+ ConvolveFunc shv8, ConvolveFunc shv8_avg,
+ int bd)
+ : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
+ v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
+ sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
+ use_highbd_(bd) {}
- convolve_fn_t h8_;
- convolve_fn_t v8_;
- convolve_fn_t hv8_;
- convolve_fn_t h8_avg_;
- convolve_fn_t v8_avg_;
- convolve_fn_t hv8_avg_;
+ ConvolveFunc copy_;
+ ConvolveFunc avg_;
+ ConvolveFunc h8_;
+ ConvolveFunc v8_;
+ ConvolveFunc hv8_;
+ ConvolveFunc h8_avg_;
+ ConvolveFunc v8_avg_;
+ ConvolveFunc hv8_avg_;
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
-typedef std::tr1::tuple<int, int, const ConvolveFunctions*> convolve_param_t;
+typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
@@ -68,71 +93,66 @@
const int kInterp_Extend = 4;
const unsigned int intermediate_height =
(kInterp_Extend - 1) + output_height + kInterp_Extend;
+ unsigned int i, j;
- /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
- * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
- * + kInterp_Extend
- * = 3 + 16 + 4
- * = 23
- * and filter_max_width = 16
- */
- uint8_t intermediate_buffer[71 * 64];
+ // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+ // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+ // + kInterp_Extend
+ // = 3 + 16 + 4
+ // = 23
+ // and filter_max_width = 16
+ //
+ uint8_t intermediate_buffer[71 * kMaxDimension];
const int intermediate_next_stride = 1 - intermediate_height * output_width;
// Horizontal pass (src -> transposed intermediate).
- {
- uint8_t *output_ptr = intermediate_buffer;
- const int src_next_row_stride = src_stride - output_width;
- unsigned int i, j;
- src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
- for (i = 0; i < intermediate_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
- ++src_ptr;
- output_ptr += intermediate_height;
- }
- src_ptr += src_next_row_stride;
- output_ptr += intermediate_next_stride;
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
}
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
}
// Vertical pass (transposed intermediate -> dst).
- {
- uint8_t *src_ptr = intermediate_buffer;
- const int dst_next_row_stride = dst_stride - output_width;
- unsigned int i, j;
- for (i = 0; i < output_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
- src_ptr += intermediate_height;
- }
- src_ptr += intermediate_next_stride;
- dst_ptr += dst_next_row_stride;
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
}
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
}
}
@@ -159,17 +179,138 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
- uint8_t tmp[64 * 64];
+ uint8_t tmp[kMaxDimension * kMaxDimension];
- assert(output_width <= 64);
- assert(output_height <= 64);
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
-class ConvolveTest : public ::testing::TestWithParam<convolve_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+ /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+ * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+ * + kInterp_Extend
+ * = 3 + 16 + 4
+ * = 23
+ * and filter_max_width = 16
+ */
+ uint16_t intermediate_buffer[71 * kMaxDimension];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint16_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint16_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
+
+void highbd_block2d_average_c(uint16_t *src,
+ unsigned int src_stride,
+ uint16_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
+
+void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ uint16_t tmp[kMaxDimension * kMaxDimension];
+
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height, bd);
+ highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
@@ -177,13 +318,36 @@
vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
output_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
+ output_ref_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment,
+ (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
+ output16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+ output16_ref_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+#endif
}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
+ vpx_free(output_ref_);
+ output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_free(input16_ - 1);
+ input16_ = NULL;
+ vpx_free(output16_);
+ output16_ = NULL;
+ vpx_free(output16_ref_);
+ output16_ref_ = NULL;
+#endif
}
protected:
@@ -191,7 +355,6 @@
static const int kOuterBlockSize = 256;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
- static const int kMaxDimension = 64;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -212,6 +375,12 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ != 0)
+ mask_ = (1 << UUT_->use_highbd_) - 1;
+ else
+ mask_ = 255;
+#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
@@ -221,12 +390,33 @@
}
::libvpx_test::ACMRandom prng;
- for (int i = 0; i < kInputBufferSize; ++i)
- input_[i] = prng.Rand8Extremes();
+ for (int i = 0; i < kInputBufferSize; ++i) {
+ if (i & 1) {
+ input_[i] = 255;
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = mask_;
+#endif
+ } else {
+ input_[i] = prng.Rand8Extremes();
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = prng.Rand16() & mask_;
+#endif
+ }
+ }
}
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_memset16(input16_, value, kInputBufferSize);
+#endif
+ }
+
+ void CopyOutputToRef() {
+ memcpy(output_ref_, output_, kOutputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ memcpy(output16_ref_, output16_, kOutputBufferSize);
+#endif
}
void CheckGuardBlocks() {
@@ -236,39 +426,197 @@
}
}
- uint8_t* input() const {
+ uint8_t *input() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
}
- uint8_t* output() const {
+ uint8_t *output() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint8_t *output_ref() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint16_t lookup(uint8_t *list, int index) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return list[index];
+ } else {
+ return CONVERT_TO_SHORTPTR(list)[index];
+ }
+#else
+ return list[index];
+#endif
+ }
+
+ void assign_val(uint8_t *list, int index, uint16_t val) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ list[index] = (uint8_t) val;
+ } else {
+ CONVERT_TO_SHORTPTR(list)[index] = val;
+ }
+#else
+ list[index] = (uint8_t) val;
+#endif
+ }
+
+ void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+ } else {
+ highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
+ src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
+ }
+#else
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+#endif
+ }
+
+ void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+ } else {
+ highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
+ HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
+ output_width, output_height, UUT_->use_highbd_);
+ }
+#else
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+#endif
}
const ConvolveFunctions* UUT_;
static uint8_t* input_;
static uint8_t* output_;
+ static uint8_t* output_ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ static uint16_t* input16_;
+ static uint16_t* output16_;
+ static uint16_t* output16_ref_;
+ int mask_;
+#endif
};
+
uint8_t* ConvolveTest::input_ = NULL;
uint8_t* ConvolveTest::output_ = NULL;
+uint8_t* ConvolveTest::output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+uint16_t* ConvolveTest::input16_ = NULL;
+uint16_t* ConvolveTest::output16_ = NULL;
+uint16_t* ConvolveTest::output16_ref_ = NULL;
+#endif
TEST_P(ConvolveTest, GuardBlocks) {
CheckGuardBlocks();
}
+TEST_P(ConvolveTest, Copy) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, Avg) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t* const out_ref = output_ref();
+ CopyOutputToRef();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
+ lookup(out_ref, y * kOutputStride + x), 1))
+ << "(" << x << "," << y << ")";
+}
+
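+// [Editor's note - not part of the recorded patch.] The Avg test above checks
+// that each output pixel is the rounded mean of the input pixel and the prior
+// output. ROUND_POWER_OF_TWO is libvpx's round-half-up right shift, commonly
+// defined as below (exact definition assumed):
+//
+//   #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
+//
+//   ROUND_POWER_OF_TWO(5 + 6, 1) == 6  // 5.5 rounds up
+//   ROUND_POWER_OF_TWO(4 + 5, 1) == 5  // 4.5 rounds up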
TEST_P(ConvolveTest, CopyHoriz) {
uint8_t* const in = input();
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -277,15 +625,16 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -294,31 +643,26 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
+ 16, Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
-const int16_t (*kTestFilterList[])[8] = {
- vp9_bilinear_filters,
- vp9_sub_pel_filters_8,
- vp9_sub_pel_filters_8s,
- vp9_sub_pel_filters_8lp
-};
-const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
+const int kNumFilterBanks = 4;
const int kNumFilters = 16;
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -341,40 +685,57 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
-
+#endif
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_(in, kInputStride, out, kOutputStride,
kInvalidFilter, 16, filters[filter_y], 16,
Width(), Height()));
- else
- REGISTER_STATE_CHECK(
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
UUT_->h8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -386,54 +747,77 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
+#endif
// Populate ref and out with some random data
::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- const uint8_t r = prng.Rand8Extremes();
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
- out[y * kOutputStride + x] = r;
- ref[y * kOutputStride + x] = r;
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
}
}
- const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
-
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
- const int kNumFilters = 16;
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
else
- REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -442,108 +826,102 @@
}
}
-DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128},
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128}
-};
-
-/* This test exercises the horizontal and vertical filter functions. */
-TEST_P(ConvolveTest, ChangeFilterWorks) {
- uint8_t* const in = input();
- uint8_t* const out = output();
-
- /* Assume that the first input sample is at the 8/16th position. */
- const int kInitialSubPelOffset = 8;
-
- /* Filters are 8-tap, so the first filter tap will be applied to the pixel
- * at position -3 with respect to the current filtering position. Since
- * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
- * which is non-zero only in the last tap. So, applying the filter at the
- * current input position will result in an output equal to the pixel at
- * offset +4 (-3 + 7) with respect to the current filtering position.
- */
- const int kPixelSelected = 4;
-
- /* Assume that each output pixel requires us to step on by 17/16th pixels in
- * the input.
- */
- const int kInputPixelStep = 17;
-
- /* The filters are setup in such a way that the expected output produces
- * sets of 8 identical output samples. As the filter position moves to the
- * next 1/16th pixel position the only active (=128) filter tap moves one
- * position to the left, resulting in the same input pixel being replicated
- * in to the output for 8 consecutive samples. After each set of 8 positions
- * the filters select a different input pixel. kFilterPeriodAdjust below
- * computes which input pixel is written to the output for a specified
- * x or y position.
- */
-
- /* Test the horizontal filter. */
- REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, NULL, 0, Width(), Height()));
-
- for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjust = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << "width = " << Width();
+TEST_P(ConvolveTest, FilterExtremes) {
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t *ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
}
+#else
+ uint8_t ref[kOutputStride * kMaxDimension];
+#endif
- /* Test the vertical filter. */
- REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
- NULL, 0, kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, Width(), Height()));
-
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjust = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
- }
-
- /* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- Width(), Height()));
-
- for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjustY = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustY * kInputPixelStep)
- >> SUBPEL_BITS);
for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjustX = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustX * kInputPixelStep)
- >> SUBPEL_BITS);
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
+ }
+ }
- ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
- << "x == " << x << ", y == " << y;
+ for (int axis = 0; axis < 2; axis++) {
+ int seed_val = 0;
+ while (seed_val < 256) {
+ for (int y = 0; y < 8; ++y) {
+ for (int x = 0; x < 8; ++x) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
+#else
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
+#endif
+ if (axis) seed_val++;
+ }
+ if (axis)
+ seed_val -= 8;
+ else
+ seed_val++;
+ }
+ if (axis) seed_val += 8;
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
}
}
}
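// [Editor's sketch - not part of the recorded patch.] The seed_val
// bookkeeping above paints each 8-pixel line along the filter axis with the
// binary expansion of one seed, so sweeping seeds 0..255 drives the 8 taps
// through every all-zero/all-max combination. Equivalent pattern for
// axis == 0 (block/max_val stand in for in/mask_):
//
//   for (int seed = 0; seed < 256; seed += 8)
//     for (int y = 0; y < 8; ++y)
//       for (int x = 0; x < 8; ++x)
//         block[y][x] = (((seed + y) >> x) & 1) ? max_val : 0;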
@@ -553,22 +931,24 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
for (int frac = 0; frac < 16; ++frac) {
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ eighttap[frac], step,
+ eighttap[frac], step,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(in, y * kInputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "x == " << x << ", y == " << y
<< ", frac == " << frac << ", step == " << step;
}
@@ -579,10 +959,590 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE2 && ARCH_X86_64
+void wrap_convolve8_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride, filter_x,
+ filter_x_stride, filter_y, filter_y_stride,
+ w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+#endif // HAVE_SSE2 && ARCH_X86_64
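+// [Editor's note - not part of the recorded patch.] The wrap_*_sse2_{8,10,12}
+// functions above (and the wrap_*_c_* versions that follow) exist only to pin
+// the trailing bit-depth argument so the high-bitdepth kernels fit the
+// 10-argument ConvolveFunc signature. One way to generate them (the WRAP
+// macro name is illustrative, not from the source):
+//
+//   #define WRAP(func, bd)                                                  \
+//     void wrap_##func##_##bd(                                              \
+//         const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,           \
+//         ptrdiff_t dst_stride, const int16_t *filter_x,                    \
+//         int filter_x_stride, const int16_t *filter_y,                     \
+//         int filter_y_stride, int w, int h) {                              \
+//       vpx_highbd_##func(src, src_stride, dst, dst_stride, filter_x,       \
+//                         filter_x_stride, filter_y, filter_y_stride,       \
+//                         w, h, bd);                                        \
+//     }
+//   WRAP(convolve8_horiz_sse2, 10)  // -> wrap_convolve8_horiz_sse2_10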
+
+void wrap_convolve_copy_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_copy_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_copy_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
const ConvolveFunctions convolve8_c(
- vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
- vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
- vp9_convolve8_c, vp9_convolve8_avg_c);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+const ConvolveFunctions convolve10_c(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve10_c),
+ make_tuple(8, 4, &convolve10_c),
+ make_tuple(4, 8, &convolve10_c),
+ make_tuple(8, 8, &convolve10_c),
+ make_tuple(16, 8, &convolve10_c),
+ make_tuple(8, 16, &convolve10_c),
+ make_tuple(16, 16, &convolve10_c),
+ make_tuple(32, 16, &convolve10_c),
+ make_tuple(16, 32, &convolve10_c),
+ make_tuple(32, 32, &convolve10_c),
+ make_tuple(64, 32, &convolve10_c),
+ make_tuple(32, 64, &convolve10_c),
+ make_tuple(64, 64, &convolve10_c)));
+const ConvolveFunctions convolve12_c(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve12_c),
+ make_tuple(8, 4, &convolve12_c),
+ make_tuple(4, 8, &convolve12_c),
+ make_tuple(8, 8, &convolve12_c),
+ make_tuple(16, 8, &convolve12_c),
+ make_tuple(8, 16, &convolve12_c),
+ make_tuple(16, 16, &convolve12_c),
+ make_tuple(32, 16, &convolve12_c),
+ make_tuple(16, 32, &convolve12_c),
+ make_tuple(32, 32, &convolve12_c),
+ make_tuple(64, 32, &convolve12_c),
+ make_tuple(32, 64, &convolve12_c),
+ make_tuple(64, 64, &convolve12_c)));
+
+#else
+
+const ConvolveFunctions convolve8_c(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
+ vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_c),
@@ -598,12 +1558,87 @@
make_tuple(64, 32, &convolve8_c),
make_tuple(32, 64, &convolve8_c),
make_tuple(64, 64, &convolve8_c)));
+#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && ARCH_X86_64
+#if CONFIG_VP9_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
- vp9_convolve8_horiz_sse2, vp9_convolve8_avg_horiz_sse2,
- vp9_convolve8_vert_sse2, vp9_convolve8_avg_vert_sse2,
- vp9_convolve8_sse2, vp9_convolve8_avg_sse2);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_sse2),
+ make_tuple(8, 4, &convolve8_sse2),
+ make_tuple(4, 8, &convolve8_sse2),
+ make_tuple(8, 8, &convolve8_sse2),
+ make_tuple(16, 8, &convolve8_sse2),
+ make_tuple(8, 16, &convolve8_sse2),
+ make_tuple(16, 16, &convolve8_sse2),
+ make_tuple(32, 16, &convolve8_sse2),
+ make_tuple(16, 32, &convolve8_sse2),
+ make_tuple(32, 32, &convolve8_sse2),
+ make_tuple(64, 32, &convolve8_sse2),
+ make_tuple(32, 64, &convolve8_sse2),
+ make_tuple(64, 64, &convolve8_sse2),
+ make_tuple(4, 4, &convolve10_sse2),
+ make_tuple(8, 4, &convolve10_sse2),
+ make_tuple(4, 8, &convolve10_sse2),
+ make_tuple(8, 8, &convolve10_sse2),
+ make_tuple(16, 8, &convolve10_sse2),
+ make_tuple(8, 16, &convolve10_sse2),
+ make_tuple(16, 16, &convolve10_sse2),
+ make_tuple(32, 16, &convolve10_sse2),
+ make_tuple(16, 32, &convolve10_sse2),
+ make_tuple(32, 32, &convolve10_sse2),
+ make_tuple(64, 32, &convolve10_sse2),
+ make_tuple(32, 64, &convolve10_sse2),
+ make_tuple(64, 64, &convolve10_sse2),
+ make_tuple(4, 4, &convolve12_sse2),
+ make_tuple(8, 4, &convolve12_sse2),
+ make_tuple(4, 8, &convolve12_sse2),
+ make_tuple(8, 8, &convolve12_sse2),
+ make_tuple(16, 8, &convolve12_sse2),
+ make_tuple(8, 16, &convolve12_sse2),
+ make_tuple(16, 16, &convolve12_sse2),
+ make_tuple(32, 16, &convolve12_sse2),
+ make_tuple(16, 32, &convolve12_sse2),
+ make_tuple(32, 32, &convolve12_sse2),
+ make_tuple(64, 32, &convolve12_sse2),
+ make_tuple(32, 64, &convolve12_sse2),
+ make_tuple(64, 64, &convolve12_sse2)));
+#else
+const ConvolveFunctions convolve8_sse2(
+#if CONFIG_USE_X86INC
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
+#else
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+#endif // CONFIG_USE_X86INC
+ vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
+ vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
+ vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_sse2),
@@ -619,13 +1654,18 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
- vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
- vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_ssse3),
@@ -643,11 +1683,52 @@
make_tuple(64, 64, &convolve8_ssse3)));
#endif
+#if HAVE_AVX2 && HAVE_SSSE3
+const ConvolveFunctions convolve8_avx2(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_avx2),
+ make_tuple(8, 4, &convolve8_avx2),
+ make_tuple(4, 8, &convolve8_avx2),
+ make_tuple(8, 8, &convolve8_avx2),
+ make_tuple(8, 16, &convolve8_avx2),
+ make_tuple(16, 8, &convolve8_avx2),
+ make_tuple(16, 16, &convolve8_avx2),
+ make_tuple(32, 16, &convolve8_avx2),
+ make_tuple(16, 32, &convolve8_avx2),
+ make_tuple(32, 32, &convolve8_avx2),
+ make_tuple(64, 32, &convolve8_avx2),
+ make_tuple(32, 64, &convolve8_avx2),
+ make_tuple(64, 64, &convolve8_avx2)));
+#endif // HAVE_AVX2 && HAVE_SSSE3
+
#if HAVE_NEON
+#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
- vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
- vp9_convolve8_neon, vp9_convolve8_avg_neon);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else // !HAVE_NEON_ASM
+const ConvolveFunctions convolve8_neon(
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#endif // HAVE_NEON_ASM
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_neon),
@@ -663,13 +1744,17 @@
make_tuple(64, 32, &convolve8_neon),
make_tuple(32, 64, &convolve8_neon),
make_tuple(64, 64, &convolve8_neon)));
-#endif
+#endif // HAVE_NEON
#if HAVE_DSPR2
const ConvolveFunctions convolve8_dspr2(
- vp9_convolve8_horiz_dspr2, vp9_convolve8_avg_horiz_dspr2,
- vp9_convolve8_vert_dspr2, vp9_convolve8_avg_vert_dspr2,
- vp9_convolve8_dspr2, vp9_convolve8_avg_dspr2);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
+ vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
+ vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
+ vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_dspr2),
@@ -686,4 +1771,30 @@
make_tuple(32, 64, &convolve8_dspr2),
make_tuple(64, 64, &convolve8_dspr2)));
#endif
+
+#if HAVE_MSA
+const ConvolveFunctions convolve8_msa(
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa,
+ vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
+ vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
+ vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_msa),
+ make_tuple(8, 4, &convolve8_msa),
+ make_tuple(4, 8, &convolve8_msa),
+ make_tuple(8, 8, &convolve8_msa),
+ make_tuple(16, 8, &convolve8_msa),
+ make_tuple(8, 16, &convolve8_msa),
+ make_tuple(16, 16, &convolve8_msa),
+ make_tuple(32, 16, &convolve8_msa),
+ make_tuple(16, 32, &convolve8_msa),
+ make_tuple(32, 32, &convolve8_msa),
+ make_tuple(64, 32, &convolve8_msa),
+ make_tuple(32, 64, &convolve8_msa),
+ make_tuple(64, 64, &convolve8_msa)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
convolve_fn_t v8, convolve_fn_t v8_avg,
convolve_fn_t hv8, convolve_fn_t hv8_avg)
: h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
hv8_avg_(hv8_avg) {}
| null |
150,825 |
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
#if CONFIG_VP9_HIGHBITDEPTH
vpx_memset16(input16_, value, kInputBufferSize);
#endif
}
void CopyOutputToRef() {
memcpy(output_ref_, output_, kOutputBufferSize);
#if CONFIG_VP9_HIGHBITDEPTH
memcpy(output16_ref_, output16_, kOutputBufferSize);
#endif
}
|
@@ -9,40 +9,65 @@
*/
#include <string.h>
-#include "test/acm_random.h"
-#include "test/register_state_check.h"
-#include "test/util.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_filter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
namespace {
-typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h);
+
+static const unsigned int kMaxDimension = 64;
+
+typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
- convolve_fn_t v8, convolve_fn_t v8_avg,
- convolve_fn_t hv8, convolve_fn_t hv8_avg)
- : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
- hv8_avg_(hv8_avg) {}
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
+ ConvolveFunc h8, ConvolveFunc h8_avg,
+ ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg,
+ ConvolveFunc sh8, ConvolveFunc sh8_avg,
+ ConvolveFunc sv8, ConvolveFunc sv8_avg,
+ ConvolveFunc shv8, ConvolveFunc shv8_avg,
+ int bd)
+ : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
+ v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
+ sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
+ use_highbd_(bd) {}
- convolve_fn_t h8_;
- convolve_fn_t v8_;
- convolve_fn_t hv8_;
- convolve_fn_t h8_avg_;
- convolve_fn_t v8_avg_;
- convolve_fn_t hv8_avg_;
+ ConvolveFunc copy_;
+ ConvolveFunc avg_;
+ ConvolveFunc h8_;
+ ConvolveFunc v8_;
+ ConvolveFunc hv8_;
+ ConvolveFunc h8_avg_;
+ ConvolveFunc v8_avg_;
+ ConvolveFunc hv8_avg_;
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
-typedef std::tr1::tuple<int, int, const ConvolveFunctions*> convolve_param_t;
+typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
@@ -68,71 +93,66 @@
const int kInterp_Extend = 4;
const unsigned int intermediate_height =
(kInterp_Extend - 1) + output_height + kInterp_Extend;
+ unsigned int i, j;
- /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
- * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
- * + kInterp_Extend
- * = 3 + 16 + 4
- * = 23
- * and filter_max_width = 16
- */
- uint8_t intermediate_buffer[71 * 64];
+ // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+ // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+ // + kInterp_Extend
+ // = 3 + 16 + 4
+ // = 23
+ // and filter_max_width = 16
+ //
+ uint8_t intermediate_buffer[71 * kMaxDimension];
const int intermediate_next_stride = 1 - intermediate_height * output_width;
// Horizontal pass (src -> transposed intermediate).
- {
- uint8_t *output_ptr = intermediate_buffer;
- const int src_next_row_stride = src_stride - output_width;
- unsigned int i, j;
- src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
- for (i = 0; i < intermediate_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
- ++src_ptr;
- output_ptr += intermediate_height;
- }
- src_ptr += src_next_row_stride;
- output_ptr += intermediate_next_stride;
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
}
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
}
// Vertical pass (transposed intermediate -> dst).
- {
- uint8_t *src_ptr = intermediate_buffer;
- const int dst_next_row_stride = dst_stride - output_width;
- unsigned int i, j;
- for (i = 0; i < output_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
- src_ptr += intermediate_height;
- }
- src_ptr += intermediate_next_stride;
- dst_ptr += dst_next_row_stride;
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
}
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
}
}
@@ -159,17 +179,138 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
- uint8_t tmp[64 * 64];
+ uint8_t tmp[kMaxDimension * kMaxDimension];
- assert(output_width <= 64);
- assert(output_height <= 64);
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
-class ConvolveTest : public ::testing::TestWithParam<convolve_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+ /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+ * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+ * + kInterp_Extend
+ * = 3 + 16 + 4
+ * = 23
+ * and filter_max_width = 16
+ */
+ uint16_t intermediate_buffer[71 * kMaxDimension];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint16_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0..(1 << bd) - 1...
+ *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint16_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0..(1 << bd) - 1...
+ *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
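
The sizing above can be checked in isolation. The sketch below is standalone and not part of the patch; it just recomputes the worst-case intermediate height for the largest block the test supports, which is where the 71-row figure comes from.

#include <assert.h>

int main(void) {
  const int kInterp_Extend = 4;  // an 8-tap filter reaches 3 samples back and 4 forward
  const int kMaxDimension = 64;  // largest block edge exercised by the test
  // Rows of horizontally filtered samples needed to feed the vertical pass:
  const int max_intermediate_height =
      (kInterp_Extend - 1) + kMaxDimension + kInterp_Extend;  // 3 + 64 + 4
  assert(max_intermediate_height == 71);  // matches intermediate_buffer[71 * kMaxDimension]
  return 0;
}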
+
+void highbd_block2d_average_c(uint16_t *src,
+ unsigned int src_stride,
+ uint16_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
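
The averaging step is the usual round-half-up mean, (a + b + 1) >> 1. The standalone sketch below shows it agrees with the ROUND_POWER_OF_TWO form that the Avg test checks against; the macro is reproduced here so the snippet compiles on its own and matches the vpx_dsp definition.

#include <assert.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

int main(void) {
  for (int a = 0; a < 256; ++a)
    for (int b = 0; b < 256; ++b)
      assert(((a + b + 1) >> 1) == ROUND_POWER_OF_TWO(a + b, 1));
  return 0;
}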
+
+void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ uint16_t tmp[kMaxDimension * kMaxDimension];
+
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height, bd);
+ highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
@@ -177,13 +318,36 @@
vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
output_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
+ output_ref_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment,
+ (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
+ output16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+ output16_ref_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+#endif
}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
+ vpx_free(output_ref_);
+ output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_free(input16_ - 1);
+ input16_ = NULL;
+ vpx_free(output16_);
+ output16_ = NULL;
+ vpx_free(output16_ref_);
+ output16_ref_ = NULL;
+#endif
}
protected:
@@ -191,7 +355,6 @@
static const int kOuterBlockSize = 256;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
- static const int kMaxDimension = 64;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -212,6 +375,12 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ != 0)
+ mask_ = (1 << UUT_->use_highbd_) - 1;
+ else
+ mask_ = 255;
+#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
@@ -221,12 +390,33 @@
}
::libvpx_test::ACMRandom prng;
- for (int i = 0; i < kInputBufferSize; ++i)
- input_[i] = prng.Rand8Extremes();
+ for (int i = 0; i < kInputBufferSize; ++i) {
+ if (i & 1) {
+ input_[i] = 255;
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = mask_;
+#endif
+ } else {
+ input_[i] = prng.Rand8Extremes();
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = prng.Rand16() & mask_;
+#endif
+ }
+ }
}
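
For the bit depths the test covers, mask_ is simply the largest representable sample value. A tiny standalone check of the arithmetic:

#include <assert.h>

int main(void) {
  // mask_ = (1 << bd) - 1 when high bit depth is in use, 255 otherwise.
  assert(((1 << 8) - 1) == 255);    // bd == 8
  assert(((1 << 10) - 1) == 1023);  // bd == 10
  assert(((1 << 12) - 1) == 4095);  // bd == 12
  return 0;
}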
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_memset16(input16_, value, kInputBufferSize);
+#endif
+ }
+
+ void CopyOutputToRef() {
+ memcpy(output_ref_, output_, kOutputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+    memcpy(output16_ref_, output16_, kOutputBufferSize * sizeof(output16_[0]));
+#endif
}
void CheckGuardBlocks() {
@@ -236,39 +426,197 @@
}
}
- uint8_t* input() const {
+ uint8_t *input() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
}
- uint8_t* output() const {
+ uint8_t *output() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint8_t *output_ref() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint16_t lookup(uint8_t *list, int index) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return list[index];
+ } else {
+ return CONVERT_TO_SHORTPTR(list)[index];
+ }
+#else
+ return list[index];
+#endif
+ }
+
+ void assign_val(uint8_t *list, int index, uint16_t val) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ list[index] = (uint8_t) val;
+ } else {
+ CONVERT_TO_SHORTPTR(list)[index] = val;
+ }
+#else
+ list[index] = (uint8_t) val;
+#endif
+ }
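
lookup() and assign_val() hide libvpx's pointer-punning convention for high bit depth: a uint16_t buffer is passed through uint8_t*-typed interfaces by halving its address, and doubled back before any dereference. The standalone sketch below mimics the CONVERT_TO_BYTEPTR / CONVERT_TO_SHORTPTR macros (definitions assumed from vpx_ports/mem.h of this era); the round trip is lossless because uint16_t buffers are at least 2-byte aligned, so the address is even.

#include <assert.h>
#include <stdint.h>

static uint8_t *to_byteptr(uint16_t *p) {
  return (uint8_t *)((uintptr_t)p >> 1);  // fake pointer, never dereferenced directly
}
static uint16_t *to_shortptr(uint8_t *p) {
  return (uint16_t *)((uintptr_t)p << 1);  // restores the real address
}

int main(void) {
  uint16_t samples[4] = { 0, 1023, 512, 7 };
  uint8_t *punned = to_byteptr(samples);
  assert(to_shortptr(punned) == samples);  // round-trips to the same buffer
  assert(to_shortptr(punned)[1] == 1023);
  return 0;
}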
+
+ void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+ } else {
+ highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
+ src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
+ }
+#else
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+#endif
+ }
+
+ void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+ } else {
+ highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
+ HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
+ output_width, output_height, UUT_->use_highbd_);
+ }
+#else
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+#endif
}
const ConvolveFunctions* UUT_;
static uint8_t* input_;
static uint8_t* output_;
+ static uint8_t* output_ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ static uint16_t* input16_;
+ static uint16_t* output16_;
+ static uint16_t* output16_ref_;
+ int mask_;
+#endif
};
+
uint8_t* ConvolveTest::input_ = NULL;
uint8_t* ConvolveTest::output_ = NULL;
+uint8_t* ConvolveTest::output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+uint16_t* ConvolveTest::input16_ = NULL;
+uint16_t* ConvolveTest::output16_ = NULL;
+uint16_t* ConvolveTest::output16_ref_ = NULL;
+#endif
TEST_P(ConvolveTest, GuardBlocks) {
CheckGuardBlocks();
}
+TEST_P(ConvolveTest, Copy) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, Avg) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t* const out_ref = output_ref();
+ CopyOutputToRef();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
+ lookup(out_ref, y * kOutputStride + x), 1))
+ << "(" << x << "," << y << ")";
+}
+
TEST_P(ConvolveTest, CopyHoriz) {
uint8_t* const in = input();
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -277,15 +625,16 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -294,31 +643,26 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
+ 16, Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
-const int16_t (*kTestFilterList[])[8] = {
- vp9_bilinear_filters,
- vp9_sub_pel_filters_8,
- vp9_sub_pel_filters_8s,
- vp9_sub_pel_filters_8lp
-};
-const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
+const int kNumFilterBanks = 4;
const int kNumFilters = 16;
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -341,40 +685,57 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
-
+#endif
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_(in, kInputStride, out, kOutputStride,
kInvalidFilter, 16, filters[filter_y], 16,
Width(), Height()));
- else
- REGISTER_STATE_CHECK(
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
UUT_->h8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -386,54 +747,77 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
+#endif
// Populate ref and out with some random data
::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- const uint8_t r = prng.Rand8Extremes();
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
- out[y * kOutputStride + x] = r;
- ref[y * kOutputStride + x] = r;
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
}
}
- const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
-
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
- const int kNumFilters = 16;
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
else
- REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -442,108 +826,102 @@
}
}
-DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128},
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128}
-};
-
-/* This test exercises the horizontal and vertical filter functions. */
-TEST_P(ConvolveTest, ChangeFilterWorks) {
- uint8_t* const in = input();
- uint8_t* const out = output();
-
- /* Assume that the first input sample is at the 8/16th position. */
- const int kInitialSubPelOffset = 8;
-
- /* Filters are 8-tap, so the first filter tap will be applied to the pixel
- * at position -3 with respect to the current filtering position. Since
- * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
- * which is non-zero only in the last tap. So, applying the filter at the
- * current input position will result in an output equal to the pixel at
- * offset +4 (-3 + 7) with respect to the current filtering position.
- */
- const int kPixelSelected = 4;
-
- /* Assume that each output pixel requires us to step on by 17/16th pixels in
- * the input.
- */
- const int kInputPixelStep = 17;
-
- /* The filters are setup in such a way that the expected output produces
- * sets of 8 identical output samples. As the filter position moves to the
- * next 1/16th pixel position the only active (=128) filter tap moves one
- * position to the left, resulting in the same input pixel being replicated
- * in to the output for 8 consecutive samples. After each set of 8 positions
- * the filters select a different input pixel. kFilterPeriodAdjust below
- * computes which input pixel is written to the output for a specified
- * x or y position.
- */
-
- /* Test the horizontal filter. */
- REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, NULL, 0, Width(), Height()));
-
- for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjust = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << "width = " << Width();
+TEST_P(ConvolveTest, FilterExtremes) {
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t *ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
}
+#else
+ uint8_t ref[kOutputStride * kMaxDimension];
+#endif
- /* Test the vertical filter. */
- REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
- NULL, 0, kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, Width(), Height()));
-
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjust = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
- }
-
- /* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- Width(), Height()));
-
- for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjustY = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustY * kInputPixelStep)
- >> SUBPEL_BITS);
for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjustX = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustX * kInputPixelStep)
- >> SUBPEL_BITS);
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
+ }
+ }
- ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
- << "x == " << x << ", y == " << y;
+ for (int axis = 0; axis < 2; axis++) {
+ int seed_val = 0;
+ while (seed_val < 256) {
+ for (int y = 0; y < 8; ++y) {
+ for (int x = 0; x < 8; ++x) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
+#else
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
+#endif
+ if (axis) seed_val++;
+ }
+ if (axis)
+          seed_val -= 8;
+ else
+ seed_val++;
+ }
+ if (axis) seed_val += 8;
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
}
}
}
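
The seed_val bookkeeping above is dense: for each 8x8 window, bit k of a running seed drives either column k (axis 0) or row k (axis 1) of the extreme pattern, so the loop sweeps all 256 on/off combinations of extreme samples along the filter taps in both orientations. A standalone sketch that reproduces one window of the pattern (255 stands in for mask_ at 8 bits):

#include <stdio.h>

// Same indexing as the test loop: on axis 0, row y uses seed + y and bit x
// selects the column value; on axis 1, column x uses seed + x and bit y
// selects the row value.
static void MakeExtremePattern(int seed, int axis, int block[8][8]) {
  for (int y = 0; y < 8; ++y)
    for (int x = 0; x < 8; ++x)
      block[y][x] = (((seed + (axis ? x : y)) >> (axis ? y : x)) & 1) * 255;
}

int main(void) {
  int block[8][8];
  MakeExtremePattern(5, 0, block);  // rows follow seeds 5..12, bit x per column
  for (int y = 0; y < 8; ++y) {
    for (int x = 0; x < 8; ++x) printf("%4d", block[y][x]);
    printf("\n");
  }
  return 0;
}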
@@ -553,22 +931,24 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
for (int frac = 0; frac < 16; ++frac) {
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ eighttap[frac], step,
+ eighttap[frac], step,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(in, y * kInputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "x == " << x << ", y == " << y
<< ", frac == " << frac << ", step == " << step;
}
@@ -579,10 +959,590 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE2 && ARCH_X86_64
+void wrap_convolve8_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride, filter_x,
+ filter_x_stride, filter_y, filter_y_stride,
+ w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+#endif // HAVE_SSE2 && ARCH_X86_64
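
All of these wrappers follow one pattern: the test's ConvolveFunc signature has no bit-depth argument, so each wrap_* function curries a fixed bd into the corresponding vpx_highbd_* call. A hypothetical alternative, not what the patch does, would stamp them out with a function template; the sketch below assumes the vpx_highbd_* declarations from ./vpx_dsp_rtcd.h and otherwise uses only standard C++.

#include <stddef.h>
#include <stdint.h>

typedef void (*HighbdConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int fx_stride,
                                   const int16_t *filter_y, int fy_stride,
                                   int w, int h, int bd);

template <HighbdConvolveFunc fn, int bd>
void WrapHighbd(const uint8_t *src, ptrdiff_t src_stride,
                uint8_t *dst, ptrdiff_t dst_stride,
                const int16_t *filter_x, int fx_stride,
                const int16_t *filter_y, int fy_stride, int w, int h) {
  fn(src, src_stride, dst, dst_stride, filter_x, fx_stride,
     filter_y, fy_stride, w, h, bd);
}

// e.g. &WrapHighbd<vpx_highbd_convolve8_horiz_c, 10> would stand in for
// wrap_convolve8_horiz_c_10 above.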
+
+void wrap_convolve_copy_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_copy_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_copy_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
const ConvolveFunctions convolve8_c(
- vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
- vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
- vp9_convolve8_c, vp9_convolve8_avg_c);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+const ConvolveFunctions convolve10_c(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve10_c),
+ make_tuple(8, 4, &convolve10_c),
+ make_tuple(4, 8, &convolve10_c),
+ make_tuple(8, 8, &convolve10_c),
+ make_tuple(16, 8, &convolve10_c),
+ make_tuple(8, 16, &convolve10_c),
+ make_tuple(16, 16, &convolve10_c),
+ make_tuple(32, 16, &convolve10_c),
+ make_tuple(16, 32, &convolve10_c),
+ make_tuple(32, 32, &convolve10_c),
+ make_tuple(64, 32, &convolve10_c),
+ make_tuple(32, 64, &convolve10_c),
+ make_tuple(64, 64, &convolve10_c)));
+const ConvolveFunctions convolve12_c(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve12_c),
+ make_tuple(8, 4, &convolve12_c),
+ make_tuple(4, 8, &convolve12_c),
+ make_tuple(8, 8, &convolve12_c),
+ make_tuple(16, 8, &convolve12_c),
+ make_tuple(8, 16, &convolve12_c),
+ make_tuple(16, 16, &convolve12_c),
+ make_tuple(32, 16, &convolve12_c),
+ make_tuple(16, 32, &convolve12_c),
+ make_tuple(32, 32, &convolve12_c),
+ make_tuple(64, 32, &convolve12_c),
+ make_tuple(32, 64, &convolve12_c),
+ make_tuple(64, 64, &convolve12_c)));
+
+#else
+
+const ConvolveFunctions convolve8_c(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
+ vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_c),
@@ -598,12 +1558,87 @@
make_tuple(64, 32, &convolve8_c),
make_tuple(32, 64, &convolve8_c),
make_tuple(64, 64, &convolve8_c)));
+#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && ARCH_X86_64
+#if CONFIG_VP9_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
- vp9_convolve8_horiz_sse2, vp9_convolve8_avg_horiz_sse2,
- vp9_convolve8_vert_sse2, vp9_convolve8_avg_vert_sse2,
- vp9_convolve8_sse2, vp9_convolve8_avg_sse2);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_sse2),
+ make_tuple(8, 4, &convolve8_sse2),
+ make_tuple(4, 8, &convolve8_sse2),
+ make_tuple(8, 8, &convolve8_sse2),
+ make_tuple(16, 8, &convolve8_sse2),
+ make_tuple(8, 16, &convolve8_sse2),
+ make_tuple(16, 16, &convolve8_sse2),
+ make_tuple(32, 16, &convolve8_sse2),
+ make_tuple(16, 32, &convolve8_sse2),
+ make_tuple(32, 32, &convolve8_sse2),
+ make_tuple(64, 32, &convolve8_sse2),
+ make_tuple(32, 64, &convolve8_sse2),
+ make_tuple(64, 64, &convolve8_sse2),
+ make_tuple(4, 4, &convolve10_sse2),
+ make_tuple(8, 4, &convolve10_sse2),
+ make_tuple(4, 8, &convolve10_sse2),
+ make_tuple(8, 8, &convolve10_sse2),
+ make_tuple(16, 8, &convolve10_sse2),
+ make_tuple(8, 16, &convolve10_sse2),
+ make_tuple(16, 16, &convolve10_sse2),
+ make_tuple(32, 16, &convolve10_sse2),
+ make_tuple(16, 32, &convolve10_sse2),
+ make_tuple(32, 32, &convolve10_sse2),
+ make_tuple(64, 32, &convolve10_sse2),
+ make_tuple(32, 64, &convolve10_sse2),
+ make_tuple(64, 64, &convolve10_sse2),
+ make_tuple(4, 4, &convolve12_sse2),
+ make_tuple(8, 4, &convolve12_sse2),
+ make_tuple(4, 8, &convolve12_sse2),
+ make_tuple(8, 8, &convolve12_sse2),
+ make_tuple(16, 8, &convolve12_sse2),
+ make_tuple(8, 16, &convolve12_sse2),
+ make_tuple(16, 16, &convolve12_sse2),
+ make_tuple(32, 16, &convolve12_sse2),
+ make_tuple(16, 32, &convolve12_sse2),
+ make_tuple(32, 32, &convolve12_sse2),
+ make_tuple(64, 32, &convolve12_sse2),
+ make_tuple(32, 64, &convolve12_sse2),
+ make_tuple(64, 64, &convolve12_sse2)));
+#else
+const ConvolveFunctions convolve8_sse2(
+#if CONFIG_USE_X86INC
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
+#else
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+#endif // CONFIG_USE_X86INC
+ vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
+ vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
+ vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_sse2),
@@ -619,13 +1654,18 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
- vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
- vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_ssse3),
@@ -643,11 +1683,52 @@
make_tuple(64, 64, &convolve8_ssse3)));
#endif
+#if HAVE_AVX2 && HAVE_SSSE3
+const ConvolveFunctions convolve8_avx2(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_avx2),
+ make_tuple(8, 4, &convolve8_avx2),
+ make_tuple(4, 8, &convolve8_avx2),
+ make_tuple(8, 8, &convolve8_avx2),
+ make_tuple(8, 16, &convolve8_avx2),
+ make_tuple(16, 8, &convolve8_avx2),
+ make_tuple(16, 16, &convolve8_avx2),
+ make_tuple(32, 16, &convolve8_avx2),
+ make_tuple(16, 32, &convolve8_avx2),
+ make_tuple(32, 32, &convolve8_avx2),
+ make_tuple(64, 32, &convolve8_avx2),
+ make_tuple(32, 64, &convolve8_avx2),
+ make_tuple(64, 64, &convolve8_avx2)));
+#endif // HAVE_AVX2 && HAVE_SSSE3
+
#if HAVE_NEON
+#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
- vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
- vp9_convolve8_neon, vp9_convolve8_avg_neon);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else // HAVE_NEON
+const ConvolveFunctions convolve8_neon(
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#endif // HAVE_NEON_ASM
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_neon),
@@ -663,13 +1744,17 @@
make_tuple(64, 32, &convolve8_neon),
make_tuple(32, 64, &convolve8_neon),
make_tuple(64, 64, &convolve8_neon)));
-#endif
+#endif // HAVE_NEON
#if HAVE_DSPR2
const ConvolveFunctions convolve8_dspr2(
- vp9_convolve8_horiz_dspr2, vp9_convolve8_avg_horiz_dspr2,
- vp9_convolve8_vert_dspr2, vp9_convolve8_avg_vert_dspr2,
- vp9_convolve8_dspr2, vp9_convolve8_avg_dspr2);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
+ vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
+ vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
+ vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_dspr2),
@@ -686,4 +1771,30 @@
make_tuple(32, 64, &convolve8_dspr2),
make_tuple(64, 64, &convolve8_dspr2)));
#endif
+
+#if HAVE_MSA
+const ConvolveFunctions convolve8_msa(
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa,
+ vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
+ vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
+ vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_msa),
+ make_tuple(8, 4, &convolve8_msa),
+ make_tuple(4, 8, &convolve8_msa),
+ make_tuple(8, 8, &convolve8_msa),
+ make_tuple(16, 8, &convolve8_msa),
+ make_tuple(8, 16, &convolve8_msa),
+ make_tuple(16, 16, &convolve8_msa),
+ make_tuple(32, 16, &convolve8_msa),
+ make_tuple(16, 32, &convolve8_msa),
+ make_tuple(32, 32, &convolve8_msa),
+ make_tuple(64, 32, &convolve8_msa),
+ make_tuple(32, 64, &convolve8_msa),
+ make_tuple(64, 64, &convolve8_msa)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
| null |
#if CONFIG_VP9_HIGHBITDEPTH
vpx_memset16(input16_, value, kInputBufferSize);
#endif
}
void CopyOutputToRef() {
memcpy(output_ref_, output_, kOutputBufferSize);
#if CONFIG_VP9_HIGHBITDEPTH
     memcpy(output16_ref_, output16_, kOutputBufferSize * sizeof(output16_[0]));
#endif
|
150,826 |
virtual void SetUp() {
UUT_ = GET_PARAM(2);
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
output_[i] = 255;
else
output_[i] = 0;
}
::libvpx_test::ACMRandom prng;
for (int i = 0; i < kInputBufferSize; ++i)
input_[i] = prng.Rand8Extremes();
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
UUT_ = GET_PARAM(2);
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ != 0)
mask_ = (1 << UUT_->use_highbd_) - 1;
else
mask_ = 255;
#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
output_[i] = 255;
else
output_[i] = 0;
}
::libvpx_test::ACMRandom prng;
for (int i = 0; i < kInputBufferSize; ++i) {
if (i & 1) {
input_[i] = 255;
#if CONFIG_VP9_HIGHBITDEPTH
input16_[i] = mask_;
#endif
} else {
input_[i] = prng.Rand8Extremes();
#if CONFIG_VP9_HIGHBITDEPTH
input16_[i] = prng.Rand16() & mask_;
#endif
}
}
}
|
@@ -9,40 +9,65 @@
*/
#include <string.h>
-#include "test/acm_random.h"
-#include "test/register_state_check.h"
-#include "test/util.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_filter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
namespace {
-typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h);
+
+static const unsigned int kMaxDimension = 64;
+
+typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
- convolve_fn_t v8, convolve_fn_t v8_avg,
- convolve_fn_t hv8, convolve_fn_t hv8_avg)
- : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
- hv8_avg_(hv8_avg) {}
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
+ ConvolveFunc h8, ConvolveFunc h8_avg,
+ ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg,
+ ConvolveFunc sh8, ConvolveFunc sh8_avg,
+ ConvolveFunc sv8, ConvolveFunc sv8_avg,
+ ConvolveFunc shv8, ConvolveFunc shv8_avg,
+ int bd)
+ : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
+ v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
+ sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
+ use_highbd_(bd) {}
- convolve_fn_t h8_;
- convolve_fn_t v8_;
- convolve_fn_t hv8_;
- convolve_fn_t h8_avg_;
- convolve_fn_t v8_avg_;
- convolve_fn_t hv8_avg_;
+ ConvolveFunc copy_;
+ ConvolveFunc avg_;
+ ConvolveFunc h8_;
+ ConvolveFunc v8_;
+ ConvolveFunc hv8_;
+ ConvolveFunc h8_avg_;
+ ConvolveFunc v8_avg_;
+ ConvolveFunc hv8_avg_;
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
-typedef std::tr1::tuple<int, int, const ConvolveFunctions*> convolve_param_t;
+typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
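
Each INSTANTIATE_TEST_CASE_P entry builds one such tuple: width, height, and a pointer to the function table under test, read back inside the fixture with GET_PARAM(0..2). A standalone sketch of just the tuple plumbing (an int stands in for the ConvolveFunctions table so the snippet compiles on its own; <tr1/tuple> assumes GCC's TR1 headers, matching the std::tr1 usage in this test):

#include <assert.h>
#include <tr1/tuple>

int main(void) {
  typedef std::tr1::tuple<int, int, const int *> Param;
  const int table = 0;  // placeholder for &convolve8_c etc.
  Param p = std::tr1::make_tuple(64, 64, &table);
  assert(std::tr1::get<0>(p) == 64);      // width
  assert(std::tr1::get<1>(p) == 64);      // height
  assert(std::tr1::get<2>(p) == &table);  // function table
  return 0;
}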
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
@@ -68,71 +93,66 @@
const int kInterp_Extend = 4;
const unsigned int intermediate_height =
(kInterp_Extend - 1) + output_height + kInterp_Extend;
+ unsigned int i, j;
- /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
- * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
- * + kInterp_Extend
- * = 3 + 16 + 4
- * = 23
- * and filter_max_width = 16
- */
- uint8_t intermediate_buffer[71 * 64];
+  // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+  // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+  //                               + kInterp_Extend
+  //                               = 3 + 64 + 4
+  //                               = 71
+  // and filter_max_width = kMaxDimension = 64
+  //
+ uint8_t intermediate_buffer[71 * kMaxDimension];
const int intermediate_next_stride = 1 - intermediate_height * output_width;
// Horizontal pass (src -> transposed intermediate).
- {
- uint8_t *output_ptr = intermediate_buffer;
- const int src_next_row_stride = src_stride - output_width;
- unsigned int i, j;
- src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
- for (i = 0; i < intermediate_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
- ++src_ptr;
- output_ptr += intermediate_height;
- }
- src_ptr += src_next_row_stride;
- output_ptr += intermediate_next_stride;
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
}
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
}
// Vertical pass (transposed intermediate -> dst).
- {
- uint8_t *src_ptr = intermediate_buffer;
- const int dst_next_row_stride = dst_stride - output_width;
- unsigned int i, j;
- for (i = 0; i < output_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
- src_ptr += intermediate_height;
- }
- src_ptr += intermediate_next_stride;
- dst_ptr += dst_next_row_stride;
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
}
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
}
}
@@ -159,17 +179,138 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
- uint8_t tmp[64 * 64];
+ uint8_t tmp[kMaxDimension * kMaxDimension];
- assert(output_width <= 64);
- assert(output_height <= 64);
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
-class ConvolveTest : public ::testing::TestWithParam<convolve_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+  // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+  // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+  //                                 + kInterp_Extend
+  //                               = 3 + 64 + 4
+  //                               = 71
+  // and filter_max_width = kMaxDimension = 64, the largest block we filter.
+ uint16_t intermediate_buffer[71 * kMaxDimension];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint16_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to the [0, (1 << bd) - 1] range...
+ *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint16_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to the [0, (1 << bd) - 1] range...
+ *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
+
+void highbd_block2d_average_c(uint16_t *src,
+ unsigned int src_stride,
+ uint16_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
+
+void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ uint16_t tmp[kMaxDimension * kMaxDimension];
+
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height, bd);
+ highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
@@ -177,13 +318,36 @@
vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
output_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
+ output_ref_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment,
+ (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
+ output16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+ output16_ref_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+#endif
}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
+ vpx_free(output_ref_);
+ output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_free(input16_ - 1);
+ input16_ = NULL;
+ vpx_free(output16_);
+ output16_ = NULL;
+ vpx_free(output16_ref_);
+ output16_ref_ = NULL;
+#endif
}
protected:
@@ -191,7 +355,6 @@
static const int kOuterBlockSize = 256;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
- static const int kMaxDimension = 64;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -212,6 +375,12 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ != 0)
+ mask_ = (1 << UUT_->use_highbd_) - 1;
+ else
+ mask_ = 255;
+#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
@@ -221,12 +390,33 @@
}
::libvpx_test::ACMRandom prng;
- for (int i = 0; i < kInputBufferSize; ++i)
- input_[i] = prng.Rand8Extremes();
+ for (int i = 0; i < kInputBufferSize; ++i) {
+ if (i & 1) {
+ input_[i] = 255;
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = mask_;
+#endif
+ } else {
+ input_[i] = prng.Rand8Extremes();
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = prng.Rand16() & mask_;
+#endif
+ }
+ }
}
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_memset16(input16_, value, kInputBufferSize);
+#endif
+ }
+
+ void CopyOutputToRef() {
+ memcpy(output_ref_, output_, kOutputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
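+    // output16_ holds uint16_t elements, so the byte count below must scale.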
+    memcpy(output16_ref_, output16_, kOutputBufferSize * sizeof(*output16_));
+#endif
}
void CheckGuardBlocks() {
@@ -236,39 +426,197 @@
}
}
- uint8_t* input() const {
+ uint8_t *input() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
}
- uint8_t* output() const {
+ uint8_t *output() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint8_t *output_ref() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
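+  // lookup()/assign_val() hide the element width: with high bitdepth the
+  // uint8_t * is a CONVERT_TO_BYTEPTR-wrapped uint16_t *, so plain byte
+  // indexing would address the wrong element.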
+ uint16_t lookup(uint8_t *list, int index) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return list[index];
+ } else {
+ return CONVERT_TO_SHORTPTR(list)[index];
+ }
+#else
+ return list[index];
+#endif
+ }
+
+ void assign_val(uint8_t *list, int index, uint16_t val) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ list[index] = (uint8_t) val;
+ } else {
+ CONVERT_TO_SHORTPTR(list)[index] = val;
+ }
+#else
+ list[index] = (uint8_t) val;
+#endif
+ }
+
+ void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+ } else {
+ highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
+ src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
+ }
+#else
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+#endif
+ }
+
+ void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+ } else {
+ highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
+ HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
+ output_width, output_height, UUT_->use_highbd_);
+ }
+#else
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+#endif
}
const ConvolveFunctions* UUT_;
static uint8_t* input_;
static uint8_t* output_;
+ static uint8_t* output_ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ static uint16_t* input16_;
+ static uint16_t* output16_;
+ static uint16_t* output16_ref_;
+ int mask_;
+#endif
};
+
uint8_t* ConvolveTest::input_ = NULL;
uint8_t* ConvolveTest::output_ = NULL;
+uint8_t* ConvolveTest::output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+uint16_t* ConvolveTest::input16_ = NULL;
+uint16_t* ConvolveTest::output16_ = NULL;
+uint16_t* ConvolveTest::output16_ref_ = NULL;
+#endif
TEST_P(ConvolveTest, GuardBlocks) {
CheckGuardBlocks();
}
+TEST_P(ConvolveTest, Copy) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
+ << "(" << x << "," << y << ")";
+}
+
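+// Avg must write the rounded mean of the prior output (saved beforehand via
+// CopyOutputToRef()) and the input: out = (in + out_ref + 1) >> 1.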
+TEST_P(ConvolveTest, Avg) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t* const out_ref = output_ref();
+ CopyOutputToRef();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
+ lookup(out_ref, y * kOutputStride + x), 1))
+ << "(" << x << "," << y << ")";
+}
+
TEST_P(ConvolveTest, CopyHoriz) {
uint8_t* const in = input();
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -277,15 +625,16 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -294,31 +643,26 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
+ 16, Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
-const int16_t (*kTestFilterList[])[8] = {
- vp9_bilinear_filters,
- vp9_sub_pel_filters_8,
- vp9_sub_pel_filters_8s,
- vp9_sub_pel_filters_8lp
-};
-const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
+const int kNumFilterBanks = 4;
const int kNumFilters = 16;
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -341,40 +685,57 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
-
+#endif
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_(in, kInputStride, out, kOutputStride,
kInvalidFilter, 16, filters[filter_y], 16,
Width(), Height()));
- else
- REGISTER_STATE_CHECK(
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
UUT_->h8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -386,54 +747,77 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
+#endif
// Populate ref and out with some random data
::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- const uint8_t r = prng.Rand8Extremes();
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
- out[y * kOutputStride + x] = r;
- ref[y * kOutputStride + x] = r;
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
}
}
- const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
-
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
- const int kNumFilters = 16;
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
else
- REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -442,108 +826,102 @@
}
}
-DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128},
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128}
-};
-
-/* This test exercises the horizontal and vertical filter functions. */
-TEST_P(ConvolveTest, ChangeFilterWorks) {
- uint8_t* const in = input();
- uint8_t* const out = output();
-
- /* Assume that the first input sample is at the 8/16th position. */
- const int kInitialSubPelOffset = 8;
-
- /* Filters are 8-tap, so the first filter tap will be applied to the pixel
- * at position -3 with respect to the current filtering position. Since
- * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
- * which is non-zero only in the last tap. So, applying the filter at the
- * current input position will result in an output equal to the pixel at
- * offset +4 (-3 + 7) with respect to the current filtering position.
- */
- const int kPixelSelected = 4;
-
- /* Assume that each output pixel requires us to step on by 17/16th pixels in
- * the input.
- */
- const int kInputPixelStep = 17;
-
- /* The filters are setup in such a way that the expected output produces
- * sets of 8 identical output samples. As the filter position moves to the
- * next 1/16th pixel position the only active (=128) filter tap moves one
- * position to the left, resulting in the same input pixel being replicated
- * in to the output for 8 consecutive samples. After each set of 8 positions
- * the filters select a different input pixel. kFilterPeriodAdjust below
- * computes which input pixel is written to the output for a specified
- * x or y position.
- */
-
- /* Test the horizontal filter. */
- REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, NULL, 0, Width(), Height()));
-
- for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjust = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << "width = " << Width();
+TEST_P(ConvolveTest, FilterExtremes) {
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t *ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
}
+#else
+ uint8_t ref[kOutputStride * kMaxDimension];
+#endif
- /* Test the vertical filter. */
- REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
- NULL, 0, kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, Width(), Height()));
-
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjust = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
- }
-
- /* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- Width(), Height()));
-
- for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjustY = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustY * kInputPixelStep)
- >> SUBPEL_BITS);
for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjustX = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustX * kInputPixelStep)
- >> SUBPEL_BITS);
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
+ }
+ }
- ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
- << "x == " << x << ", y == " << y;
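+  // Sweep every extreme input pattern: for each axis, bit i of seed_val
+  // forces column (axis == 0) or row (axis == 1) i of the 8x8 window under
+  // the filter taps to the maximum pixel value.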
+ for (int axis = 0; axis < 2; axis++) {
+ int seed_val = 0;
+ while (seed_val < 256) {
+ for (int y = 0; y < 8; ++y) {
+ for (int x = 0; x < 8; ++x) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
+#else
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
+#endif
+ if (axis) seed_val++;
+ }
+ if (axis)
+          seed_val -= 8;
+ else
+ seed_val++;
+ }
+ if (axis) seed_val += 8;
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
}
}
}
@@ -553,22 +931,24 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
for (int frac = 0; frac < 16; ++frac) {
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ eighttap[frac], step,
+ eighttap[frac], step,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(in, y * kInputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "x == " << x << ", y == " << y
<< ", frac == " << frac << ", step == " << step;
}
@@ -579,10 +959,590 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE2 && ARCH_X86_64
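+// Each wrap_* shim pins the bit-depth argument of a vpx_highbd_* function so
+// that it matches the ConvolveFunc signature used by the test harness.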
+void wrap_convolve8_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride, filter_x,
+ filter_x_stride, filter_y, filter_y_stride,
+ w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+#endif // HAVE_SSE2 && ARCH_X86_64
+
+void wrap_convolve_copy_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_copy_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_copy_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
const ConvolveFunctions convolve8_c(
- vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
- vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
- vp9_convolve8_c, vp9_convolve8_avg_c);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+const ConvolveFunctions convolve10_c(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve10_c),
+ make_tuple(8, 4, &convolve10_c),
+ make_tuple(4, 8, &convolve10_c),
+ make_tuple(8, 8, &convolve10_c),
+ make_tuple(16, 8, &convolve10_c),
+ make_tuple(8, 16, &convolve10_c),
+ make_tuple(16, 16, &convolve10_c),
+ make_tuple(32, 16, &convolve10_c),
+ make_tuple(16, 32, &convolve10_c),
+ make_tuple(32, 32, &convolve10_c),
+ make_tuple(64, 32, &convolve10_c),
+ make_tuple(32, 64, &convolve10_c),
+ make_tuple(64, 64, &convolve10_c)));
+const ConvolveFunctions convolve12_c(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve12_c),
+ make_tuple(8, 4, &convolve12_c),
+ make_tuple(4, 8, &convolve12_c),
+ make_tuple(8, 8, &convolve12_c),
+ make_tuple(16, 8, &convolve12_c),
+ make_tuple(8, 16, &convolve12_c),
+ make_tuple(16, 16, &convolve12_c),
+ make_tuple(32, 16, &convolve12_c),
+ make_tuple(16, 32, &convolve12_c),
+ make_tuple(32, 32, &convolve12_c),
+ make_tuple(64, 32, &convolve12_c),
+ make_tuple(32, 64, &convolve12_c),
+ make_tuple(64, 64, &convolve12_c)));
+
+#else
+
+const ConvolveFunctions convolve8_c(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
+ vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_c),
@@ -598,12 +1558,87 @@
make_tuple(64, 32, &convolve8_c),
make_tuple(32, 64, &convolve8_c),
make_tuple(64, 64, &convolve8_c)));
+#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && ARCH_X86_64
+#if CONFIG_VP9_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
- vp9_convolve8_horiz_sse2, vp9_convolve8_avg_horiz_sse2,
- vp9_convolve8_vert_sse2, vp9_convolve8_avg_vert_sse2,
- vp9_convolve8_sse2, vp9_convolve8_avg_sse2);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_sse2),
+ make_tuple(8, 4, &convolve8_sse2),
+ make_tuple(4, 8, &convolve8_sse2),
+ make_tuple(8, 8, &convolve8_sse2),
+ make_tuple(16, 8, &convolve8_sse2),
+ make_tuple(8, 16, &convolve8_sse2),
+ make_tuple(16, 16, &convolve8_sse2),
+ make_tuple(32, 16, &convolve8_sse2),
+ make_tuple(16, 32, &convolve8_sse2),
+ make_tuple(32, 32, &convolve8_sse2),
+ make_tuple(64, 32, &convolve8_sse2),
+ make_tuple(32, 64, &convolve8_sse2),
+ make_tuple(64, 64, &convolve8_sse2),
+ make_tuple(4, 4, &convolve10_sse2),
+ make_tuple(8, 4, &convolve10_sse2),
+ make_tuple(4, 8, &convolve10_sse2),
+ make_tuple(8, 8, &convolve10_sse2),
+ make_tuple(16, 8, &convolve10_sse2),
+ make_tuple(8, 16, &convolve10_sse2),
+ make_tuple(16, 16, &convolve10_sse2),
+ make_tuple(32, 16, &convolve10_sse2),
+ make_tuple(16, 32, &convolve10_sse2),
+ make_tuple(32, 32, &convolve10_sse2),
+ make_tuple(64, 32, &convolve10_sse2),
+ make_tuple(32, 64, &convolve10_sse2),
+ make_tuple(64, 64, &convolve10_sse2),
+ make_tuple(4, 4, &convolve12_sse2),
+ make_tuple(8, 4, &convolve12_sse2),
+ make_tuple(4, 8, &convolve12_sse2),
+ make_tuple(8, 8, &convolve12_sse2),
+ make_tuple(16, 8, &convolve12_sse2),
+ make_tuple(8, 16, &convolve12_sse2),
+ make_tuple(16, 16, &convolve12_sse2),
+ make_tuple(32, 16, &convolve12_sse2),
+ make_tuple(16, 32, &convolve12_sse2),
+ make_tuple(32, 32, &convolve12_sse2),
+ make_tuple(64, 32, &convolve12_sse2),
+ make_tuple(32, 64, &convolve12_sse2),
+ make_tuple(64, 64, &convolve12_sse2)));
+#else
+const ConvolveFunctions convolve8_sse2(
+#if CONFIG_USE_X86INC
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
+#else
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+#endif // CONFIG_USE_X86INC
+ vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
+ vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
+ vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_sse2),
@@ -619,13 +1654,18 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
- vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
- vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_ssse3),
@@ -643,11 +1683,52 @@
make_tuple(64, 64, &convolve8_ssse3)));
#endif
+#if HAVE_AVX2 && HAVE_SSSE3
+const ConvolveFunctions convolve8_avx2(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_avx2),
+ make_tuple(8, 4, &convolve8_avx2),
+ make_tuple(4, 8, &convolve8_avx2),
+ make_tuple(8, 8, &convolve8_avx2),
+ make_tuple(8, 16, &convolve8_avx2),
+ make_tuple(16, 8, &convolve8_avx2),
+ make_tuple(16, 16, &convolve8_avx2),
+ make_tuple(32, 16, &convolve8_avx2),
+ make_tuple(16, 32, &convolve8_avx2),
+ make_tuple(32, 32, &convolve8_avx2),
+ make_tuple(64, 32, &convolve8_avx2),
+ make_tuple(32, 64, &convolve8_avx2),
+ make_tuple(64, 64, &convolve8_avx2)));
+#endif // HAVE_AVX2 && HAVE_SSSE3
+
#if HAVE_NEON
+#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
- vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
- vp9_convolve8_neon, vp9_convolve8_avg_neon);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else  // !HAVE_NEON_ASM
+const ConvolveFunctions convolve8_neon(
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#endif // HAVE_NEON_ASM
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_neon),
@@ -663,13 +1744,17 @@
make_tuple(64, 32, &convolve8_neon),
make_tuple(32, 64, &convolve8_neon),
make_tuple(64, 64, &convolve8_neon)));
-#endif
+#endif // HAVE_NEON
#if HAVE_DSPR2
const ConvolveFunctions convolve8_dspr2(
- vp9_convolve8_horiz_dspr2, vp9_convolve8_avg_horiz_dspr2,
- vp9_convolve8_vert_dspr2, vp9_convolve8_avg_vert_dspr2,
- vp9_convolve8_dspr2, vp9_convolve8_avg_dspr2);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
+ vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
+ vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
+ vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_dspr2),
@@ -686,4 +1771,30 @@
make_tuple(32, 64, &convolve8_dspr2),
make_tuple(64, 64, &convolve8_dspr2)));
#endif
+
+#if HAVE_MSA
+const ConvolveFunctions convolve8_msa(
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa,
+ vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
+ vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
+ vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_msa),
+ make_tuple(8, 4, &convolve8_msa),
+ make_tuple(4, 8, &convolve8_msa),
+ make_tuple(8, 8, &convolve8_msa),
+ make_tuple(16, 8, &convolve8_msa),
+ make_tuple(8, 16, &convolve8_msa),
+ make_tuple(16, 16, &convolve8_msa),
+ make_tuple(32, 16, &convolve8_msa),
+ make_tuple(16, 32, &convolve8_msa),
+ make_tuple(32, 32, &convolve8_msa),
+ make_tuple(64, 32, &convolve8_msa),
+ make_tuple(32, 64, &convolve8_msa),
+ make_tuple(64, 64, &convolve8_msa)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
for (int i = 0; i < kInputBufferSize; ++i)
input_[i] = prng.Rand8Extremes();
|
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ != 0)
mask_ = (1 << UUT_->use_highbd_) - 1;
else
mask_ = 255;
#endif
for (int i = 0; i < kInputBufferSize; ++i) {
if (i & 1) {
input_[i] = 255;
#if CONFIG_VP9_HIGHBITDEPTH
input16_[i] = mask_;
#endif
} else {
input_[i] = prng.Rand8Extremes();
#if CONFIG_VP9_HIGHBITDEPTH
input16_[i] = prng.Rand16() & mask_;
#endif
}
}
|
150,827 |
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
vpx_free(output_ref_);
output_ref_ = NULL;
#if CONFIG_VP9_HIGHBITDEPTH
vpx_free(input16_ - 1);
input16_ = NULL;
vpx_free(output16_);
output16_ = NULL;
vpx_free(output16_ref_);
output16_ref_ = NULL;
#endif
}
|
@@ -9,40 +9,65 @@
*/
#include <string.h>
-#include "test/acm_random.h"
-#include "test/register_state_check.h"
-#include "test/util.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_filter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
namespace {
-typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h);
+
+static const unsigned int kMaxDimension = 64;
+
+typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
- convolve_fn_t v8, convolve_fn_t v8_avg,
- convolve_fn_t hv8, convolve_fn_t hv8_avg)
- : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
- hv8_avg_(hv8_avg) {}
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
+ ConvolveFunc h8, ConvolveFunc h8_avg,
+ ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg,
+ ConvolveFunc sh8, ConvolveFunc sh8_avg,
+ ConvolveFunc sv8, ConvolveFunc sv8_avg,
+ ConvolveFunc shv8, ConvolveFunc shv8_avg,
+ int bd)
+ : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
+ v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
+ sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
+ use_highbd_(bd) {}
- convolve_fn_t h8_;
- convolve_fn_t v8_;
- convolve_fn_t hv8_;
- convolve_fn_t h8_avg_;
- convolve_fn_t v8_avg_;
- convolve_fn_t hv8_avg_;
+ ConvolveFunc copy_;
+ ConvolveFunc avg_;
+ ConvolveFunc h8_;
+ ConvolveFunc v8_;
+ ConvolveFunc hv8_;
+ ConvolveFunc h8_avg_;
+ ConvolveFunc v8_avg_;
+ ConvolveFunc hv8_avg_;
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
-typedef std::tr1::tuple<int, int, const ConvolveFunctions*> convolve_param_t;
+typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
@@ -68,71 +93,66 @@
const int kInterp_Extend = 4;
const unsigned int intermediate_height =
(kInterp_Extend - 1) + output_height + kInterp_Extend;
+ unsigned int i, j;
- /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
- * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
- * + kInterp_Extend
- * = 3 + 16 + 4
- * = 23
- * and filter_max_width = 16
- */
- uint8_t intermediate_buffer[71 * 64];
+  // Size of intermediate_buffer is max_intermediate_height * kMaxDimension,
+  // where max_intermediate_height = (kInterp_Extend - 1) + kMaxDimension
+  //                               + kInterp_Extend
+  //                               = 3 + 64 + 4
+  //                               = 71
+  // and kMaxDimension (the widest block this test filters) = 64.
+  //
+ uint8_t intermediate_buffer[71 * kMaxDimension];
const int intermediate_next_stride = 1 - intermediate_height * output_width;
// Horizontal pass (src -> transposed intermediate).
- {
- uint8_t *output_ptr = intermediate_buffer;
- const int src_next_row_stride = src_stride - output_width;
- unsigned int i, j;
- src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
- for (i = 0; i < intermediate_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
- ++src_ptr;
- output_ptr += intermediate_height;
- }
- src_ptr += src_next_row_stride;
- output_ptr += intermediate_next_stride;
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
}
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
}
// Vertical pass (transposed intermediate -> dst).
- {
- uint8_t *src_ptr = intermediate_buffer;
- const int dst_next_row_stride = dst_stride - output_width;
- unsigned int i, j;
- for (i = 0; i < output_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
- src_ptr += intermediate_height;
- }
- src_ptr += intermediate_next_stride;
- dst_ptr += dst_next_row_stride;
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
}
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
}
}
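The reference filter above works in Q7 fixed point: the taps of each kernel sum to 128 (VP9_FILTER_WEIGHT), the bias VP9_FILTER_WEIGHT >> 1 rounds to nearest, and >> VP9_FILTER_SHIFT (7) normalizes. A minimal standalone sketch, not part of the test, that checks this rounding with an identity kernel (names here are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the clamp the reference filter applies.
static uint8_t clip8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

int main() {
  // A single 128 at tap 3 is the identity kernel in Q7 (1.0 == 128 == 1 << 7).
  const int16_t kIdentity[8] = { 0, 0, 0, 128, 0, 0, 0, 0 };
  const uint8_t src[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
  int sum = 128 >> 1;  // VP9_FILTER_WEIGHT >> 1: round-to-nearest bias
  for (int k = 0; k < 8; ++k) sum += src[k] * kIdentity[k];
  printf("%d\n", clip8(sum >> 7));  // prints 40: identity returns src[3]
  return 0;
}
```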
@@ -159,17 +179,138 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
- uint8_t tmp[64 * 64];
+ uint8_t tmp[kMaxDimension * kMaxDimension];
- assert(output_width <= 64);
- assert(output_height <= 64);
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
-class ConvolveTest : public ::testing::TestWithParam<convolve_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+  /* Size of intermediate_buffer is max_intermediate_height * kMaxDimension,
+   * where max_intermediate_height = (kInterp_Extend - 1) + kMaxDimension
+   *                               + kInterp_Extend
+   *                               = 3 + 64 + 4
+   *                               = 71
+   * and kMaxDimension (the widest block this test filters) = 64.
+   */
+ uint16_t intermediate_buffer[71 * kMaxDimension];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint16_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to the [0, (1 << bd) - 1] range...
+ *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint16_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to the [0, (1 << bd) - 1] range...
+ *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
+
+void highbd_block2d_average_c(uint16_t *src,
+ unsigned int src_stride,
+ uint16_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
+
+void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ uint16_t tmp[kMaxDimension * kMaxDimension];
+
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height, bd);
+ highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
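The high-bitdepth reference pass differs from the 8-bit one only in the clamp: clip_pixel_highbd limits samples to [0, (1 << bd) - 1] instead of [0, 255]. A behaviorally equivalent sketch of that clamp, assuming those semantics rather than reproducing libvpx's exact implementation:

```cpp
#include <cstdint>
#include <cstdio>

// Sketch: clamp a filtered value to the range of a bd-bit pixel.
static uint16_t clip_pixel_highbd_sketch(int val, int bd) {
  const int max = (1 << bd) - 1;
  return static_cast<uint16_t>(val < 0 ? 0 : (val > max ? max : val));
}

int main() {
  printf("%d\n", clip_pixel_highbd_sketch(5000, 10));  // 1023: 10-bit ceiling
  printf("%d\n", clip_pixel_highbd_sketch(-3, 12));    // 0: floor is unchanged
  return 0;
}
```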
+
+class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
@@ -177,13 +318,36 @@
vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
output_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
+ output_ref_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment,
+ (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
+ output16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+ output16_ref_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+#endif
}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
+ vpx_free(output_ref_);
+ output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_free(input16_ - 1);
+ input16_ = NULL;
+ vpx_free(output16_);
+ output16_ = NULL;
+ vpx_free(output16_ref_);
+ output16_ref_ = NULL;
+#endif
}
protected:
@@ -191,7 +355,6 @@
static const int kOuterBlockSize = 256;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
- static const int kMaxDimension = 64;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -212,6 +375,12 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ != 0)
+ mask_ = (1 << UUT_->use_highbd_) - 1;
+ else
+ mask_ = 255;
+#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
@@ -221,12 +390,33 @@
}
::libvpx_test::ACMRandom prng;
- for (int i = 0; i < kInputBufferSize; ++i)
- input_[i] = prng.Rand8Extremes();
+ for (int i = 0; i < kInputBufferSize; ++i) {
+ if (i & 1) {
+ input_[i] = 255;
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = mask_;
+#endif
+ } else {
+ input_[i] = prng.Rand8Extremes();
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = prng.Rand16() & mask_;
+#endif
+ }
+ }
}
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_memset16(input16_, value, kInputBufferSize);
+#endif
+ }
+
+ void CopyOutputToRef() {
+ memcpy(output_ref_, output_, kOutputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+    // Size must be in bytes: the buffer holds kOutputBufferSize uint16_t's.
+    memcpy(output16_ref_, output16_, kOutputBufferSize * sizeof(output16_[0]));
+#endif
}
void CheckGuardBlocks() {
@@ -236,39 +426,197 @@
}
}
- uint8_t* input() const {
+ uint8_t *input() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
}
- uint8_t* output() const {
+ uint8_t *output() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint8_t *output_ref() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint16_t lookup(uint8_t *list, int index) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return list[index];
+ } else {
+ return CONVERT_TO_SHORTPTR(list)[index];
+ }
+#else
+ return list[index];
+#endif
+ }
+
+ void assign_val(uint8_t *list, int index, uint16_t val) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ list[index] = (uint8_t) val;
+ } else {
+ CONVERT_TO_SHORTPTR(list)[index] = val;
+ }
+#else
+ list[index] = (uint8_t) val;
+#endif
+ }
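lookup() and assign_val() exist because, under CONFIG_VP9_HIGHBITDEPTH, 16-bit buffers travel through the 8-bit pointer API as tagged addresses. A sketch of that convention, assuming libvpx's CONVERT_TO_BYTEPTR/CONVERT_TO_SHORTPTR are essentially pointer shifts (the MY_ macros below are stand-ins, not the library's definitions):

```cpp
#include <cassert>
#include <cstdint>

// Stand-ins for the libvpx pointer-tagging macros: a uint16_t buffer is
// smuggled through a uint8_t* parameter by halving the address, and recovered
// by doubling it. The tagged pointer must never be dereferenced directly.
#define MY_CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))
#define MY_CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))

int main() {
  static uint16_t buf[4] = { 1, 2, 3, 4 };
  uint8_t *tagged = MY_CONVERT_TO_BYTEPTR(buf);   // pass this through uint8_t* APIs
  assert(MY_CONVERT_TO_SHORTPTR(tagged) == buf);  // round-trips to the real buffer
  return 0;
}
```

This is why the test routes every read and write through lookup()/assign_val(): they untag the pointer before touching memory.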
+
+ void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+ } else {
+ highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
+ src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
+ }
+#else
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+#endif
+ }
+
+ void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+ } else {
+ highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
+ HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
+ output_width, output_height, UUT_->use_highbd_);
+ }
+#else
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+#endif
}
const ConvolveFunctions* UUT_;
static uint8_t* input_;
static uint8_t* output_;
+ static uint8_t* output_ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ static uint16_t* input16_;
+ static uint16_t* output16_;
+ static uint16_t* output16_ref_;
+ int mask_;
+#endif
};
+
uint8_t* ConvolveTest::input_ = NULL;
uint8_t* ConvolveTest::output_ = NULL;
+uint8_t* ConvolveTest::output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+uint16_t* ConvolveTest::input16_ = NULL;
+uint16_t* ConvolveTest::output16_ = NULL;
+uint16_t* ConvolveTest::output16_ref_ = NULL;
+#endif
TEST_P(ConvolveTest, GuardBlocks) {
CheckGuardBlocks();
}
+TEST_P(ConvolveTest, Copy) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, Avg) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t* const out_ref = output_ref();
+ CopyOutputToRef();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
+ lookup(out_ref, y * kOutputStride + x), 1))
+ << "(" << x << "," << y << ")";
+}
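The expected value in the Avg check uses libvpx's ROUND_POWER_OF_TWO, which rounds to nearest rather than truncating: half the divisor is added before the shift. A small sketch of that macro's standard form:

```cpp
#include <cstdio>

// Round-to-nearest division by 2^n: add 2^(n-1) before shifting.
#define MY_ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

int main() {
  printf("%d\n", MY_ROUND_POWER_OF_TWO(5 + 6, 1));  // (11 + 1) >> 1 == 6
  printf("%d\n", MY_ROUND_POWER_OF_TWO(4 + 5, 1));  // (9 + 1) >> 1 == 5
  return 0;
}
```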
+
TEST_P(ConvolveTest, CopyHoriz) {
uint8_t* const in = input();
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -277,15 +625,16 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -294,31 +643,26 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
+ 16, Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
-const int16_t (*kTestFilterList[])[8] = {
- vp9_bilinear_filters,
- vp9_sub_pel_filters_8,
- vp9_sub_pel_filters_8s,
- vp9_sub_pel_filters_8lp
-};
-const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
+const int kNumFilterBanks = 4;
const int kNumFilters = 16;
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -341,40 +685,57 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
-
+#endif
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_(in, kInputStride, out, kOutputStride,
kInvalidFilter, 16, filters[filter_y], 16,
Width(), Height()));
- else
- REGISTER_STATE_CHECK(
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
UUT_->h8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -386,54 +747,77 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
+#endif
// Populate ref and out with some random data
::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- const uint8_t r = prng.Rand8Extremes();
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
- out[y * kOutputStride + x] = r;
- ref[y * kOutputStride + x] = r;
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
}
}
- const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
-
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
- const int kNumFilters = 16;
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
else
- REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -442,108 +826,102 @@
}
}
-DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128},
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128}
-};
-
-/* This test exercises the horizontal and vertical filter functions. */
-TEST_P(ConvolveTest, ChangeFilterWorks) {
- uint8_t* const in = input();
- uint8_t* const out = output();
-
- /* Assume that the first input sample is at the 8/16th position. */
- const int kInitialSubPelOffset = 8;
-
- /* Filters are 8-tap, so the first filter tap will be applied to the pixel
- * at position -3 with respect to the current filtering position. Since
- * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
- * which is non-zero only in the last tap. So, applying the filter at the
- * current input position will result in an output equal to the pixel at
- * offset +4 (-3 + 7) with respect to the current filtering position.
- */
- const int kPixelSelected = 4;
-
- /* Assume that each output pixel requires us to step on by 17/16th pixels in
- * the input.
- */
- const int kInputPixelStep = 17;
-
- /* The filters are setup in such a way that the expected output produces
- * sets of 8 identical output samples. As the filter position moves to the
- * next 1/16th pixel position the only active (=128) filter tap moves one
- * position to the left, resulting in the same input pixel being replicated
- * in to the output for 8 consecutive samples. After each set of 8 positions
- * the filters select a different input pixel. kFilterPeriodAdjust below
- * computes which input pixel is written to the output for a specified
- * x or y position.
- */
-
- /* Test the horizontal filter. */
- REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, NULL, 0, Width(), Height()));
-
- for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjust = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << "width = " << Width();
+TEST_P(ConvolveTest, FilterExtremes) {
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t *ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
}
+#else
+ uint8_t ref[kOutputStride * kMaxDimension];
+#endif
- /* Test the vertical filter. */
- REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
- NULL, 0, kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, Width(), Height()));
-
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjust = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
- }
-
- /* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- Width(), Height()));
-
- for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjustY = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustY * kInputPixelStep)
- >> SUBPEL_BITS);
for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjustX = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustX * kInputPixelStep)
- >> SUBPEL_BITS);
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
+ }
+ }
- ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
- << "x == " << x << ", y == " << y;
+ for (int axis = 0; axis < 2; axis++) {
+ int seed_val = 0;
+ while (seed_val < 256) {
+ for (int y = 0; y < 8; ++y) {
+ for (int x = 0; x < 8; ++x) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
+#else
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
+#endif
+ if (axis) seed_val++;
+ }
+ if (axis)
+          seed_val -= 8;
+ else
+ seed_val++;
+ }
+ if (axis) seed_val += 8;
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
}
}
}
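FilterExtremes sweeps seed_val from 0 to 255 and uses its bits to decide which columns (axis 0) or rows (axis 1) of the 8x8 input neighborhood sit at the extreme value, so every on/off pattern along each axis gets filtered. A simplified standalone sketch of the axis-0 pattern for one seed (the real loop also steps the seed per row):

```cpp
#include <cstdio>

int main() {
  const int seed = 0xA5;  // example seed; the test sweeps 0..255
  // Bit x of the seed selects whether column x is driven to the extreme.
  for (int y = 0; y < 8; ++y) {
    for (int x = 0; x < 8; ++x)
      printf("%c", ((seed >> x) & 1) ? '#' : '.');
    printf("\n");  // 0xA5 == 10100101b, so each row prints "#.#..#.#"
  }
  return 0;
}
```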
@@ -553,22 +931,24 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
for (int frac = 0; frac < 16; ++frac) {
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ eighttap[frac], step,
+ eighttap[frac], step,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(in, y * kInputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "x == " << x << ", y == " << y
<< ", frac == " << frac << ", step == " << step;
}
@@ -579,10 +959,590 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE2 && ARCH_X86_64
+void wrap_convolve8_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride, filter_x,
+ filter_x_stride, filter_y, filter_y_stride,
+ w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+#endif // HAVE_SSE2 && ARCH_X86_64
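The wrap_convolve8_*_sse2_{8,10,12} functions above differ only in the callee and the trailing bd argument. A hypothetical refactor, not what the test uses, could generate them from one function template, assuming the vpx_highbd_* signature shown in the wrappers:

```cpp
#include <cstddef>
#include <cstdint>

// Pointer type matching the high-bitdepth convolve entry points above.
typedef void (*HighbdConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int fx_stride,
                                   const int16_t *filter_y, int fy_stride,
                                   int w, int h, int bd);

// Binds a fixed bit depth to a high-bitdepth kernel, yielding the 11-argument
// ConvolveFunc shape the test tables expect.
template <HighbdConvolveFunc fn, int bd>
void wrap(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
          ptrdiff_t dst_stride, const int16_t *filter_x, int fx_stride,
          const int16_t *filter_y, int fy_stride, int w, int h) {
  fn(src, src_stride, dst, dst_stride, filter_x, fx_stride, filter_y,
     fy_stride, w, h, bd);
}

// Hypothetical usage: &wrap<vpx_highbd_convolve8_horiz_sse2, 10> in place of
// wrap_convolve8_horiz_sse2_10.
```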
+
+void wrap_convolve_copy_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_copy_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_copy_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
const ConvolveFunctions convolve8_c(
- vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
- vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
- vp9_convolve8_c, vp9_convolve8_avg_c);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+const ConvolveFunctions convolve10_c(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve10_c),
+ make_tuple(8, 4, &convolve10_c),
+ make_tuple(4, 8, &convolve10_c),
+ make_tuple(8, 8, &convolve10_c),
+ make_tuple(16, 8, &convolve10_c),
+ make_tuple(8, 16, &convolve10_c),
+ make_tuple(16, 16, &convolve10_c),
+ make_tuple(32, 16, &convolve10_c),
+ make_tuple(16, 32, &convolve10_c),
+ make_tuple(32, 32, &convolve10_c),
+ make_tuple(64, 32, &convolve10_c),
+ make_tuple(32, 64, &convolve10_c),
+ make_tuple(64, 64, &convolve10_c)));
+const ConvolveFunctions convolve12_c(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve12_c),
+ make_tuple(8, 4, &convolve12_c),
+ make_tuple(4, 8, &convolve12_c),
+ make_tuple(8, 8, &convolve12_c),
+ make_tuple(16, 8, &convolve12_c),
+ make_tuple(8, 16, &convolve12_c),
+ make_tuple(16, 16, &convolve12_c),
+ make_tuple(32, 16, &convolve12_c),
+ make_tuple(16, 32, &convolve12_c),
+ make_tuple(32, 32, &convolve12_c),
+ make_tuple(64, 32, &convolve12_c),
+ make_tuple(32, 64, &convolve12_c),
+ make_tuple(64, 64, &convolve12_c)));
+
+#else
+
+const ConvolveFunctions convolve8_c(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
+ vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_c),
@@ -598,12 +1558,87 @@
make_tuple(64, 32, &convolve8_c),
make_tuple(32, 64, &convolve8_c),
make_tuple(64, 64, &convolve8_c)));
+#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && ARCH_X86_64
+#if CONFIG_VP9_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
- vp9_convolve8_horiz_sse2, vp9_convolve8_avg_horiz_sse2,
- vp9_convolve8_vert_sse2, vp9_convolve8_avg_vert_sse2,
- vp9_convolve8_sse2, vp9_convolve8_avg_sse2);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_sse2),
+ make_tuple(8, 4, &convolve8_sse2),
+ make_tuple(4, 8, &convolve8_sse2),
+ make_tuple(8, 8, &convolve8_sse2),
+ make_tuple(16, 8, &convolve8_sse2),
+ make_tuple(8, 16, &convolve8_sse2),
+ make_tuple(16, 16, &convolve8_sse2),
+ make_tuple(32, 16, &convolve8_sse2),
+ make_tuple(16, 32, &convolve8_sse2),
+ make_tuple(32, 32, &convolve8_sse2),
+ make_tuple(64, 32, &convolve8_sse2),
+ make_tuple(32, 64, &convolve8_sse2),
+ make_tuple(64, 64, &convolve8_sse2),
+ make_tuple(4, 4, &convolve10_sse2),
+ make_tuple(8, 4, &convolve10_sse2),
+ make_tuple(4, 8, &convolve10_sse2),
+ make_tuple(8, 8, &convolve10_sse2),
+ make_tuple(16, 8, &convolve10_sse2),
+ make_tuple(8, 16, &convolve10_sse2),
+ make_tuple(16, 16, &convolve10_sse2),
+ make_tuple(32, 16, &convolve10_sse2),
+ make_tuple(16, 32, &convolve10_sse2),
+ make_tuple(32, 32, &convolve10_sse2),
+ make_tuple(64, 32, &convolve10_sse2),
+ make_tuple(32, 64, &convolve10_sse2),
+ make_tuple(64, 64, &convolve10_sse2),
+ make_tuple(4, 4, &convolve12_sse2),
+ make_tuple(8, 4, &convolve12_sse2),
+ make_tuple(4, 8, &convolve12_sse2),
+ make_tuple(8, 8, &convolve12_sse2),
+ make_tuple(16, 8, &convolve12_sse2),
+ make_tuple(8, 16, &convolve12_sse2),
+ make_tuple(16, 16, &convolve12_sse2),
+ make_tuple(32, 16, &convolve12_sse2),
+ make_tuple(16, 32, &convolve12_sse2),
+ make_tuple(32, 32, &convolve12_sse2),
+ make_tuple(64, 32, &convolve12_sse2),
+ make_tuple(32, 64, &convolve12_sse2),
+ make_tuple(64, 64, &convolve12_sse2)));
+#else
+const ConvolveFunctions convolve8_sse2(
+#if CONFIG_USE_X86INC
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
+#else
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+#endif // CONFIG_USE_X86INC
+ vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
+ vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
+ vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_sse2),
@@ -619,13 +1654,18 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
- vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
- vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_ssse3),
@@ -643,11 +1683,52 @@
make_tuple(64, 64, &convolve8_ssse3)));
#endif
+#if HAVE_AVX2 && HAVE_SSSE3
+const ConvolveFunctions convolve8_avx2(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_avx2),
+ make_tuple(8, 4, &convolve8_avx2),
+ make_tuple(4, 8, &convolve8_avx2),
+ make_tuple(8, 8, &convolve8_avx2),
+ make_tuple(8, 16, &convolve8_avx2),
+ make_tuple(16, 8, &convolve8_avx2),
+ make_tuple(16, 16, &convolve8_avx2),
+ make_tuple(32, 16, &convolve8_avx2),
+ make_tuple(16, 32, &convolve8_avx2),
+ make_tuple(32, 32, &convolve8_avx2),
+ make_tuple(64, 32, &convolve8_avx2),
+ make_tuple(32, 64, &convolve8_avx2),
+ make_tuple(64, 64, &convolve8_avx2)));
+#endif // HAVE_AVX2 && HAVE_SSSE3
+
#if HAVE_NEON
+#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
- vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
- vp9_convolve8_neon, vp9_convolve8_avg_neon);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else   // !HAVE_NEON_ASM
+const ConvolveFunctions convolve8_neon(
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#endif // HAVE_NEON_ASM
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_neon),
@@ -663,13 +1744,17 @@
make_tuple(64, 32, &convolve8_neon),
make_tuple(32, 64, &convolve8_neon),
make_tuple(64, 64, &convolve8_neon)));
-#endif
+#endif // HAVE_NEON
#if HAVE_DSPR2
const ConvolveFunctions convolve8_dspr2(
- vp9_convolve8_horiz_dspr2, vp9_convolve8_avg_horiz_dspr2,
- vp9_convolve8_vert_dspr2, vp9_convolve8_avg_vert_dspr2,
- vp9_convolve8_dspr2, vp9_convolve8_avg_dspr2);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
+ vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
+ vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
+ vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_dspr2),
@@ -686,4 +1771,30 @@
make_tuple(32, 64, &convolve8_dspr2),
make_tuple(64, 64, &convolve8_dspr2)));
#endif
+
+#if HAVE_MSA
+const ConvolveFunctions convolve8_msa(
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa,
+ vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
+ vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
+ vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_msa),
+ make_tuple(8, 4, &convolve8_msa),
+ make_tuple(4, 8, &convolve8_msa),
+ make_tuple(8, 8, &convolve8_msa),
+ make_tuple(16, 8, &convolve8_msa),
+ make_tuple(8, 16, &convolve8_msa),
+ make_tuple(16, 16, &convolve8_msa),
+ make_tuple(32, 16, &convolve8_msa),
+ make_tuple(16, 32, &convolve8_msa),
+ make_tuple(32, 32, &convolve8_msa),
+ make_tuple(64, 32, &convolve8_msa),
+ make_tuple(32, 64, &convolve8_msa),
+ make_tuple(64, 64, &convolve8_msa)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
| null |
vpx_free(output_ref_);
output_ref_ = NULL;
#if CONFIG_VP9_HIGHBITDEPTH
vpx_free(input16_ - 1);
input16_ = NULL;
vpx_free(output16_);
output16_ = NULL;
vpx_free(output16_ref_);
output16_ref_ = NULL;
#endif
|
150,828 |
void filter_average_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter,
const int16_t *VFilter,
uint8_t *dst_ptr,
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
uint8_t tmp[64 * 64];
assert(output_width <= 64);
assert(output_height <= 64);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void filter_average_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter,
const int16_t *VFilter,
uint8_t *dst_ptr,
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
uint8_t tmp[kMaxDimension * kMaxDimension];
assert(output_width <= kMaxDimension);
assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
|
@@ -9,40 +9,65 @@
*/
#include <string.h>
-#include "test/acm_random.h"
-#include "test/register_state_check.h"
-#include "test/util.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_filter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
namespace {
-typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h);
+
+static const unsigned int kMaxDimension = 64;
+
+typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
- convolve_fn_t v8, convolve_fn_t v8_avg,
- convolve_fn_t hv8, convolve_fn_t hv8_avg)
- : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
- hv8_avg_(hv8_avg) {}
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
+ ConvolveFunc h8, ConvolveFunc h8_avg,
+ ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg,
+ ConvolveFunc sh8, ConvolveFunc sh8_avg,
+ ConvolveFunc sv8, ConvolveFunc sv8_avg,
+ ConvolveFunc shv8, ConvolveFunc shv8_avg,
+ int bd)
+ : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
+ v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
+ sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
+ use_highbd_(bd) {}
- convolve_fn_t h8_;
- convolve_fn_t v8_;
- convolve_fn_t hv8_;
- convolve_fn_t h8_avg_;
- convolve_fn_t v8_avg_;
- convolve_fn_t hv8_avg_;
+ ConvolveFunc copy_;
+ ConvolveFunc avg_;
+ ConvolveFunc h8_;
+ ConvolveFunc v8_;
+ ConvolveFunc hv8_;
+ ConvolveFunc h8_avg_;
+ ConvolveFunc v8_avg_;
+ ConvolveFunc hv8_avg_;
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
-typedef std::tr1::tuple<int, int, const ConvolveFunctions*> convolve_param_t;
+typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
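+// Each ConvolveParam is (width, height, function bank). A bank's final ctor
+// argument is its bit depth: 0 keeps the classic uint8_t paths, while
+// 8/10/12 route the tests through uint16_t surfaces wrapped with
+// CONVERT_TO_BYTEPTR() and a pixel mask of (1 << bd) - 1 (see SetUp() below).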
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
@@ -68,71 +93,66 @@
const int kInterp_Extend = 4;
const unsigned int intermediate_height =
(kInterp_Extend - 1) + output_height + kInterp_Extend;
+ unsigned int i, j;
- /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
- * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
- * + kInterp_Extend
- * = 3 + 16 + 4
- * = 23
- * and filter_max_width = 16
- */
- uint8_t intermediate_buffer[71 * 64];
+  // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+  // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+  //                                 + kInterp_Extend
+  //                               = 3 + 64 + 4
+  //                               = 71
+  // and filter_max_width = kMaxDimension = 64, matching the declaration below.
+  //
+ uint8_t intermediate_buffer[71 * kMaxDimension];
const int intermediate_next_stride = 1 - intermediate_height * output_width;
// Horizontal pass (src -> transposed intermediate).
- {
- uint8_t *output_ptr = intermediate_buffer;
- const int src_next_row_stride = src_stride - output_width;
- unsigned int i, j;
- src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
- for (i = 0; i < intermediate_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
- ++src_ptr;
- output_ptr += intermediate_height;
- }
- src_ptr += src_next_row_stride;
- output_ptr += intermediate_next_stride;
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
}
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
}
// Vertical pass (transposed intermediate -> dst).
- {
- uint8_t *src_ptr = intermediate_buffer;
- const int dst_next_row_stride = dst_stride - output_width;
- unsigned int i, j;
- for (i = 0; i < output_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
- src_ptr += intermediate_height;
- }
- src_ptr += intermediate_next_stride;
- dst_ptr += dst_next_row_stride;
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
}
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
}
}
@@ -159,17 +179,138 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
- uint8_t tmp[64 * 64];
+ uint8_t tmp[kMaxDimension * kMaxDimension];
- assert(output_width <= 64);
- assert(output_height <= 64);
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
-class ConvolveTest : public ::testing::TestWithParam<convolve_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+  /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+   * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+   *                                 + kInterp_Extend
+   *                               = 3 + 64 + 4
+   *                               = 71
+   * and filter_max_width = kMaxDimension = 64, matching the declaration below.
+   */
+ uint16_t intermediate_buffer[71 * kMaxDimension];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint16_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to 0..(1 << bd) - 1...
+ *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint16_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to 0..(1 << bd) - 1...
+ *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
+
+void highbd_block2d_average_c(uint16_t *src,
+ unsigned int src_stride,
+ uint16_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
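+// For reference, the rounded average above is (a + b + 1) >> 1, so e.g.
+// (3 + 4 + 1) >> 1 == 4 and ties round up, matching block2d_average_c.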
+
+void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ uint16_t tmp[kMaxDimension * kMaxDimension];
+
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height, bd);
+ highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
@@ -177,13 +318,36 @@
vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
output_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
+ output_ref_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment,
+ (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
+ output16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+ output16_ref_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+#endif
}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
+ vpx_free(output_ref_);
+ output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_free(input16_ - 1);
+ input16_ = NULL;
+ vpx_free(output16_);
+ output16_ = NULL;
+ vpx_free(output16_ref_);
+ output16_ref_ = NULL;
+#endif
}
protected:
@@ -191,7 +355,6 @@
static const int kOuterBlockSize = 256;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
- static const int kMaxDimension = 64;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -212,6 +375,12 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ != 0)
+ mask_ = (1 << UUT_->use_highbd_) - 1;
+ else
+ mask_ = 255;
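+    // e.g. a 10-bit bank gives mask_ == 0x3ff, the widest legal pixel value.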
+#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
@@ -221,12 +390,33 @@
}
::libvpx_test::ACMRandom prng;
- for (int i = 0; i < kInputBufferSize; ++i)
- input_[i] = prng.Rand8Extremes();
+ for (int i = 0; i < kInputBufferSize; ++i) {
+ if (i & 1) {
+ input_[i] = 255;
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = mask_;
+#endif
+ } else {
+ input_[i] = prng.Rand8Extremes();
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = prng.Rand16() & mask_;
+#endif
+ }
+ }
}
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_memset16(input16_, value, kInputBufferSize);
+#endif
+ }
+
+ void CopyOutputToRef() {
+ memcpy(output_ref_, output_, kOutputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+    // kOutputBufferSize counts pixels, so scale by the element size to copy
+    // the whole uint16_t surface.
+    memcpy(output16_ref_, output16_, kOutputBufferSize * sizeof(output16_[0]));
+#endif
}
void CheckGuardBlocks() {
@@ -236,39 +426,197 @@
}
}
- uint8_t* input() const {
+ uint8_t *input() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
}
- uint8_t* output() const {
+ uint8_t *output() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint8_t *output_ref() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint16_t lookup(uint8_t *list, int index) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return list[index];
+ } else {
+ return CONVERT_TO_SHORTPTR(list)[index];
+ }
+#else
+ return list[index];
+#endif
+ }
+
+ void assign_val(uint8_t *list, int index, uint16_t val) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ list[index] = (uint8_t) val;
+ } else {
+ CONVERT_TO_SHORTPTR(list)[index] = val;
+ }
+#else
+ list[index] = (uint8_t) val;
+#endif
+ }
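+  // lookup()/assign_val() above let every test read and write pixels through
+  // a plain uint8_t* handle, whether the surface really holds uint8_t or a
+  // CONVERT_TO_SHORTPTR()-wrapped uint16_t buffer.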
+
+ void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+ } else {
+ highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
+ src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
+ }
+#else
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+#endif
+ }
+
+ void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+ } else {
+ highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
+ HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
+ output_width, output_height, UUT_->use_highbd_);
+ }
+#else
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+#endif
}
const ConvolveFunctions* UUT_;
static uint8_t* input_;
static uint8_t* output_;
+ static uint8_t* output_ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ static uint16_t* input16_;
+ static uint16_t* output16_;
+ static uint16_t* output16_ref_;
+ int mask_;
+#endif
};
+
uint8_t* ConvolveTest::input_ = NULL;
uint8_t* ConvolveTest::output_ = NULL;
+uint8_t* ConvolveTest::output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+uint16_t* ConvolveTest::input16_ = NULL;
+uint16_t* ConvolveTest::output16_ = NULL;
+uint16_t* ConvolveTest::output16_ref_ = NULL;
+#endif
TEST_P(ConvolveTest, GuardBlocks) {
CheckGuardBlocks();
}
+TEST_P(ConvolveTest, Copy) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
+ << "(" << x << "," << y << ")";
+}
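+// copy_ (and avg_ below) ignore their filter arguments, which is why the
+// tests can pass NULL taps with a stride of 0.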
+
+TEST_P(ConvolveTest, Avg) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t* const out_ref = output_ref();
+ CopyOutputToRef();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
+ lookup(out_ref, y * kOutputStride + x), 1))
+ << "(" << x << "," << y << ")";
+}
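+// Assuming the usual vpx_dsp definition ROUND_POWER_OF_TWO(v, n) ==
+// (((v) + (1 << ((n) - 1))) >> (n)), the expected value above reduces to the
+// per-pixel rounded average (in + out_ref + 1) >> 1, the same formula the
+// block2d_average_c reference helpers use.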
+
TEST_P(ConvolveTest, CopyHoriz) {
uint8_t* const in = input();
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -277,15 +625,16 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -294,31 +643,26 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
+ 16, Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
-const int16_t (*kTestFilterList[])[8] = {
- vp9_bilinear_filters,
- vp9_sub_pel_filters_8,
- vp9_sub_pel_filters_8s,
- vp9_sub_pel_filters_8lp
-};
-const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
+const int kNumFilterBanks = 4;
const int kNumFilters = 16;
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -341,40 +685,57 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
-
+#endif
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_(in, kInputStride, out, kOutputStride,
kInvalidFilter, 16, filters[filter_y], 16,
Width(), Height()));
- else
- REGISTER_STATE_CHECK(
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
UUT_->h8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -386,54 +747,77 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
+#endif
// Populate ref and out with some random data
::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- const uint8_t r = prng.Rand8Extremes();
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
- out[y * kOutputStride + x] = r;
- ref[y * kOutputStride + x] = r;
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
}
}
- const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
-
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
- const int kNumFilters = 16;
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
else
- REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -442,108 +826,102 @@
}
}
-DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128},
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128}
-};
-
-/* This test exercises the horizontal and vertical filter functions. */
-TEST_P(ConvolveTest, ChangeFilterWorks) {
- uint8_t* const in = input();
- uint8_t* const out = output();
-
- /* Assume that the first input sample is at the 8/16th position. */
- const int kInitialSubPelOffset = 8;
-
- /* Filters are 8-tap, so the first filter tap will be applied to the pixel
- * at position -3 with respect to the current filtering position. Since
- * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
- * which is non-zero only in the last tap. So, applying the filter at the
- * current input position will result in an output equal to the pixel at
- * offset +4 (-3 + 7) with respect to the current filtering position.
- */
- const int kPixelSelected = 4;
-
- /* Assume that each output pixel requires us to step on by 17/16th pixels in
- * the input.
- */
- const int kInputPixelStep = 17;
-
- /* The filters are setup in such a way that the expected output produces
- * sets of 8 identical output samples. As the filter position moves to the
- * next 1/16th pixel position the only active (=128) filter tap moves one
- * position to the left, resulting in the same input pixel being replicated
- * in to the output for 8 consecutive samples. After each set of 8 positions
- * the filters select a different input pixel. kFilterPeriodAdjust below
- * computes which input pixel is written to the output for a specified
- * x or y position.
- */
-
- /* Test the horizontal filter. */
- REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, NULL, 0, Width(), Height()));
-
- for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjust = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << "width = " << Width();
+TEST_P(ConvolveTest, FilterExtremes) {
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t *ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
}
+#else
+ uint8_t ref[kOutputStride * kMaxDimension];
+#endif
- /* Test the vertical filter. */
- REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
- NULL, 0, kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, Width(), Height()));
-
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjust = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
- }
-
- /* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- Width(), Height()));
-
- for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjustY = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustY * kInputPixelStep)
- >> SUBPEL_BITS);
for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjustX = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustX * kInputPixelStep)
- >> SUBPEL_BITS);
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
+ }
+ }
- ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
- << "x == " << x << ", y == " << y;
+ for (int axis = 0; axis < 2; axis++) {
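+    // Drive the 8x8 input window with binary extremes: bit (axis ? y : x) of
+    // seed_val decides whether a pixel is forced to the maximum value, and
+    // the seed bookkeeping below advances it so that all 256 on/off patterns
+    // along each filter axis get checked against the reference filter.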
+ int seed_val = 0;
+ while (seed_val < 256) {
+ for (int y = 0; y < 8; ++y) {
+ for (int x = 0; x < 8; ++x) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
+#else
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
+#endif
+ if (axis) seed_val++;
+ }
+ if (axis)
+          seed_val -= 8;
+ else
+ seed_val++;
+ }
+ if (axis) seed_val += 8;
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
}
}
}
@@ -553,22 +931,24 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
for (int frac = 0; frac < 16; ++frac) {
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ eighttap[frac], step,
+ eighttap[frac], step,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(in, y * kInputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "x == " << x << ", y == " << y
<< ", frac == " << frac << ", step == " << step;
}
@@ -579,10 +959,590 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE2 && ARCH_X86_64
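+// The wrap_* thunks below all follow one pattern: they adapt the high
+// bitdepth convolve API, which takes a trailing bit-depth argument, to the
+// common ConvolveFunc signature by baking a fixed bd of 8, 10 or 12 into
+// each call, so one parameterized test can drive every depth.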
+void wrap_convolve8_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride, filter_x,
+ filter_x_stride, filter_y, filter_y_stride,
+ w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+#endif // HAVE_SSE2 && ARCH_X86_64
+
+void wrap_convolve_copy_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_copy_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_copy_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
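Every wrap_* adapter above (and the SSE2 set later in this file) has the same shape: forward the 8-bit ConvolveFunc arguments to the matching vpx_highbd_* routine with the bit depth pinned. A minimal sketch of how the boilerplate could be stamped out with a preprocessor macro; the WRAP name is hypothetical, and the forwarded signatures are the ones shown above:

#define WRAP(func, bd)                                                   \
  void wrap_##func##_##bd(const uint8_t *src, ptrdiff_t src_stride,      \
                          uint8_t *dst, ptrdiff_t dst_stride,            \
                          const int16_t *filter_x, int filter_x_stride,  \
                          const int16_t *filter_y, int filter_y_stride,  \
                          int w, int h) {                                \
    vpx_highbd_##func(src, src_stride, dst, dst_stride,                  \
                      filter_x, filter_x_stride,                         \
                      filter_y, filter_y_stride, w, h, bd);              \
  }
WRAP(convolve8_horiz_c, 10)  // expands to wrap_convolve8_horiz_c_10 above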
const ConvolveFunctions convolve8_c(
- vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
- vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
- vp9_convolve8_c, vp9_convolve8_avg_c);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+const ConvolveFunctions convolve10_c(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve10_c),
+ make_tuple(8, 4, &convolve10_c),
+ make_tuple(4, 8, &convolve10_c),
+ make_tuple(8, 8, &convolve10_c),
+ make_tuple(16, 8, &convolve10_c),
+ make_tuple(8, 16, &convolve10_c),
+ make_tuple(16, 16, &convolve10_c),
+ make_tuple(32, 16, &convolve10_c),
+ make_tuple(16, 32, &convolve10_c),
+ make_tuple(32, 32, &convolve10_c),
+ make_tuple(64, 32, &convolve10_c),
+ make_tuple(32, 64, &convolve10_c),
+ make_tuple(64, 64, &convolve10_c)));
+const ConvolveFunctions convolve12_c(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve12_c),
+ make_tuple(8, 4, &convolve12_c),
+ make_tuple(4, 8, &convolve12_c),
+ make_tuple(8, 8, &convolve12_c),
+ make_tuple(16, 8, &convolve12_c),
+ make_tuple(8, 16, &convolve12_c),
+ make_tuple(16, 16, &convolve12_c),
+ make_tuple(32, 16, &convolve12_c),
+ make_tuple(16, 32, &convolve12_c),
+ make_tuple(32, 32, &convolve12_c),
+ make_tuple(64, 32, &convolve12_c),
+ make_tuple(32, 64, &convolve12_c),
+ make_tuple(64, 64, &convolve12_c)));
+
+#else
+
+const ConvolveFunctions convolve8_c(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
+ vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_c),
@@ -598,12 +1558,87 @@
make_tuple(64, 32, &convolve8_c),
make_tuple(32, 64, &convolve8_c),
make_tuple(64, 64, &convolve8_c)));
+#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && ARCH_X86_64
+#if CONFIG_VP9_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
- vp9_convolve8_horiz_sse2, vp9_convolve8_avg_horiz_sse2,
- vp9_convolve8_vert_sse2, vp9_convolve8_avg_vert_sse2,
- vp9_convolve8_sse2, vp9_convolve8_avg_sse2);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_sse2),
+ make_tuple(8, 4, &convolve8_sse2),
+ make_tuple(4, 8, &convolve8_sse2),
+ make_tuple(8, 8, &convolve8_sse2),
+ make_tuple(16, 8, &convolve8_sse2),
+ make_tuple(8, 16, &convolve8_sse2),
+ make_tuple(16, 16, &convolve8_sse2),
+ make_tuple(32, 16, &convolve8_sse2),
+ make_tuple(16, 32, &convolve8_sse2),
+ make_tuple(32, 32, &convolve8_sse2),
+ make_tuple(64, 32, &convolve8_sse2),
+ make_tuple(32, 64, &convolve8_sse2),
+ make_tuple(64, 64, &convolve8_sse2),
+ make_tuple(4, 4, &convolve10_sse2),
+ make_tuple(8, 4, &convolve10_sse2),
+ make_tuple(4, 8, &convolve10_sse2),
+ make_tuple(8, 8, &convolve10_sse2),
+ make_tuple(16, 8, &convolve10_sse2),
+ make_tuple(8, 16, &convolve10_sse2),
+ make_tuple(16, 16, &convolve10_sse2),
+ make_tuple(32, 16, &convolve10_sse2),
+ make_tuple(16, 32, &convolve10_sse2),
+ make_tuple(32, 32, &convolve10_sse2),
+ make_tuple(64, 32, &convolve10_sse2),
+ make_tuple(32, 64, &convolve10_sse2),
+ make_tuple(64, 64, &convolve10_sse2),
+ make_tuple(4, 4, &convolve12_sse2),
+ make_tuple(8, 4, &convolve12_sse2),
+ make_tuple(4, 8, &convolve12_sse2),
+ make_tuple(8, 8, &convolve12_sse2),
+ make_tuple(16, 8, &convolve12_sse2),
+ make_tuple(8, 16, &convolve12_sse2),
+ make_tuple(16, 16, &convolve12_sse2),
+ make_tuple(32, 16, &convolve12_sse2),
+ make_tuple(16, 32, &convolve12_sse2),
+ make_tuple(32, 32, &convolve12_sse2),
+ make_tuple(64, 32, &convolve12_sse2),
+ make_tuple(32, 64, &convolve12_sse2),
+ make_tuple(64, 64, &convolve12_sse2)));
+#else
+const ConvolveFunctions convolve8_sse2(
+#if CONFIG_USE_X86INC
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
+#else
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+#endif // CONFIG_USE_X86INC
+ vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
+ vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
+ vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_sse2),
@@ -619,13 +1654,18 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
- vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
- vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_ssse3),
@@ -643,11 +1683,52 @@
make_tuple(64, 64, &convolve8_ssse3)));
#endif
+#if HAVE_AVX2 && HAVE_SSSE3
+const ConvolveFunctions convolve8_avx2(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_avx2),
+ make_tuple(8, 4, &convolve8_avx2),
+ make_tuple(4, 8, &convolve8_avx2),
+ make_tuple(8, 8, &convolve8_avx2),
+ make_tuple(8, 16, &convolve8_avx2),
+ make_tuple(16, 8, &convolve8_avx2),
+ make_tuple(16, 16, &convolve8_avx2),
+ make_tuple(32, 16, &convolve8_avx2),
+ make_tuple(16, 32, &convolve8_avx2),
+ make_tuple(32, 32, &convolve8_avx2),
+ make_tuple(64, 32, &convolve8_avx2),
+ make_tuple(32, 64, &convolve8_avx2),
+ make_tuple(64, 64, &convolve8_avx2)));
+#endif // HAVE_AVX2 && HAVE_SSSE3
+
#if HAVE_NEON
+#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
- vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
- vp9_convolve8_neon, vp9_convolve8_avg_neon);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else // HAVE_NEON
+const ConvolveFunctions convolve8_neon(
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#endif // HAVE_NEON_ASM
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_neon),
@@ -663,13 +1744,17 @@
make_tuple(64, 32, &convolve8_neon),
make_tuple(32, 64, &convolve8_neon),
make_tuple(64, 64, &convolve8_neon)));
-#endif
+#endif // HAVE_NEON
#if HAVE_DSPR2
const ConvolveFunctions convolve8_dspr2(
- vp9_convolve8_horiz_dspr2, vp9_convolve8_avg_horiz_dspr2,
- vp9_convolve8_vert_dspr2, vp9_convolve8_avg_vert_dspr2,
- vp9_convolve8_dspr2, vp9_convolve8_avg_dspr2);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
+ vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
+ vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
+ vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_dspr2),
@@ -686,4 +1771,30 @@
make_tuple(32, 64, &convolve8_dspr2),
make_tuple(64, 64, &convolve8_dspr2)));
#endif
+
+#if HAVE_MSA
+const ConvolveFunctions convolve8_msa(
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa,
+ vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
+ vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
+ vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_msa),
+ make_tuple(8, 4, &convolve8_msa),
+ make_tuple(4, 8, &convolve8_msa),
+ make_tuple(8, 8, &convolve8_msa),
+ make_tuple(16, 8, &convolve8_msa),
+ make_tuple(8, 16, &convolve8_msa),
+ make_tuple(16, 16, &convolve8_msa),
+ make_tuple(32, 16, &convolve8_msa),
+ make_tuple(16, 32, &convolve8_msa),
+ make_tuple(32, 32, &convolve8_msa),
+ make_tuple(64, 32, &convolve8_msa),
+ make_tuple(32, 64, &convolve8_msa),
+ make_tuple(64, 64, &convolve8_msa)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
uint8_t tmp[64 * 64];
assert(output_width <= 64);
assert(output_height <= 64);
|
uint8_t tmp[kMaxDimension * kMaxDimension];
assert(output_width <= kMaxDimension);
assert(output_height <= kMaxDimension);
|
150,829 |
uint8_t* input() const {
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
 uint8_t *input() const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
BorderLeft());
}
#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
#endif
}
|
@@ -9,40 +9,65 @@
*/
#include <string.h>
-#include "test/acm_random.h"
-#include "test/register_state_check.h"
-#include "test/util.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_filter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
namespace {
-typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h);
+
+static const unsigned int kMaxDimension = 64;
+
+typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
- convolve_fn_t v8, convolve_fn_t v8_avg,
- convolve_fn_t hv8, convolve_fn_t hv8_avg)
- : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
- hv8_avg_(hv8_avg) {}
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
+ ConvolveFunc h8, ConvolveFunc h8_avg,
+ ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg,
+ ConvolveFunc sh8, ConvolveFunc sh8_avg,
+ ConvolveFunc sv8, ConvolveFunc sv8_avg,
+ ConvolveFunc shv8, ConvolveFunc shv8_avg,
+ int bd)
+ : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
+ v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
+ sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
+ use_highbd_(bd) {}
- convolve_fn_t h8_;
- convolve_fn_t v8_;
- convolve_fn_t hv8_;
- convolve_fn_t h8_avg_;
- convolve_fn_t v8_avg_;
- convolve_fn_t hv8_avg_;
+ ConvolveFunc copy_;
+ ConvolveFunc avg_;
+ ConvolveFunc h8_;
+ ConvolveFunc v8_;
+ ConvolveFunc hv8_;
+ ConvolveFunc h8_avg_;
+ ConvolveFunc v8_avg_;
+ ConvolveFunc hv8_avg_;
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
-typedef std::tr1::tuple<int, int, const ConvolveFunctions*> convolve_param_t;
+typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
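Each ConvolveParam bundles (width, height, function table). GTest instantiates the fixture once per tuple, and the tests below unpack it via the GET_PARAM helper from test/util.h (which wraps std::tr1::get<k>(GetParam())). A sketch of the consumption side, with names as they appear later in this file:

// TEST_P(ConvolveTest, Example) {
//   const int width = GET_PARAM(0);               // block width
//   const int height = GET_PARAM(1);              // block height
//   const ConvolveFunctions *uut = GET_PARAM(2);  // function table (UUT_)
// }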
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
@@ -68,71 +93,66 @@
const int kInterp_Extend = 4;
const unsigned int intermediate_height =
(kInterp_Extend - 1) + output_height + kInterp_Extend;
+ unsigned int i, j;
- /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
- * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
- * + kInterp_Extend
- * = 3 + 16 + 4
- * = 23
- * and filter_max_width = 16
- */
- uint8_t intermediate_buffer[71 * 64];
+  // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+  // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+  //                                 + kInterp_Extend
+  //                               = 3 + 64 + 4
+  //                               = 71
+  // and filter_max_width = kMaxDimension = 64.
+  //
+ uint8_t intermediate_buffer[71 * kMaxDimension];
const int intermediate_next_stride = 1 - intermediate_height * output_width;
// Horizontal pass (src -> transposed intermediate).
- {
- uint8_t *output_ptr = intermediate_buffer;
- const int src_next_row_stride = src_stride - output_width;
- unsigned int i, j;
- src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
- for (i = 0; i < intermediate_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
- ++src_ptr;
- output_ptr += intermediate_height;
- }
- src_ptr += src_next_row_stride;
- output_ptr += intermediate_next_stride;
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
}
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
}
// Vertical pass (transposed intermediate -> dst).
- {
- uint8_t *src_ptr = intermediate_buffer;
- const int dst_next_row_stride = dst_stride - output_width;
- unsigned int i, j;
- for (i = 0; i < output_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
- src_ptr += intermediate_height;
- }
- src_ptr += intermediate_next_stride;
- dst_ptr += dst_next_row_stride;
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
}
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
}
}
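The 71-row bound on intermediate_buffer follows from the largest block exercised: a 64-tall output needs (kInterp_Extend - 1) + 64 + kInterp_Extend = 71 horizontally filtered rows (the intermediate is stored transposed, which is why the vertical pass strides by intermediate_height). A standalone check of that arithmetic, with constants mirroring the ones above:

#include <cassert>

int main() {
  const int kInterp_Extend = 4;  // 8-tap filter reads 3 pixels back, 4 ahead
  const int kMaxDimension = 64;  // largest output block in the test
  const int max_intermediate_height =
      (kInterp_Extend - 1) + kMaxDimension + kInterp_Extend;
  assert(max_intermediate_height == 71);  // intermediate_buffer[71 * 64]
  return 0;
}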
@@ -159,17 +179,138 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
- uint8_t tmp[64 * 64];
+ uint8_t tmp[kMaxDimension * kMaxDimension];
- assert(output_width <= 64);
- assert(output_height <= 64);
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
-class ConvolveTest : public ::testing::TestWithParam<convolve_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+  /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+   * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+   *                                 + kInterp_Extend
+   *                               = 3 + 64 + 4
+   *                               = 71
+   * and filter_max_width = kMaxDimension = 64
+   */
+ uint16_t intermediate_buffer[71 * kMaxDimension];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint16_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to 0..(1 << bd) - 1...
+ *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint16_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to 0..(1 << bd) - 1...
+ *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
+
+void highbd_block2d_average_c(uint16_t *src,
+ unsigned int src_stride,
+ uint16_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
+
+void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ uint16_t tmp[kMaxDimension * kMaxDimension];
+
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height, bd);
+ highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
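Throughout the fixture, CONVERT_TO_BYTEPTR and CONVERT_TO_SHORTPTR smuggle uint16_t buffers through the 8-bit ConvolveFunc signature by shifting the address itself. A sketch of the trick; the definitions are paraphrased from vpx_ports/mem.h, and the exact spelling there may differ:

#include <cassert>
#include <cstdint>

#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))

int main() {
  uint16_t buf[4] = {0, 1, 2, 3};
  uint8_t *disguised = CONVERT_TO_BYTEPTR(buf);   // must never be dereferenced
  assert(CONVERT_TO_SHORTPTR(disguised) == buf);  // round-trips exactly
  return 0;
}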
+class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
@@ -177,13 +318,36 @@
vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
output_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
+ output_ref_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment,
+ (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
+ output16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+ output16_ref_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+#endif
}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
+ vpx_free(output_ref_);
+ output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_free(input16_ - 1);
+ input16_ = NULL;
+ vpx_free(output16_);
+ output16_ = NULL;
+ vpx_free(output16_ref_);
+ output16_ref_ = NULL;
+#endif
}
protected:
@@ -191,7 +355,6 @@
static const int kOuterBlockSize = 256;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
- static const int kMaxDimension = 64;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -212,6 +375,12 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ != 0)
+ mask_ = (1 << UUT_->use_highbd_) - 1;
+ else
+ mask_ = 255;
+#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
@@ -221,12 +390,33 @@
}
::libvpx_test::ACMRandom prng;
- for (int i = 0; i < kInputBufferSize; ++i)
- input_[i] = prng.Rand8Extremes();
+ for (int i = 0; i < kInputBufferSize; ++i) {
+ if (i & 1) {
+ input_[i] = 255;
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = mask_;
+#endif
+ } else {
+ input_[i] = prng.Rand8Extremes();
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = prng.Rand16() & mask_;
+#endif
+ }
+ }
}
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_memset16(input16_, value, kInputBufferSize);
+#endif
+ }
+
+ void CopyOutputToRef() {
+ memcpy(output_ref_, output_, kOutputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+    memcpy(output16_ref_, output16_,
+           kOutputBufferSize * sizeof(output16_[0]));
+#endif
}
void CheckGuardBlocks() {
@@ -236,39 +426,197 @@
}
}
- uint8_t* input() const {
+ uint8_t *input() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
}
- uint8_t* output() const {
+ uint8_t *output() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint8_t *output_ref() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint16_t lookup(uint8_t *list, int index) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return list[index];
+ } else {
+ return CONVERT_TO_SHORTPTR(list)[index];
+ }
+#else
+ return list[index];
+#endif
+ }
+
+ void assign_val(uint8_t *list, int index, uint16_t val) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ list[index] = (uint8_t) val;
+ } else {
+ CONVERT_TO_SHORTPTR(list)[index] = val;
+ }
+#else
+ list[index] = (uint8_t) val;
+#endif
+ }
+
+ void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+ } else {
+ highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
+ src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
+ }
+#else
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+#endif
+ }
+
+ void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+ } else {
+ highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
+ HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
+ output_width, output_height, UUT_->use_highbd_);
+ }
+#else
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+#endif
}
const ConvolveFunctions* UUT_;
static uint8_t* input_;
static uint8_t* output_;
+ static uint8_t* output_ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ static uint16_t* input16_;
+ static uint16_t* output16_;
+ static uint16_t* output16_ref_;
+ int mask_;
+#endif
};
+
uint8_t* ConvolveTest::input_ = NULL;
uint8_t* ConvolveTest::output_ = NULL;
+uint8_t* ConvolveTest::output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+uint16_t* ConvolveTest::input16_ = NULL;
+uint16_t* ConvolveTest::output16_ = NULL;
+uint16_t* ConvolveTest::output16_ref_ = NULL;
+#endif
TEST_P(ConvolveTest, GuardBlocks) {
CheckGuardBlocks();
}
+TEST_P(ConvolveTest, Copy) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, Avg) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t* const out_ref = output_ref();
+ CopyOutputToRef();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
+ lookup(out_ref, y * kOutputStride + x), 1))
+ << "(" << x << "," << y << ")";
+}
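The expected value above uses ROUND_POWER_OF_TWO, libvpx's add-half-then-shift rounding macro, so the reference pixel is (in + ref + 1) >> 1, the same rounding the averaging convolve kernels apply. A quick standalone check; the macro body is quoted from libvpx, though its exact header location varies by version:

#include <cassert>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

int main() {
  assert(ROUND_POWER_OF_TWO(3 + 4, 1) == 4);        // average of 3 and 4 rounds up
  assert(ROUND_POWER_OF_TWO(200 + 255, 1) == 228);  // (455 + 1) >> 1
  return 0;
}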
+
TEST_P(ConvolveTest, CopyHoriz) {
uint8_t* const in = input();
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -277,15 +625,16 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -294,31 +643,26 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
+ 16, Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
-const int16_t (*kTestFilterList[])[8] = {
- vp9_bilinear_filters,
- vp9_sub_pel_filters_8,
- vp9_sub_pel_filters_8s,
- vp9_sub_pel_filters_8lp
-};
-const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
+const int kNumFilterBanks = 4;
const int kNumFilters = 16;
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -341,40 +685,57 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
-
+#endif
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_(in, kInputStride, out, kOutputStride,
kInvalidFilter, 16, filters[filter_y], 16,
Width(), Height()));
- else
- REGISTER_STATE_CHECK(
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
UUT_->h8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -386,54 +747,77 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
+#endif
// Populate ref and out with some random data
::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- const uint8_t r = prng.Rand8Extremes();
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
- out[y * kOutputStride + x] = r;
- ref[y * kOutputStride + x] = r;
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
}
}
- const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
-
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
- const int kNumFilters = 16;
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
else
- REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -442,108 +826,102 @@
}
}
-DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128},
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128}
-};
-
-/* This test exercises the horizontal and vertical filter functions. */
-TEST_P(ConvolveTest, ChangeFilterWorks) {
- uint8_t* const in = input();
- uint8_t* const out = output();
-
- /* Assume that the first input sample is at the 8/16th position. */
- const int kInitialSubPelOffset = 8;
-
- /* Filters are 8-tap, so the first filter tap will be applied to the pixel
- * at position -3 with respect to the current filtering position. Since
- * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
- * which is non-zero only in the last tap. So, applying the filter at the
- * current input position will result in an output equal to the pixel at
- * offset +4 (-3 + 7) with respect to the current filtering position.
- */
- const int kPixelSelected = 4;
-
- /* Assume that each output pixel requires us to step on by 17/16th pixels in
- * the input.
- */
- const int kInputPixelStep = 17;
-
- /* The filters are setup in such a way that the expected output produces
- * sets of 8 identical output samples. As the filter position moves to the
- * next 1/16th pixel position the only active (=128) filter tap moves one
- * position to the left, resulting in the same input pixel being replicated
- * in to the output for 8 consecutive samples. After each set of 8 positions
- * the filters select a different input pixel. kFilterPeriodAdjust below
- * computes which input pixel is written to the output for a specified
- * x or y position.
- */
-
- /* Test the horizontal filter. */
- REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, NULL, 0, Width(), Height()));
-
- for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjust = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << "width = " << Width();
+TEST_P(ConvolveTest, FilterExtremes) {
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t *ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
}
+#else
+ uint8_t ref[kOutputStride * kMaxDimension];
+#endif
- /* Test the vertical filter. */
- REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
- NULL, 0, kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, Width(), Height()));
-
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjust = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
- }
-
- /* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- Width(), Height()));
-
- for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjustY = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustY * kInputPixelStep)
- >> SUBPEL_BITS);
for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjustX = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustX * kInputPixelStep)
- >> SUBPEL_BITS);
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
+ }
+ }
- ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
- << "x == " << x << ", y == " << y;
+ for (int axis = 0; axis < 2; axis++) {
+ int seed_val = 0;
+ while (seed_val < 256) {
+ for (int y = 0; y < 8; ++y) {
+ for (int x = 0; x < 8; ++x) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
+#else
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
+#endif
+ if (axis) seed_val++;
+ }
+ if (axis)
+          seed_val -= 8;
+ else
+ seed_val++;
+ }
+ if (axis) seed_val += 8;
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
}
}
}
@@ -553,22 +931,24 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
for (int frac = 0; frac < 16; ++frac) {
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ eighttap[frac], step,
+ eighttap[frac], step,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(in, y * kInputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "x == " << x << ", y == " << y
<< ", frac == " << frac << ", step == " << step;
}
@@ -579,10 +959,590 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE2 && ARCH_X86_64
+void wrap_convolve8_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride, filter_x,
+ filter_x_stride, filter_y, filter_y_stride,
+ w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+#endif // HAVE_SSE2 && ARCH_X86_64
+
+void wrap_convolve_copy_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_copy_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_copy_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
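+
+// Every wrapper above follows one pattern: bind a fixed bit depth to a
+// vpx_highbd_convolve* function so it fits the 8-bit ConvolveFunc signature.
+// A minimal sketch of the same binding as a C++ function template
+// (illustrative only; HighbdConvolveFunc is a hypothetical name, and the
+// test keeps plain wrappers so the pointers stay C-compatible):
+//
+//   typedef void (*HighbdConvolveFunc)(const uint8_t *, ptrdiff_t,
+//                                      uint8_t *, ptrdiff_t,
+//                                      const int16_t *, int,
+//                                      const int16_t *, int,
+//                                      int, int, int);
+//   template <HighbdConvolveFunc fn, int kBd>
+//   void highbd_wrap(const uint8_t *src, ptrdiff_t src_stride,
+//                    uint8_t *dst, ptrdiff_t dst_stride,
+//                    const int16_t *fx, int fxs,
+//                    const int16_t *fy, int fys, int w, int h) {
+//     fn(src, src_stride, dst, dst_stride, fx, fxs, fy, fys, w, h, kBd);
+//   }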
+
const ConvolveFunctions convolve8_c(
- vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
- vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
- vp9_convolve8_c, vp9_convolve8_avg_c);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+const ConvolveFunctions convolve10_c(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve10_c),
+ make_tuple(8, 4, &convolve10_c),
+ make_tuple(4, 8, &convolve10_c),
+ make_tuple(8, 8, &convolve10_c),
+ make_tuple(16, 8, &convolve10_c),
+ make_tuple(8, 16, &convolve10_c),
+ make_tuple(16, 16, &convolve10_c),
+ make_tuple(32, 16, &convolve10_c),
+ make_tuple(16, 32, &convolve10_c),
+ make_tuple(32, 32, &convolve10_c),
+ make_tuple(64, 32, &convolve10_c),
+ make_tuple(32, 64, &convolve10_c),
+ make_tuple(64, 64, &convolve10_c)));
+const ConvolveFunctions convolve12_c(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve12_c),
+ make_tuple(8, 4, &convolve12_c),
+ make_tuple(4, 8, &convolve12_c),
+ make_tuple(8, 8, &convolve12_c),
+ make_tuple(16, 8, &convolve12_c),
+ make_tuple(8, 16, &convolve12_c),
+ make_tuple(16, 16, &convolve12_c),
+ make_tuple(32, 16, &convolve12_c),
+ make_tuple(16, 32, &convolve12_c),
+ make_tuple(32, 32, &convolve12_c),
+ make_tuple(64, 32, &convolve12_c),
+ make_tuple(32, 64, &convolve12_c),
+ make_tuple(64, 64, &convolve12_c)));
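+// Each tuple above is (output block width, output block height, function
+// table); the fixture reads them back with GET_PARAM(0), GET_PARAM(1) and
+// GET_PARAM(2). A hypothetical extra case for a 4x16 block at 10 bits would
+// read: make_tuple(4, 16, &convolve10_c).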
+
+#else
+
+const ConvolveFunctions convolve8_c(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
+ vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_c),
@@ -598,12 +1558,87 @@
make_tuple(64, 32, &convolve8_c),
make_tuple(32, 64, &convolve8_c),
make_tuple(64, 64, &convolve8_c)));
+#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && ARCH_X86_64
+#if CONFIG_VP9_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
- vp9_convolve8_horiz_sse2, vp9_convolve8_avg_horiz_sse2,
- vp9_convolve8_vert_sse2, vp9_convolve8_avg_vert_sse2,
- vp9_convolve8_sse2, vp9_convolve8_avg_sse2);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_sse2),
+ make_tuple(8, 4, &convolve8_sse2),
+ make_tuple(4, 8, &convolve8_sse2),
+ make_tuple(8, 8, &convolve8_sse2),
+ make_tuple(16, 8, &convolve8_sse2),
+ make_tuple(8, 16, &convolve8_sse2),
+ make_tuple(16, 16, &convolve8_sse2),
+ make_tuple(32, 16, &convolve8_sse2),
+ make_tuple(16, 32, &convolve8_sse2),
+ make_tuple(32, 32, &convolve8_sse2),
+ make_tuple(64, 32, &convolve8_sse2),
+ make_tuple(32, 64, &convolve8_sse2),
+ make_tuple(64, 64, &convolve8_sse2),
+ make_tuple(4, 4, &convolve10_sse2),
+ make_tuple(8, 4, &convolve10_sse2),
+ make_tuple(4, 8, &convolve10_sse2),
+ make_tuple(8, 8, &convolve10_sse2),
+ make_tuple(16, 8, &convolve10_sse2),
+ make_tuple(8, 16, &convolve10_sse2),
+ make_tuple(16, 16, &convolve10_sse2),
+ make_tuple(32, 16, &convolve10_sse2),
+ make_tuple(16, 32, &convolve10_sse2),
+ make_tuple(32, 32, &convolve10_sse2),
+ make_tuple(64, 32, &convolve10_sse2),
+ make_tuple(32, 64, &convolve10_sse2),
+ make_tuple(64, 64, &convolve10_sse2),
+ make_tuple(4, 4, &convolve12_sse2),
+ make_tuple(8, 4, &convolve12_sse2),
+ make_tuple(4, 8, &convolve12_sse2),
+ make_tuple(8, 8, &convolve12_sse2),
+ make_tuple(16, 8, &convolve12_sse2),
+ make_tuple(8, 16, &convolve12_sse2),
+ make_tuple(16, 16, &convolve12_sse2),
+ make_tuple(32, 16, &convolve12_sse2),
+ make_tuple(16, 32, &convolve12_sse2),
+ make_tuple(32, 32, &convolve12_sse2),
+ make_tuple(64, 32, &convolve12_sse2),
+ make_tuple(32, 64, &convolve12_sse2),
+ make_tuple(64, 64, &convolve12_sse2)));
+#else
+const ConvolveFunctions convolve8_sse2(
+#if CONFIG_USE_X86INC
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
+#else
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+#endif // CONFIG_USE_X86INC
+ vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
+ vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
+ vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_sse2),
@@ -619,13 +1654,18 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
- vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
- vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_ssse3),
@@ -643,11 +1683,52 @@
make_tuple(64, 64, &convolve8_ssse3)));
#endif
+#if HAVE_AVX2 && HAVE_SSSE3
+const ConvolveFunctions convolve8_avx2(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_avx2),
+ make_tuple(8, 4, &convolve8_avx2),
+ make_tuple(4, 8, &convolve8_avx2),
+ make_tuple(8, 8, &convolve8_avx2),
+ make_tuple(8, 16, &convolve8_avx2),
+ make_tuple(16, 8, &convolve8_avx2),
+ make_tuple(16, 16, &convolve8_avx2),
+ make_tuple(32, 16, &convolve8_avx2),
+ make_tuple(16, 32, &convolve8_avx2),
+ make_tuple(32, 32, &convolve8_avx2),
+ make_tuple(64, 32, &convolve8_avx2),
+ make_tuple(32, 64, &convolve8_avx2),
+ make_tuple(64, 64, &convolve8_avx2)));
+#endif // HAVE_AVX2 && HAVE_SSSE3
+
#if HAVE_NEON
+#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
- vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
- vp9_convolve8_neon, vp9_convolve8_avg_neon);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else   // !HAVE_NEON_ASM
+const ConvolveFunctions convolve8_neon(
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#endif // HAVE_NEON_ASM
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_neon),
@@ -663,13 +1744,17 @@
make_tuple(64, 32, &convolve8_neon),
make_tuple(32, 64, &convolve8_neon),
make_tuple(64, 64, &convolve8_neon)));
-#endif
+#endif // HAVE_NEON
#if HAVE_DSPR2
const ConvolveFunctions convolve8_dspr2(
- vp9_convolve8_horiz_dspr2, vp9_convolve8_avg_horiz_dspr2,
- vp9_convolve8_vert_dspr2, vp9_convolve8_avg_vert_dspr2,
- vp9_convolve8_dspr2, vp9_convolve8_avg_dspr2);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
+ vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
+ vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
+ vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_dspr2),
@@ -686,4 +1771,30 @@
make_tuple(32, 64, &convolve8_dspr2),
make_tuple(64, 64, &convolve8_dspr2)));
#endif
+
+#if HAVE_MSA
+const ConvolveFunctions convolve8_msa(
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa,
+ vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
+ vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
+ vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_msa),
+ make_tuple(8, 4, &convolve8_msa),
+ make_tuple(4, 8, &convolve8_msa),
+ make_tuple(8, 8, &convolve8_msa),
+ make_tuple(16, 8, &convolve8_msa),
+ make_tuple(8, 16, &convolve8_msa),
+ make_tuple(16, 16, &convolve8_msa),
+ make_tuple(32, 16, &convolve8_msa),
+ make_tuple(16, 32, &convolve8_msa),
+ make_tuple(32, 32, &convolve8_msa),
+ make_tuple(64, 32, &convolve8_msa),
+ make_tuple(32, 64, &convolve8_msa),
+ make_tuple(64, 64, &convolve8_msa)));
+#endif // HAVE_MSA
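+
+// Each SIMD flavor above (SSE2, SSSE3, AVX2, NEON, DSPR2, MSA) instantiates
+// the same ConvolveTest suite against its own function table, so every
+// optimized path is checked against the C reference on identical inputs.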
} // namespace
|
CWE-119
| null |
uint8_t *input() const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
BorderLeft());
}
#else
  return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
#endif
}
|
150,830 |
uint8_t* output() const {
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
uint8_t *output() const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
BorderLeft());
}
#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
#endif
}
uint8_t *output_ref() const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
BorderLeft());
}
#else
return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
#endif
}
uint16_t lookup(uint8_t *list, int index) const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return list[index];
} else {
return CONVERT_TO_SHORTPTR(list)[index];
}
#else
return list[index];
#endif
}
void assign_val(uint8_t *list, int index, uint16_t val) const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
list[index] = (uint8_t) val;
} else {
CONVERT_TO_SHORTPTR(list)[index] = val;
}
#else
list[index] = (uint8_t) val;
#endif
}
void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter,
const int16_t *VFilter,
uint8_t *dst_ptr,
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width,
output_height);
} else {
highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
src_stride, HFilter, VFilter,
CONVERT_TO_SHORTPTR(dst_ptr),
dst_stride, output_width, output_height,
UUT_->use_highbd_);
}
#else
filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width,
output_height);
#endif
}
void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter,
const int16_t *VFilter,
uint8_t *dst_ptr,
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width, output_height);
} else {
highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
HFilter, VFilter,
CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
output_width, output_height, UUT_->use_highbd_);
}
#else
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width, output_height);
#endif
}
|
@@ -9,40 +9,65 @@
*/
#include <string.h>
-#include "test/acm_random.h"
-#include "test/register_state_check.h"
-#include "test/util.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_filter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
namespace {
-typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h);
+
+static const unsigned int kMaxDimension = 64;
+
+typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
- convolve_fn_t v8, convolve_fn_t v8_avg,
- convolve_fn_t hv8, convolve_fn_t hv8_avg)
- : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
- hv8_avg_(hv8_avg) {}
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
+ ConvolveFunc h8, ConvolveFunc h8_avg,
+ ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg,
+ ConvolveFunc sh8, ConvolveFunc sh8_avg,
+ ConvolveFunc sv8, ConvolveFunc sv8_avg,
+ ConvolveFunc shv8, ConvolveFunc shv8_avg,
+ int bd)
+ : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
+ v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
+ sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
+ use_highbd_(bd) {}
- convolve_fn_t h8_;
- convolve_fn_t v8_;
- convolve_fn_t hv8_;
- convolve_fn_t h8_avg_;
- convolve_fn_t v8_avg_;
- convolve_fn_t hv8_avg_;
+ ConvolveFunc copy_;
+ ConvolveFunc avg_;
+ ConvolveFunc h8_;
+ ConvolveFunc v8_;
+ ConvolveFunc hv8_;
+ ConvolveFunc h8_avg_;
+ ConvolveFunc v8_avg_;
+ ConvolveFunc hv8_avg_;
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
-typedef std::tr1::tuple<int, int, const ConvolveFunctions*> convolve_param_t;
+typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
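+
+// A ConvolveParam packs (output width, output height, function table) for
+// the value-parameterized cases instantiated at the end of this file.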
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
@@ -68,71 +93,66 @@
const int kInterp_Extend = 4;
const unsigned int intermediate_height =
(kInterp_Extend - 1) + output_height + kInterp_Extend;
+ unsigned int i, j;
- /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
- * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
- * + kInterp_Extend
- * = 3 + 16 + 4
- * = 23
- * and filter_max_width = 16
- */
- uint8_t intermediate_buffer[71 * 64];
+ // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+ // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+ // + kInterp_Extend
+  //                               = 3 + 64 + 4
+  //                               = 71
+  // and filter_max_width = kMaxDimension (64),
+  // matching the intermediate_buffer[71 * kMaxDimension] declared below.
+ //
+ uint8_t intermediate_buffer[71 * kMaxDimension];
const int intermediate_next_stride = 1 - intermediate_height * output_width;
// Horizontal pass (src -> transposed intermediate).
- {
- uint8_t *output_ptr = intermediate_buffer;
- const int src_next_row_stride = src_stride - output_width;
- unsigned int i, j;
- src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
- for (i = 0; i < intermediate_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
- ++src_ptr;
- output_ptr += intermediate_height;
- }
- src_ptr += src_next_row_stride;
- output_ptr += intermediate_next_stride;
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
}
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
}
// Vertical pass (transposed intermediate -> dst).
- {
- uint8_t *src_ptr = intermediate_buffer;
- const int dst_next_row_stride = dst_stride - output_width;
- unsigned int i, j;
- for (i = 0; i < output_height; ++i) {
- for (j = 0; j < output_width; ++j) {
- // Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
- // Normalize back to 0-255...
- *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
- src_ptr += intermediate_height;
- }
- src_ptr += intermediate_next_stride;
- dst_ptr += dst_next_row_stride;
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
}
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
}
}
@@ -159,17 +179,138 @@
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
- uint8_t tmp[64 * 64];
+ uint8_t tmp[kMaxDimension * kMaxDimension];
- assert(output_width <= 64);
- assert(output_height <= 64);
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
output_width, output_height);
block2d_average_c(tmp, 64, dst_ptr, dst_stride,
output_width, output_height);
}
-class ConvolveTest : public ::testing::TestWithParam<convolve_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+ /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+ * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+ * + kInterp_Extend
+   *                               = 3 + 64 + 4
+   *                               = 71
+   * and filter_max_width = kMaxDimension (64),
+   * matching the intermediate_buffer[71 * kMaxDimension] declared below.
+ */
+ uint16_t intermediate_buffer[71 * kMaxDimension];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint16_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to [0, (1 << bd) - 1]...
+ *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint16_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+        // Normalize back to [0, (1 << bd) - 1]...
+ *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
+
+void highbd_block2d_average_c(uint16_t *src,
+ unsigned int src_stride,
+ uint16_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
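+
+// The (a + b + 1) >> 1 in the loop above is round-to-nearest averaging: for
+// example (10 + 13 + 1) >> 1 == 12, the same result as
+// ROUND_POWER_OF_TWO(10 + 13, 1).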
+
+void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint16_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height,
+ int bd) {
+ uint16_t tmp[kMaxDimension * kMaxDimension];
+
+ assert(output_width <= kMaxDimension);
+ assert(output_height <= kMaxDimension);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height, bd);
+ highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
@@ -177,13 +318,36 @@
vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
output_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
+ output_ref_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment,
+ (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
+ output16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+ output16_ref_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
+#endif
}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
static void TearDownTestCase() {
vpx_free(input_ - 1);
input_ = NULL;
vpx_free(output_);
output_ = NULL;
+ vpx_free(output_ref_);
+ output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_free(input16_ - 1);
+ input16_ = NULL;
+ vpx_free(output16_);
+ output16_ = NULL;
+ vpx_free(output16_ref_);
+ output16_ref_ = NULL;
+#endif
}
protected:
@@ -191,7 +355,6 @@
static const int kOuterBlockSize = 256;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
- static const int kMaxDimension = 64;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -212,6 +375,12 @@
virtual void SetUp() {
UUT_ = GET_PARAM(2);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ != 0)
+ mask_ = (1 << UUT_->use_highbd_) - 1;
+ else
+ mask_ = 255;
+#endif
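+    // e.g. use_highbd_ == 10 gives mask_ == (1 << 10) - 1 == 1023, the
+    // largest legal 10-bit pixel value.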
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
@@ -221,12 +390,33 @@
}
::libvpx_test::ACMRandom prng;
- for (int i = 0; i < kInputBufferSize; ++i)
- input_[i] = prng.Rand8Extremes();
+ for (int i = 0; i < kInputBufferSize; ++i) {
+ if (i & 1) {
+ input_[i] = 255;
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = mask_;
+#endif
+ } else {
+ input_[i] = prng.Rand8Extremes();
+#if CONFIG_VP9_HIGHBITDEPTH
+ input16_[i] = prng.Rand16() & mask_;
+#endif
+ }
+ }
}
void SetConstantInput(int value) {
memset(input_, value, kInputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+ vpx_memset16(input16_, value, kInputBufferSize);
+#endif
+ }
+
+ void CopyOutputToRef() {
+ memcpy(output_ref_, output_, kOutputBufferSize);
+#if CONFIG_VP9_HIGHBITDEPTH
+    // 16-bit pixels: scale the byte count by the element size.
+    memcpy(output16_ref_, output16_,
+           kOutputBufferSize * sizeof(output16_[0]));
+#endif
}
void CheckGuardBlocks() {
@@ -236,39 +426,197 @@
}
}
- uint8_t* input() const {
+ uint8_t *input() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
}
- uint8_t* output() const {
+ uint8_t *output() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint8_t *output_ref() const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ } else {
+ return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
+ BorderLeft());
+ }
+#else
+ return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
+#endif
+ }
+
+ uint16_t lookup(uint8_t *list, int index) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ return list[index];
+ } else {
+ return CONVERT_TO_SHORTPTR(list)[index];
+ }
+#else
+ return list[index];
+#endif
+ }
+
+ void assign_val(uint8_t *list, int index, uint16_t val) const {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ list[index] = (uint8_t) val;
+ } else {
+ CONVERT_TO_SHORTPTR(list)[index] = val;
+ }
+#else
+ list[index] = (uint8_t) val;
+#endif
+ }
+
+ void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+ } else {
+ highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
+ src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
+ }
+#else
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width,
+ output_height);
+#endif
+ }
+
+ void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0) {
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+ } else {
+ highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
+ HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
+ output_width, output_height, UUT_->use_highbd_);
+ }
+#else
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
+ dst_ptr, dst_stride, output_width, output_height);
+#endif
}
const ConvolveFunctions* UUT_;
static uint8_t* input_;
static uint8_t* output_;
+ static uint8_t* output_ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ static uint16_t* input16_;
+ static uint16_t* output16_;
+ static uint16_t* output16_ref_;
+ int mask_;
+#endif
};
+
uint8_t* ConvolveTest::input_ = NULL;
uint8_t* ConvolveTest::output_ = NULL;
+uint8_t* ConvolveTest::output_ref_ = NULL;
+#if CONFIG_VP9_HIGHBITDEPTH
+uint16_t* ConvolveTest::input16_ = NULL;
+uint16_t* ConvolveTest::output16_ = NULL;
+uint16_t* ConvolveTest::output16_ref_ = NULL;
+#endif
TEST_P(ConvolveTest, GuardBlocks) {
CheckGuardBlocks();
}
+TEST_P(ConvolveTest, Copy) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, Avg) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t* const out_ref = output_ref();
+ CopyOutputToRef();
+
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
+ lookup(out_ref, y * kOutputStride + x), 1))
+ << "(" << x << "," << y << ")";
+}
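+
+// The Avg test asserts dst == ROUND_POWER_OF_TWO(src + old_dst, 1): each
+// output pixel must be the rounded mean of the source pixel and the value
+// that was already in the destination buffer.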
+
TEST_P(ConvolveTest, CopyHoriz) {
uint8_t* const in = input();
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -277,15 +625,16 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
@@ -294,31 +643,26 @@
uint8_t* const out = output();
DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
- REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
+ 16, Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ ASSERT_EQ(lookup(out, y * kOutputStride + x),
+ lookup(in, y * kInputStride + x))
<< "(" << x << "," << y << ")";
}
-const int16_t (*kTestFilterList[])[8] = {
- vp9_bilinear_filters,
- vp9_sub_pel_filters_8,
- vp9_sub_pel_filters_8s,
- vp9_sub_pel_filters_8lp
-};
-const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
+const int kNumFilterBanks = 4;
const int kNumFilters = 16;
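+// The four banks index vp9_filter_kernels (EIGHTTAP, EIGHTTAP_SMOOTH,
+// EIGHTTAP_SHARP, BILINEAR); each bank holds kNumFilters == 16 sub-pixel
+// phase kernels.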
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int i = 0; i < kNumFilters; i++) {
const int p0 = filters[i][0] + filters[i][1];
const int p1 = filters[i][2] + filters[i][3];
@@ -341,40 +685,57 @@
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
-
+#endif
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_(in, kInputStride, out, kOutputStride,
kInvalidFilter, 16, filters[filter_y], 16,
Width(), Height()));
- else
- REGISTER_STATE_CHECK(
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
UUT_->h8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -386,54 +747,77 @@
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
uint8_t* const in = input();
uint8_t* const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t* ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
+ }
+#else
uint8_t ref[kOutputStride * kMaxDimension];
+#endif
// Populate ref and out with some random data
::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- const uint8_t r = prng.Rand8Extremes();
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
- out[y * kOutputStride + x] = r;
- ref[y * kOutputStride + x] = r;
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
}
}
- const int kNumFilterBanks = sizeof(kTestFilterList) /
- sizeof(kTestFilterList[0]);
-
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
- const int16_t (*filters)[8] = kTestFilterList[filter_bank];
- const int kNumFilters = 16;
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
- REGISTER_STATE_CHECK(
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
Width(), Height()));
else if (filter_y)
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
Width(), Height()));
else
- REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->avg_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
- ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
<< "filters (" << filter_bank << ","
<< filter_x << "," << filter_y << ")";
@@ -442,108 +826,102 @@
}
}
-DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128},
- { 0, 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 0, 128},
- { 0, 0, 0, 0, 128},
- { 0, 0, 0, 128},
- { 0, 0, 128},
- { 0, 128},
- { 128}
-};
-
-/* This test exercises the horizontal and vertical filter functions. */
-TEST_P(ConvolveTest, ChangeFilterWorks) {
- uint8_t* const in = input();
- uint8_t* const out = output();
-
- /* Assume that the first input sample is at the 8/16th position. */
- const int kInitialSubPelOffset = 8;
-
- /* Filters are 8-tap, so the first filter tap will be applied to the pixel
- * at position -3 with respect to the current filtering position. Since
- * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
- * which is non-zero only in the last tap. So, applying the filter at the
- * current input position will result in an output equal to the pixel at
- * offset +4 (-3 + 7) with respect to the current filtering position.
- */
- const int kPixelSelected = 4;
-
- /* Assume that each output pixel requires us to step on by 17/16th pixels in
- * the input.
- */
- const int kInputPixelStep = 17;
-
- /* The filters are setup in such a way that the expected output produces
- * sets of 8 identical output samples. As the filter position moves to the
- * next 1/16th pixel position the only active (=128) filter tap moves one
- * position to the left, resulting in the same input pixel being replicated
- * in to the output for 8 consecutive samples. After each set of 8 positions
- * the filters select a different input pixel. kFilterPeriodAdjust below
- * computes which input pixel is written to the output for a specified
- * x or y position.
- */
-
- /* Test the horizontal filter. */
- REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, NULL, 0, Width(), Height()));
-
- for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjust = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << "width = " << Width();
+TEST_P(ConvolveTest, FilterExtremes) {
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint8_t ref8[kOutputStride * kMaxDimension];
+ uint16_t ref16[kOutputStride * kMaxDimension];
+ uint8_t *ref;
+ if (UUT_->use_highbd_ == 0) {
+ ref = ref8;
+ } else {
+ ref = CONVERT_TO_BYTEPTR(ref16);
}
+#else
+ uint8_t ref[kOutputStride * kMaxDimension];
+#endif
- /* Test the vertical filter. */
- REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
- NULL, 0, kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep, Width(), Height()));
-
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjust = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjust * kInputPixelStep)
- >> SUBPEL_BITS);
- ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
- }
-
- /* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- kChangeFilters[kInitialSubPelOffset],
- kInputPixelStep,
- Width(), Height()));
-
- for (int y = 0; y < Height(); ++y) {
- const int kFilterPeriodAdjustY = (y >> 3) << 3;
- const int ref_y =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustY * kInputPixelStep)
- >> SUBPEL_BITS);
for (int x = 0; x < Width(); ++x) {
- const int kFilterPeriodAdjustX = (x >> 3) << 3;
- const int ref_x =
- kPixelSelected + ((kInitialSubPelOffset
- + kFilterPeriodAdjustX * kInputPixelStep)
- >> SUBPEL_BITS);
+ uint16_t r;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
+ r = prng.Rand8Extremes();
+ } else {
+ r = prng.Rand16() & mask_;
+ }
+#else
+ r = prng.Rand8Extremes();
+#endif
+ assign_val(out, y * kOutputStride + x, r);
+ assign_val(ref, y * kOutputStride + x, r);
+ }
+ }
- ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
- << "x == " << x << ", y == " << y;
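+  // Exercise extreme on/off inputs: bit i of seed_val decides whether
+  // column i (axis == 0) or row i (axis == 1) of an 8x8 input patch holds
+  // 0 or the maximum pixel value, and seed_val steps through all 256 such
+  // patterns.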
+ for (int axis = 0; axis < 2; axis++) {
+ int seed_val = 0;
+ while (seed_val < 256) {
+ for (int y = 0; y < 8; ++y) {
+ for (int x = 0; x < 8; ++x) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
+#else
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
+#endif
+ if (axis) seed_val++;
+ }
+ if (axis)
+          seed_val -= 8;
+ else
+ seed_val++;
+ }
+ if (axis) seed_val += 8;
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const InterpKernel *filters =
+ vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ wrapper_filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+ if (filter_x && filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_x)
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+ else
+ ASM_REGISTER_STATE_CHECK(
+ UUT_->copy_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 0, kInvalidFilter, 0,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(lookup(ref, y * kOutputStride + x),
+ lookup(out, y * kOutputStride + x))
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
}
}
}
@@ -553,22 +931,24 @@
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
for (int frac = 0; frac < 16; ++frac) {
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
- REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ eighttap[frac], step,
+ eighttap[frac], step,
+ Width(), Height()));
CheckGuardBlocks();
for (int y = 0; y < Height(); ++y) {
for (int x = 0; x < Width(); ++x) {
- ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ ASSERT_EQ(lookup(in, y * kInputStride + x),
+ lookup(out, y * kOutputStride + x))
<< "x == " << x << ", y == " << y
<< ", frac == " << frac << ", step == " << step;
}
@@ -579,10 +959,590 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE2 && ARCH_X86_64
+void wrap_convolve8_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride, filter_x,
+ filter_x_stride, filter_y, filter_y_stride,
+ w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_sse2_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_sse2_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_sse2_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_sse2(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+#endif // HAVE_SSE2 && ARCH_X86_64
+
+void wrap_convolve_copy_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_horiz_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_vert_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve8_avg_c_8(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 8);
+}
+
+void wrap_convolve_copy_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_horiz_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_vert_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve8_avg_c_10(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 10);
+}
+
+void wrap_convolve_copy_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_copy_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_horiz_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_vert_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
+void wrap_convolve8_avg_c_12(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w, int h) {
+ vpx_highbd_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, filter_x_stride,
+ filter_y, filter_y_stride, w, h, 12);
+}
+
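+// The wrap_* functions above differ only in the bit depth bound into the
+// final argument of the vpx_highbd_* call; a macro sketch of the pattern
+// (the WRAP macro below is hypothetical and not part of the recorded patch):
+//   #define WRAP(func, bd)                                                \
+//     void wrap_##func##_##bd(const uint8_t *src, ptrdiff_t src_stride,   \
+//                             uint8_t *dst, ptrdiff_t dst_stride,         \
+//                             const int16_t *fx, int fxs,                 \
+//                             const int16_t *fy, int fys, int w, int h) { \
+//       vpx_highbd_##func(src, src_stride, dst, dst_stride,               \
+//                         fx, fxs, fy, fys, w, h, bd);                    \
+//     }
+//   WRAP(convolve8_horiz_c, 10)  // expands to wrap_convolve8_horiz_c_10
+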
const ConvolveFunctions convolve8_c(
- vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
- vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
- vp9_convolve8_c, vp9_convolve8_avg_c);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
+ wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
+ wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+const ConvolveFunctions convolve10_c(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
+ wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
+ wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve10_c),
+ make_tuple(8, 4, &convolve10_c),
+ make_tuple(4, 8, &convolve10_c),
+ make_tuple(8, 8, &convolve10_c),
+ make_tuple(16, 8, &convolve10_c),
+ make_tuple(8, 16, &convolve10_c),
+ make_tuple(16, 16, &convolve10_c),
+ make_tuple(32, 16, &convolve10_c),
+ make_tuple(16, 32, &convolve10_c),
+ make_tuple(32, 32, &convolve10_c),
+ make_tuple(64, 32, &convolve10_c),
+ make_tuple(32, 64, &convolve10_c),
+ make_tuple(64, 64, &convolve10_c)));
+const ConvolveFunctions convolve12_c(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
+ wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
+ wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve12_c),
+ make_tuple(8, 4, &convolve12_c),
+ make_tuple(4, 8, &convolve12_c),
+ make_tuple(8, 8, &convolve12_c),
+ make_tuple(16, 8, &convolve12_c),
+ make_tuple(8, 16, &convolve12_c),
+ make_tuple(16, 16, &convolve12_c),
+ make_tuple(32, 16, &convolve12_c),
+ make_tuple(16, 32, &convolve12_c),
+ make_tuple(32, 32, &convolve12_c),
+ make_tuple(64, 32, &convolve12_c),
+ make_tuple(32, 64, &convolve12_c),
+ make_tuple(64, 64, &convolve12_c)));
+
+#else
+
+const ConvolveFunctions convolve8_c(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
+ vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_c),
@@ -598,12 +1558,87 @@
make_tuple(64, 32, &convolve8_c),
make_tuple(32, 64, &convolve8_c),
make_tuple(64, 64, &convolve8_c)));
+#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && ARCH_X86_64
+#if CONFIG_VP9_HIGHBITDEPTH
const ConvolveFunctions convolve8_sse2(
- vp9_convolve8_horiz_sse2, vp9_convolve8_avg_horiz_sse2,
- vp9_convolve8_vert_sse2, vp9_convolve8_avg_vert_sse2,
- vp9_convolve8_sse2, vp9_convolve8_avg_sse2);
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
+ wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
+ wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
+ wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
+const ConvolveFunctions convolve10_sse2(
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
+ wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
+ wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
+ wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
+const ConvolveFunctions convolve12_sse2(
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
+ wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
+ wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
+ wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
+INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_sse2),
+ make_tuple(8, 4, &convolve8_sse2),
+ make_tuple(4, 8, &convolve8_sse2),
+ make_tuple(8, 8, &convolve8_sse2),
+ make_tuple(16, 8, &convolve8_sse2),
+ make_tuple(8, 16, &convolve8_sse2),
+ make_tuple(16, 16, &convolve8_sse2),
+ make_tuple(32, 16, &convolve8_sse2),
+ make_tuple(16, 32, &convolve8_sse2),
+ make_tuple(32, 32, &convolve8_sse2),
+ make_tuple(64, 32, &convolve8_sse2),
+ make_tuple(32, 64, &convolve8_sse2),
+ make_tuple(64, 64, &convolve8_sse2),
+ make_tuple(4, 4, &convolve10_sse2),
+ make_tuple(8, 4, &convolve10_sse2),
+ make_tuple(4, 8, &convolve10_sse2),
+ make_tuple(8, 8, &convolve10_sse2),
+ make_tuple(16, 8, &convolve10_sse2),
+ make_tuple(8, 16, &convolve10_sse2),
+ make_tuple(16, 16, &convolve10_sse2),
+ make_tuple(32, 16, &convolve10_sse2),
+ make_tuple(16, 32, &convolve10_sse2),
+ make_tuple(32, 32, &convolve10_sse2),
+ make_tuple(64, 32, &convolve10_sse2),
+ make_tuple(32, 64, &convolve10_sse2),
+ make_tuple(64, 64, &convolve10_sse2),
+ make_tuple(4, 4, &convolve12_sse2),
+ make_tuple(8, 4, &convolve12_sse2),
+ make_tuple(4, 8, &convolve12_sse2),
+ make_tuple(8, 8, &convolve12_sse2),
+ make_tuple(16, 8, &convolve12_sse2),
+ make_tuple(8, 16, &convolve12_sse2),
+ make_tuple(16, 16, &convolve12_sse2),
+ make_tuple(32, 16, &convolve12_sse2),
+ make_tuple(16, 32, &convolve12_sse2),
+ make_tuple(32, 32, &convolve12_sse2),
+ make_tuple(64, 32, &convolve12_sse2),
+ make_tuple(32, 64, &convolve12_sse2),
+ make_tuple(64, 64, &convolve12_sse2)));
+#else
+const ConvolveFunctions convolve8_sse2(
+#if CONFIG_USE_X86INC
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
+#else
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+#endif // CONFIG_USE_X86INC
+ vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
+ vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
+ vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_sse2),
@@ -619,13 +1654,18 @@
make_tuple(64, 32, &convolve8_sse2),
make_tuple(32, 64, &convolve8_sse2),
make_tuple(64, 64, &convolve8_sse2)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
- vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
- vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_ssse3),
@@ -643,11 +1683,52 @@
make_tuple(64, 64, &convolve8_ssse3)));
#endif
+#if HAVE_AVX2 && HAVE_SSSE3
+const ConvolveFunctions convolve8_avx2(
+ vpx_convolve_copy_c, vpx_convolve_avg_c,
+ vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
+ vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
+ vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_avx2),
+ make_tuple(8, 4, &convolve8_avx2),
+ make_tuple(4, 8, &convolve8_avx2),
+ make_tuple(8, 8, &convolve8_avx2),
+ make_tuple(8, 16, &convolve8_avx2),
+ make_tuple(16, 8, &convolve8_avx2),
+ make_tuple(16, 16, &convolve8_avx2),
+ make_tuple(32, 16, &convolve8_avx2),
+ make_tuple(16, 32, &convolve8_avx2),
+ make_tuple(32, 32, &convolve8_avx2),
+ make_tuple(64, 32, &convolve8_avx2),
+ make_tuple(32, 64, &convolve8_avx2),
+ make_tuple(64, 64, &convolve8_avx2)));
+#endif // HAVE_AVX2 && HAVE_SSSE3
+
#if HAVE_NEON
+#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
- vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
- vp9_convolve8_neon, vp9_convolve8_avg_neon);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else // HAVE_NEON
+const ConvolveFunctions convolve8_neon(
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon,
+ vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
+ vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
+ vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#endif // HAVE_NEON_ASM
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_neon),
@@ -663,13 +1744,17 @@
make_tuple(64, 32, &convolve8_neon),
make_tuple(32, 64, &convolve8_neon),
make_tuple(64, 64, &convolve8_neon)));
-#endif
+#endif // HAVE_NEON
#if HAVE_DSPR2
const ConvolveFunctions convolve8_dspr2(
- vp9_convolve8_horiz_dspr2, vp9_convolve8_avg_horiz_dspr2,
- vp9_convolve8_vert_dspr2, vp9_convolve8_avg_vert_dspr2,
- vp9_convolve8_dspr2, vp9_convolve8_avg_dspr2);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
+ vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
+ vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
+ vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
make_tuple(4, 4, &convolve8_dspr2),
@@ -686,4 +1771,30 @@
make_tuple(32, 64, &convolve8_dspr2),
make_tuple(64, 64, &convolve8_dspr2)));
#endif
+
+#if HAVE_MSA
+const ConvolveFunctions convolve8_msa(
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa,
+ vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
+ vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
+ vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
+ vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+
+INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_msa),
+ make_tuple(8, 4, &convolve8_msa),
+ make_tuple(4, 8, &convolve8_msa),
+ make_tuple(8, 8, &convolve8_msa),
+ make_tuple(16, 8, &convolve8_msa),
+ make_tuple(8, 16, &convolve8_msa),
+ make_tuple(16, 16, &convolve8_msa),
+ make_tuple(32, 16, &convolve8_msa),
+ make_tuple(16, 32, &convolve8_msa),
+ make_tuple(32, 32, &convolve8_msa),
+ make_tuple(64, 32, &convolve8_msa),
+ make_tuple(32, 64, &convolve8_msa),
+ make_tuple(64, 64, &convolve8_msa)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
| null |
uint8_t *output() const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize +
BorderLeft());
}
#else
    return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
#endif
}
uint8_t *output_ref() const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
} else {
return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize +
BorderLeft());
}
#else
return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
#endif
}
uint16_t lookup(uint8_t *list, int index) const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
return list[index];
} else {
return CONVERT_TO_SHORTPTR(list)[index];
}
#else
return list[index];
#endif
}
void assign_val(uint8_t *list, int index, uint16_t val) const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
list[index] = (uint8_t) val;
} else {
CONVERT_TO_SHORTPTR(list)[index] = val;
}
#else
list[index] = (uint8_t) val;
#endif
}
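  // A minimal round-trip sketch for lookup()/assign_val() (illustrative
  // comments only, not part of the recorded function): the two accessors
  // hide 8-bit vs. high-bit-depth storage behind one index interface, so a
  // test loop never branches on UUT_->use_highbd_ at the call site:
  //   const uint16_t r = prng.Rand16() & mask_;  // prng/mask_ as used above
  //   assign_val(out, y * kOutputStride + x, r);
  //   ASSERT_EQ(r, lookup(out, y * kOutputStride + x));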
void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter,
const int16_t *VFilter,
uint8_t *dst_ptr,
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width,
output_height);
} else {
highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
src_stride, HFilter, VFilter,
CONVERT_TO_SHORTPTR(dst_ptr),
dst_stride, output_width, output_height,
UUT_->use_highbd_);
}
#else
filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width,
output_height);
#endif
}
void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter,
const int16_t *VFilter,
uint8_t *dst_ptr,
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width, output_height);
} else {
highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
HFilter, VFilter,
CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
output_width, output_height, UUT_->use_highbd_);
}
#else
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
dst_ptr, dst_stride, output_width, output_height);
#endif
|
150,831 |
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
if (pkt->data.psnr.psnr[0] < min_psnr_)
min_psnr_ = pkt->data.psnr.psnr[0];
}
|
@@ -7,45 +7,65 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <climits>
-#include <vector>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
#include "test/util.h"
+#include "test/y4m_video_source.h"
namespace {
-class CpuSpeedTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith2Params<
- libvpx_test::TestMode, int> {
+const int kMaxPSNR = 100;
+
+class CpuSpeedTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
- CpuSpeedTest() : EncoderTest(GET_PARAM(0)) {}
+ CpuSpeedTest()
+ : EncoderTest(GET_PARAM(0)),
+ encoding_mode_(GET_PARAM(1)),
+ set_cpu_used_(GET_PARAM(2)),
+ min_psnr_(kMaxPSNR) {}
virtual ~CpuSpeedTest() {}
virtual void SetUp() {
InitializeConfig();
- SetMode(GET_PARAM(1));
- set_cpu_used_ = GET_PARAM(2);
+ SetMode(encoding_mode_);
+ if (encoding_mode_ != ::libvpx_test::kRealTime) {
+ cfg_.g_lag_in_frames = 25;
+ cfg_.rc_end_usage = VPX_VBR;
+ } else {
+ cfg_.g_lag_in_frames = 0;
+ cfg_.rc_end_usage = VPX_CBR;
+ }
+ }
+
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+ min_psnr_ = kMaxPSNR;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
- encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
- encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
- encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
- encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ if (encoding_mode_ != ::libvpx_test::kRealTime) {
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
+ encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
+ encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ }
}
}
- virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- }
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->data.psnr.psnr[0] < min_psnr_)
+ min_psnr_ = pkt->data.psnr.psnr[0];
}
+
+ ::libvpx_test::TestMode encoding_mode_;
int set_cpu_used_;
+ double min_psnr_;
};
TEST_P(CpuSpeedTest, TestQ0) {
@@ -53,7 +73,6 @@
// without a mismatch when passing in a very low max q. This pushes
// the encoder to producing lots of big partitions which will likely
// extend into the border and test the border condition.
- cfg_.g_lag_in_frames = 25;
cfg_.rc_2pass_vbr_minsection_pct = 5;
cfg_.rc_2pass_vbr_minsection_pct = 2000;
cfg_.rc_target_bitrate = 400;
@@ -63,16 +82,32 @@
::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
20);
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ EXPECT_GE(min_psnr_, kMaxPSNR);
}
+TEST_P(CpuSpeedTest, TestScreencastQ0) {
+ ::libvpx_test::Y4mVideoSource video("screendata.y4m", 0, 25);
+ cfg_.g_timebase = video.timebase();
+ cfg_.rc_2pass_vbr_minsection_pct = 5;
+ cfg_.rc_2pass_vbr_minsection_pct = 2000;
+ cfg_.rc_target_bitrate = 400;
+ cfg_.rc_max_quantizer = 0;
+ cfg_.rc_min_quantizer = 0;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ EXPECT_GE(min_psnr_, kMaxPSNR);
+}
TEST_P(CpuSpeedTest, TestEncodeHighBitrate) {
// Validate that this non multiple of 64 wide clip encodes and decodes
// without a mismatch when passing in a very low max q. This pushes
// the encoder to producing lots of big partitions which will likely
// extend into the border and test the border condition.
- cfg_.g_lag_in_frames = 25;
cfg_.rc_2pass_vbr_minsection_pct = 5;
cfg_.rc_2pass_vbr_minsection_pct = 2000;
cfg_.rc_target_bitrate = 12000;
@@ -84,12 +119,11 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
+
TEST_P(CpuSpeedTest, TestLowBitrate) {
// Validate that this clip encodes and decodes without a mismatch
// when passing in a very high min q. This pushes the encoder to producing
 // lots of small partitions which might well test the other condition.
-
- cfg_.g_lag_in_frames = 25;
cfg_.rc_2pass_vbr_minsection_pct = 5;
cfg_.rc_2pass_vbr_minsection_pct = 2000;
cfg_.rc_target_bitrate = 200;
@@ -101,13 +135,14 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-using std::tr1::make_tuple;
-
-#define VP9_FACTORY \
- static_cast<const libvpx_test::CodecFactory*> (&libvpx_test::kVP9)
-
VP9_INSTANTIATE_TEST_CASE(
CpuSpeedTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood,
+ ::libvpx_test::kRealTime),
+ ::testing::Range(0, 9));
+
+VP10_INSTANTIATE_TEST_CASE(
+ CpuSpeedTest,
::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
- ::testing::Range(0, 8));
+ ::testing::Range(0, 3));
} // namespace
|
CWE-119
|
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
}
|
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
if (pkt->data.psnr.psnr[0] < min_psnr_)
min_psnr_ = pkt->data.psnr.psnr[0];
|
150,832 |
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
encoder->Control(VP8E_SET_ARNR_TYPE, 3);
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
if (encoding_mode_ != ::libvpx_test::kRealTime) {
encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
encoder->Control(VP8E_SET_ARNR_TYPE, 3);
}
}
}
|
@@ -7,45 +7,65 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <climits>
-#include <vector>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
#include "test/util.h"
+#include "test/y4m_video_source.h"
namespace {
-class CpuSpeedTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith2Params<
- libvpx_test::TestMode, int> {
+const int kMaxPSNR = 100;
+
+class CpuSpeedTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
- CpuSpeedTest() : EncoderTest(GET_PARAM(0)) {}
+ CpuSpeedTest()
+ : EncoderTest(GET_PARAM(0)),
+ encoding_mode_(GET_PARAM(1)),
+ set_cpu_used_(GET_PARAM(2)),
+ min_psnr_(kMaxPSNR) {}
virtual ~CpuSpeedTest() {}
virtual void SetUp() {
InitializeConfig();
- SetMode(GET_PARAM(1));
- set_cpu_used_ = GET_PARAM(2);
+ SetMode(encoding_mode_);
+ if (encoding_mode_ != ::libvpx_test::kRealTime) {
+ cfg_.g_lag_in_frames = 25;
+ cfg_.rc_end_usage = VPX_VBR;
+ } else {
+ cfg_.g_lag_in_frames = 0;
+ cfg_.rc_end_usage = VPX_CBR;
+ }
+ }
+
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+ min_psnr_ = kMaxPSNR;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
- encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
- encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
- encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
- encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ if (encoding_mode_ != ::libvpx_test::kRealTime) {
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
+ encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
+ encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ }
}
}
- virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- }
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->data.psnr.psnr[0] < min_psnr_)
+ min_psnr_ = pkt->data.psnr.psnr[0];
}
+
+ ::libvpx_test::TestMode encoding_mode_;
int set_cpu_used_;
+ double min_psnr_;
};
TEST_P(CpuSpeedTest, TestQ0) {
@@ -53,7 +73,6 @@
// without a mismatch when passing in a very low max q. This pushes
// the encoder to producing lots of big partitions which will likely
// extend into the border and test the border condition.
- cfg_.g_lag_in_frames = 25;
cfg_.rc_2pass_vbr_minsection_pct = 5;
cfg_.rc_2pass_vbr_minsection_pct = 2000;
cfg_.rc_target_bitrate = 400;
@@ -63,16 +82,32 @@
::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
20);
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ EXPECT_GE(min_psnr_, kMaxPSNR);
}
+TEST_P(CpuSpeedTest, TestScreencastQ0) {
+ ::libvpx_test::Y4mVideoSource video("screendata.y4m", 0, 25);
+ cfg_.g_timebase = video.timebase();
+ cfg_.rc_2pass_vbr_minsection_pct = 5;
+ cfg_.rc_2pass_vbr_minsection_pct = 2000;
+ cfg_.rc_target_bitrate = 400;
+ cfg_.rc_max_quantizer = 0;
+ cfg_.rc_min_quantizer = 0;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ EXPECT_GE(min_psnr_, kMaxPSNR);
+}
TEST_P(CpuSpeedTest, TestEncodeHighBitrate) {
// Validate that this non multiple of 64 wide clip encodes and decodes
// without a mismatch when passing in a very low max q. This pushes
// the encoder to producing lots of big partitions which will likely
// extend into the border and test the border condition.
- cfg_.g_lag_in_frames = 25;
cfg_.rc_2pass_vbr_minsection_pct = 5;
cfg_.rc_2pass_vbr_minsection_pct = 2000;
cfg_.rc_target_bitrate = 12000;
@@ -84,12 +119,11 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
+
TEST_P(CpuSpeedTest, TestLowBitrate) {
// Validate that this clip encodes and decodes without a mismatch
// when passing in a very high min q. This pushes the encoder to producing
 // lots of small partitions which might well test the other condition.
-
- cfg_.g_lag_in_frames = 25;
cfg_.rc_2pass_vbr_minsection_pct = 5;
cfg_.rc_2pass_vbr_minsection_pct = 2000;
cfg_.rc_target_bitrate = 200;
@@ -101,13 +135,14 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-using std::tr1::make_tuple;
-
-#define VP9_FACTORY \
- static_cast<const libvpx_test::CodecFactory*> (&libvpx_test::kVP9)
-
VP9_INSTANTIATE_TEST_CASE(
CpuSpeedTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood,
+ ::libvpx_test::kRealTime),
+ ::testing::Range(0, 9));
+
+VP10_INSTANTIATE_TEST_CASE(
+ CpuSpeedTest,
::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
- ::testing::Range(0, 8));
+ ::testing::Range(0, 3));
} // namespace
|
CWE-119
|
encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
encoder->Control(VP8E_SET_ARNR_TYPE, 3);
|
if (encoding_mode_ != ::libvpx_test::kRealTime) {
encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
encoder->Control(VP8E_SET_ARNR_TYPE, 3);
}
|
150,833 |
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
}
if (cfg_.ts_number_layers > 1) {
if (video->frame() == 1) {
encoder->Control(VP9E_SET_SVC, 1);
}
vpx_svc_layer_id_t layer_id = {0, 0};
layer_id.spatial_layer_id = 0;
frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
layer_id.temporal_layer_id = SetLayerId(video->frame(),
cfg_.ts_number_layers);
if (video->frame() > 0) {
encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
}
}
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
duration_ = 0;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 0)
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
if (denoiser_offon_test_) {
ASSERT_GT(denoiser_offon_period_, 0)
<< "denoiser_offon_period_ is not positive.";
if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
// Flip denoiser_on_ periodically
denoiser_on_ ^= 1;
}
}
encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
if (cfg_.ts_number_layers > 1) {
if (video->frame() == 0) {
encoder->Control(VP9E_SET_SVC, 1);
}
vpx_svc_layer_id_t layer_id;
layer_id.spatial_layer_id = 0;
frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
layer_id.temporal_layer_id = SetLayerId(video->frame(),
cfg_.ts_number_layers);
encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
}
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
duration_ = 0;
}
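  // SetFrameFlags() and SetLayerId() above map the frame index onto a
  // temporal layer; one plausible shape for the layer-id helper, assuming a
  // simple two-layer alternating pattern (hypothetical body, the test's
  // actual helper may differ):
  //   int SetLayerId(int frame, int num_layers) {
  //     return (num_layers == 2) ? (frame % 2) : 0;
  //   }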
|
@@ -14,6 +14,7 @@
#include "test/i420_video_source.h"
#include "test/util.h"
#include "test/y4m_video_source.h"
+#include "vpx/vpx_codec.h"
namespace {
@@ -38,10 +39,25 @@
first_drop_ = 0;
bits_total_ = 0;
duration_ = 0.0;
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 0)
+ encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
+ encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
+ }
+
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
duration_ = 0;
@@ -120,9 +136,67 @@
double file_datarate_;
double effective_datarate_;
size_t bits_in_last_frame_;
+ int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
+#if CONFIG_TEMPORAL_DENOISING
+// Check basic datarate targeting, for a single bitrate, but loop over the
+// various denoiser settings.
+TEST_P(DatarateTestLarge, DenoiserLevels) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 140);
+ for (int j = 1; j < 5; ++j) {
+ // Run over the denoiser levels.
+ // For the temporal denoiser (#if CONFIG_TEMPORAL_DENOISING) the level j
+ // refers to the 4 denoiser modes: denoiserYonly, denoiserOnYUV,
+ // denoiserOnAggressive, and denoiserOnAdaptive.
+ // For the spatial denoiser (if !CONFIG_TEMPORAL_DENOISING), the level j
+  // refers to the blur thresholds: 20, 40, 60, 80.
+ // The j = 0 case (denoiser off) is covered in the tests below.
+ denoiser_on_ = j;
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
+ << " The datarate for the file exceeds the target!";
+
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
+ << " The datarate for the file missed the target!";
+ }
+}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestLarge, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
+ << " The datarate for the file exceeds the target!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
+ << " The datarate for the file missed the target!";
+}
+#endif // CONFIG_TEMPORAL_DENOISING
+
TEST_P(DatarateTestLarge, BasicBufferModel) {
+ denoiser_on_ = 0;
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_max_quantizer = 56;
@@ -145,7 +219,7 @@
cfg_.rc_target_bitrate = i;
ResetModel();
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
- ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_)
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
<< " The datarate for the file exceeds the target!";
ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
@@ -154,6 +228,7 @@
}
TEST_P(DatarateTestLarge, ChangingDropFrameThresh) {
+ denoiser_on_ = 0;
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_max_quantizer = 36;
cfg_.rc_end_usage = VPX_CBR;
@@ -203,10 +278,14 @@
tot_frame_number_ = 0;
first_drop_ = 0;
num_drops_ = 0;
+ // Denoiser is off by default.
+ denoiser_on_ = 0;
// For testing up to 3 layers.
for (int i = 0; i < 3; ++i) {
bits_total_[i] = 0;
}
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
//
@@ -274,21 +353,30 @@
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
- if (video->frame() == 1) {
+ if (video->frame() == 0)
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
}
+
+ encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
if (cfg_.ts_number_layers > 1) {
- if (video->frame() == 1) {
+ if (video->frame() == 0) {
encoder->Control(VP9E_SET_SVC, 1);
}
- vpx_svc_layer_id_t layer_id = {0, 0};
+ vpx_svc_layer_id_t layer_id;
layer_id.spatial_layer_id = 0;
frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
layer_id.temporal_layer_id = SetLayerId(video->frame(),
cfg_.ts_number_layers);
- if (video->frame() > 0) {
- encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
- }
+ encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
}
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
@@ -357,6 +445,9 @@
int64_t bits_in_buffer_model_;
vpx_codec_pts_t first_drop_;
int num_drops_;
+ int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
// Check basic rate targeting,
@@ -447,7 +538,7 @@
<< " The first dropped frame for drop_thresh " << i
<< " > first dropped frame for drop_thresh "
<< i - kDropFrameThreshTestStep;
- ASSERT_GE(num_drops_, last_num_drops)
+ ASSERT_GE(num_drops_, last_num_drops * 0.90)
<< " The number of dropped frames for drop_thresh " << i
<< " < number of dropped frames for drop_thresh "
<< i - kDropFrameThreshTestStep;
@@ -473,20 +564,25 @@
cfg_.ts_rate_decimator[0] = 2;
cfg_.ts_rate_decimator[1] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
+ if (deadline_ == VPX_DL_REALTIME)
+ cfg_.g_error_resilient = 1;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 60-40 bitrate allocation for 2 temporal layers.
- cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = cfg_.rc_target_bitrate;
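+    // Reading aid (comment only, not part of the recorded patch): the
+    // layer targets are cumulative, so layer 1 carries the total for
+    // layers 0+1. At rc_target_bitrate = 400 this yields 240 kbps for
+    // layer 0 alone and 400 kbps combined, i.e. the 60-40 split above.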
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
@@ -511,21 +607,27 @@
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
- cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+ // TODO(yaowu): Work out more stable rc control strategy and
+ // Adjust the thresholds to be tighter than .75.
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.75)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+ // TODO(yaowu): Work out more stable rc control strategy and
+ // Adjust the thresholds to be tighter than 1.25.
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.25)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
@@ -553,32 +655,324 @@
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
cfg_.rc_target_bitrate = 200;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
- cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
// Expect some frame drops in this test: for this 200 frames test,
// expect at least 10% and not more than 60% drops.
ASSERT_GE(num_drops_, 20);
- ASSERT_LE(num_drops_, 120);
+ ASSERT_LE(num_drops_, 130);
}
}
+#if CONFIG_VP9_TEMPORAL_DENOISING
+// Check basic datarate targeting, for a single bitrate, when denoiser is on.
+TEST_P(DatarateTestVP9Large, DenoiserLevels) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 140);
+
+ // For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING),
+ // there is only one denoiser mode: denoiserYonly(which is 1),
+ // but may add more modes in the future.
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // Turn on the denoiser.
+ denoiser_on_ = 1;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
+ << " The datarate for the file is lower than target by too much!";
+ ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
+ << " The datarate for the file is greater than target by too much!";
+}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestVP9Large, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+
+ // For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING),
+ // there is only one denoiser mode: denoiserYonly(which is 1),
+ // but may add more modes in the future.
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
+ << " The datarate for the file is lower than target by too much!";
+ ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
+ << " The datarate for the file is greater than target by too much!";
+}
+#endif // CONFIG_VP9_TEMPORAL_DENOISING
+
+class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+ public:
+ DatarateOnePassCbrSvc() : EncoderTest(GET_PARAM(0)) {}
+ virtual ~DatarateOnePassCbrSvc() {}
+ protected:
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ speed_setting_ = GET_PARAM(2);
+ ResetModel();
+ }
+ virtual void ResetModel() {
+ last_pts_ = 0;
+ bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
+ frame_number_ = 0;
+ first_drop_ = 0;
+ bits_total_ = 0;
+ duration_ = 0.0;
+ mismatch_psnr_ = 0.0;
+ mismatch_nframes_ = 0;
+ }
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+ }
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 0) {
+ int i;
+ for (i = 0; i < 2; ++i) {
+ svc_params_.max_quantizers[i] = 63;
+ svc_params_.min_quantizers[i] = 0;
+ }
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ encoder->Control(VP9E_SET_SVC, 1);
+ encoder->Control(VP9E_SET_SVC_PARAMETERS, &svc_params_);
+ encoder->Control(VP8E_SET_CPUUSED, speed_setting_);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, 0);
+ encoder->Control(VP8E_SET_MAX_INTRA_BITRATE_PCT, 300);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, (cfg_.g_threads >> 1));
+ }
+ const vpx_rational_t tb = video->timebase();
+ timebase_ = static_cast<double>(tb.num) / tb.den;
+ duration_ = 0;
+ }
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
+ if (last_pts_ == 0)
+ duration = 1;
+ bits_in_buffer_model_ += static_cast<int64_t>(
+ duration * timebase_ * cfg_.rc_target_bitrate * 1000);
+ const bool key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
+ ? true: false;
+ if (!key_frame) {
+ ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
+ << pkt->data.frame.pts;
+ }
+ const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
+ bits_in_buffer_model_ -= frame_size_in_bits;
+ bits_total_ += frame_size_in_bits;
+ if (!first_drop_ && duration > 1)
+ first_drop_ = last_pts_ + 1;
+ last_pts_ = pkt->data.frame.pts;
+ bits_in_last_frame_ = frame_size_in_bits;
+ ++frame_number_;
+ }
+ virtual void EndPassHook(void) {
+ if (bits_total_) {
+ const double file_size_in_kb = bits_total_ / 1000.; // bits per kilobit
+ duration_ = (last_pts_ + 1) * timebase_;
+ effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0
+ / (cfg_.rc_buf_initial_sz / 1000.0 + duration_);
+ file_datarate_ = file_size_in_kb / duration_;
+ }
+ }
+
+ virtual void MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ double mismatch_psnr = compute_psnr(img1, img2);
+ mismatch_psnr_ += mismatch_psnr;
+ ++mismatch_nframes_;
+ }
+
+ unsigned int GetMismatchFrames() {
+ return mismatch_nframes_;
+ }
+
+ vpx_codec_pts_t last_pts_;
+ int64_t bits_in_buffer_model_;
+ double timebase_;
+ int frame_number_;
+ vpx_codec_pts_t first_drop_;
+ int64_t bits_total_;
+ double duration_;
+ double file_datarate_;
+ double effective_datarate_;
+ size_t bits_in_last_frame_;
+ vpx_svc_extra_cfg_t svc_params_;
+ int speed_setting_;
+ double mismatch_psnr_;
+ int mismatch_nframes_;
+};
+static void assign_layer_bitrates(vpx_codec_enc_cfg_t *const enc_cfg,
+ const vpx_svc_extra_cfg_t *svc_params,
+ int spatial_layers,
+ int temporal_layers,
+ int temporal_layering_mode,
+ unsigned int total_rate) {
+ int sl, spatial_layer_target;
+ float total = 0;
+ float alloc_ratio[VPX_MAX_LAYERS] = {0};
+ for (sl = 0; sl < spatial_layers; ++sl) {
+ if (svc_params->scaling_factor_den[sl] > 0) {
+ alloc_ratio[sl] = (float)(svc_params->scaling_factor_num[sl] *
+ 1.0 / svc_params->scaling_factor_den[sl]);
+ total += alloc_ratio[sl];
+ }
+ }
+ for (sl = 0; sl < spatial_layers; ++sl) {
+ enc_cfg->ss_target_bitrate[sl] = spatial_layer_target =
+ (unsigned int)(enc_cfg->rc_target_bitrate *
+ alloc_ratio[sl] / total);
+ const int index = sl * temporal_layers;
+ if (temporal_layering_mode == 3) {
+ enc_cfg->layer_target_bitrate[index] =
+ spatial_layer_target >> 1;
+ enc_cfg->layer_target_bitrate[index + 1] =
+ (spatial_layer_target >> 1) + (spatial_layer_target >> 2);
+ enc_cfg->layer_target_bitrate[index + 2] =
+ spatial_layer_target;
+ } else if (temporal_layering_mode == 2) {
+ enc_cfg->layer_target_bitrate[index] =
+ spatial_layer_target * 2 / 3;
+ enc_cfg->layer_target_bitrate[index + 1] =
+ spatial_layer_target;
+ }
+ }
+}
+
+// Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and
+// 3 temporal layers. Run CIF clip with 1 thread.
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_min_quantizer = 0;
+ cfg_.rc_max_quantizer = 63;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.ss_number_layers = 2;
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.g_error_resilient = 1;
+ cfg_.g_threads = 1;
+ cfg_.temporal_layering_mode = 3;
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ // TODO(wonkap/marpan): No frame drop for now, we need to implement correct
+ // frame dropping for SVC.
+ cfg_.rc_dropframe_thresh = 0;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 200);
+ // TODO(wonkap/marpan): Check that effective_datarate for each layer hits the
+ // layer target_bitrate. Also check if test can pass at lower bitrate (~200k).
+ for (int i = 400; i <= 800; i += 200) {
+ cfg_.rc_target_bitrate = i;
+ ResetModel();
+ assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
+ cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+ cfg_.rc_target_bitrate);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
+ << " The datarate for the file exceeds the target by too much!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
+ << " The datarate for the file is lower than the target by too much!";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+ }
+}
+
+// Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and
+// 3 temporal layers. Run HD clip with 4 threads.
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc4threads) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_min_quantizer = 0;
+ cfg_.rc_max_quantizer = 63;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.ss_number_layers = 2;
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.g_error_resilient = 1;
+ cfg_.g_threads = 4;
+ cfg_.temporal_layering_mode = 3;
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ // TODO(wonkap/marpan): No frame drop for now, we need to implement correct
+ // frame dropping for SVC.
+ cfg_.rc_dropframe_thresh = 0;
+ ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720,
+ 30, 1, 0, 300);
+ cfg_.rc_target_bitrate = 800;
+ ResetModel();
+ assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
+ cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+ cfg_.rc_target_bitrate);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
+ << " The datarate for the file exceeds the target by too much!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
+ << " The datarate for the file is lower than the target by too much!";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+}
+
VP8_INSTANTIATE_TEST_CASE(DatarateTestLarge, ALL_TEST_MODES);
VP9_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
::testing::Values(::libvpx_test::kOnePassGood,
- ::libvpx_test::kRealTime),
+ ::libvpx_test::kRealTime),
::testing::Range(2, 7));
+VP9_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc,
+ ::testing::Values(::libvpx_test::kRealTime),
+ ::testing::Range(5, 8));
} // namespace
|
CWE-119
|
if (video->frame() == 1) {
if (video->frame() == 1) {
vpx_svc_layer_id_t layer_id = {0, 0};
if (video->frame() > 0) {
encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
}
|
if (video->frame() == 0)
if (denoiser_offon_test_) {
ASSERT_GT(denoiser_offon_period_, 0)
<< "denoiser_offon_period_ is not positive.";
if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
// Flip denoiser_on_ periodically
denoiser_on_ ^= 1;
}
encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
if (video->frame() == 0) {
vpx_svc_layer_id_t layer_id;
encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
|
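The DatarateOnePassCbrSvc harness in the record above tracks rate with a client-side leaky-bucket model in FramePktHook: each packet credits the buffer with the target bitrate times the elapsed time and debits the frame's actual bits. A minimal standalone sketch of that bookkeeping follows; the struct and method names are illustrative, and only the arithmetic mirrors the test code.

#include <cstddef>
#include <cstdint>

// Leaky-bucket sketch of the model kept by FramePktHook (illustrative names).
struct LeakyBucket {
  int64_t bits_in_buffer;  // seeded with rc_target_bitrate * rc_buf_initial_sz
  double timebase;         // seconds per pts tick
  int target_kbps;         // cfg_.rc_target_bitrate

  // Returns false on a non-keyframe underrun (the ASSERT_GE in the hook).
  bool OnPacket(int64_t pts_delta, size_t frame_bytes, bool key_frame) {
    // Credit: bits the channel delivered while this frame was pending.
    bits_in_buffer +=
        static_cast<int64_t>(pts_delta * timebase * target_kbps * 1000);
    // The underrun check runs before the frame's own bits are debited.
    const bool ok = key_frame || bits_in_buffer >= 0;
    bits_in_buffer -= static_cast<int64_t>(frame_bytes) * 8;
    return ok;
  }
};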
150,834 |
virtual void ResetModel() {
last_pts_ = 0;
bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
frame_number_ = 0;
first_drop_ = 0;
bits_total_ = 0;
duration_ = 0.0;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void ResetModel() {
last_pts_ = 0;
bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
frame_number_ = 0;
first_drop_ = 0;
bits_total_ = 0;
duration_ = 0.0;
denoiser_offon_test_ = 0;
denoiser_offon_period_ = -1;
}
|
@@ -14,6 +14,7 @@
#include "test/i420_video_source.h"
#include "test/util.h"
#include "test/y4m_video_source.h"
+#include "vpx/vpx_codec.h"
namespace {
@@ -38,10 +39,25 @@
first_drop_ = 0;
bits_total_ = 0;
duration_ = 0.0;
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 0)
+ encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
+ encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
+ }
+
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
duration_ = 0;
@@ -120,9 +136,67 @@
double file_datarate_;
double effective_datarate_;
size_t bits_in_last_frame_;
+ int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
+#if CONFIG_TEMPORAL_DENOISING
+// Check basic datarate targeting, for a single bitrate, but loop over the
+// various denoiser settings.
+TEST_P(DatarateTestLarge, DenoiserLevels) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 140);
+ for (int j = 1; j < 5; ++j) {
+ // Run over the denoiser levels.
+ // For the temporal denoiser (#if CONFIG_TEMPORAL_DENOISING) the level j
+ // refers to the 4 denoiser modes: denoiserYonly, denoiserOnYUV,
+ // denoiserOnAggressive, and denoiserOnAdaptive.
+ // For the spatial denoiser (if !CONFIG_TEMPORAL_DENOISING), the level j
+  //   refers to the blur thresholds: 20, 40, 60, 80.
+ // The j = 0 case (denoiser off) is covered in the tests below.
+ denoiser_on_ = j;
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
+ << " The datarate for the file exceeds the target!";
+
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
+ << " The datarate for the file missed the target!";
+ }
+}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestLarge, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
+ << " The datarate for the file exceeds the target!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
+ << " The datarate for the file missed the target!";
+}
+#endif // CONFIG_TEMPORAL_DENOISING
+
TEST_P(DatarateTestLarge, BasicBufferModel) {
+ denoiser_on_ = 0;
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_max_quantizer = 56;
@@ -145,7 +219,7 @@
cfg_.rc_target_bitrate = i;
ResetModel();
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
- ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_)
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
<< " The datarate for the file exceeds the target!";
ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
@@ -154,6 +228,7 @@
}
TEST_P(DatarateTestLarge, ChangingDropFrameThresh) {
+ denoiser_on_ = 0;
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_max_quantizer = 36;
cfg_.rc_end_usage = VPX_CBR;
@@ -203,10 +278,14 @@
tot_frame_number_ = 0;
first_drop_ = 0;
num_drops_ = 0;
+ // Denoiser is off by default.
+ denoiser_on_ = 0;
// For testing up to 3 layers.
for (int i = 0; i < 3; ++i) {
bits_total_[i] = 0;
}
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
//
@@ -274,21 +353,30 @@
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
- if (video->frame() == 1) {
+ if (video->frame() == 0)
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
}
+
+ encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
if (cfg_.ts_number_layers > 1) {
- if (video->frame() == 1) {
+ if (video->frame() == 0) {
encoder->Control(VP9E_SET_SVC, 1);
}
- vpx_svc_layer_id_t layer_id = {0, 0};
+ vpx_svc_layer_id_t layer_id;
layer_id.spatial_layer_id = 0;
frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
layer_id.temporal_layer_id = SetLayerId(video->frame(),
cfg_.ts_number_layers);
- if (video->frame() > 0) {
- encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
- }
+ encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
}
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
@@ -357,6 +445,9 @@
int64_t bits_in_buffer_model_;
vpx_codec_pts_t first_drop_;
int num_drops_;
+ int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
// Check basic rate targeting,
@@ -447,7 +538,7 @@
<< " The first dropped frame for drop_thresh " << i
<< " > first dropped frame for drop_thresh "
<< i - kDropFrameThreshTestStep;
- ASSERT_GE(num_drops_, last_num_drops)
+ ASSERT_GE(num_drops_, last_num_drops * 0.90)
<< " The number of dropped frames for drop_thresh " << i
<< " < number of dropped frames for drop_thresh "
<< i - kDropFrameThreshTestStep;
@@ -473,20 +564,25 @@
cfg_.ts_rate_decimator[0] = 2;
cfg_.ts_rate_decimator[1] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
+ if (deadline_ == VPX_DL_REALTIME)
+ cfg_.g_error_resilient = 1;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 60-40 bitrate allocation for 2 temporal layers.
- cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
@@ -511,21 +607,27 @@
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
- cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+      // TODO(yaowu): Work out a more stable rc control strategy and
+      // adjust the thresholds to be tighter than 0.75.
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.75)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+      // TODO(yaowu): Work out a more stable rc control strategy and
+      // adjust the thresholds to be tighter than 1.25.
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.25)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
@@ -553,32 +655,324 @@
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
cfg_.rc_target_bitrate = 200;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
- cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
     // Expect some frame drops in this test: for this 200-frame test,
     // expect at least 10% and not more than 65% drops.
ASSERT_GE(num_drops_, 20);
- ASSERT_LE(num_drops_, 120);
+ ASSERT_LE(num_drops_, 130);
}
}
+#if CONFIG_VP9_TEMPORAL_DENOISING
+// Check basic datarate targeting, for a single bitrate, when denoiser is on.
+TEST_P(DatarateTestVP9Large, DenoiserLevels) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 140);
+
+ // For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING),
+  // there is only one denoiser mode: denoiserYonly (which is 1),
+  // but more modes may be added in the future.
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // Turn on the denoiser.
+ denoiser_on_ = 1;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
+ << " The datarate for the file is lower than target by too much!";
+ ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
+ << " The datarate for the file is greater than target by too much!";
+}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestVP9Large, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+
+ // For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING),
+  // there is only one denoiser mode: denoiserYonly (which is 1),
+  // but more modes may be added in the future.
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
+ << " The datarate for the file is lower than target by too much!";
+ ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
+ << " The datarate for the file is greater than target by too much!";
+}
+#endif // CONFIG_VP9_TEMPORAL_DENOISING
+
+class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+ public:
+ DatarateOnePassCbrSvc() : EncoderTest(GET_PARAM(0)) {}
+ virtual ~DatarateOnePassCbrSvc() {}
+ protected:
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ speed_setting_ = GET_PARAM(2);
+ ResetModel();
+ }
+ virtual void ResetModel() {
+ last_pts_ = 0;
+ bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
+ frame_number_ = 0;
+ first_drop_ = 0;
+ bits_total_ = 0;
+ duration_ = 0.0;
+ mismatch_psnr_ = 0.0;
+ mismatch_nframes_ = 0;
+ }
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+ }
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 0) {
+ int i;
+ for (i = 0; i < 2; ++i) {
+ svc_params_.max_quantizers[i] = 63;
+ svc_params_.min_quantizers[i] = 0;
+ }
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ encoder->Control(VP9E_SET_SVC, 1);
+ encoder->Control(VP9E_SET_SVC_PARAMETERS, &svc_params_);
+ encoder->Control(VP8E_SET_CPUUSED, speed_setting_);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, 0);
+ encoder->Control(VP8E_SET_MAX_INTRA_BITRATE_PCT, 300);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, (cfg_.g_threads >> 1));
+ }
+ const vpx_rational_t tb = video->timebase();
+ timebase_ = static_cast<double>(tb.num) / tb.den;
+ duration_ = 0;
+ }
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
+ if (last_pts_ == 0)
+ duration = 1;
+ bits_in_buffer_model_ += static_cast<int64_t>(
+ duration * timebase_ * cfg_.rc_target_bitrate * 1000);
+ const bool key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
+ ? true: false;
+ if (!key_frame) {
+ ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
+ << pkt->data.frame.pts;
+ }
+ const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
+ bits_in_buffer_model_ -= frame_size_in_bits;
+ bits_total_ += frame_size_in_bits;
+ if (!first_drop_ && duration > 1)
+ first_drop_ = last_pts_ + 1;
+ last_pts_ = pkt->data.frame.pts;
+ bits_in_last_frame_ = frame_size_in_bits;
+ ++frame_number_;
+ }
+ virtual void EndPassHook(void) {
+ if (bits_total_) {
+ const double file_size_in_kb = bits_total_ / 1000.; // bits per kilobit
+ duration_ = (last_pts_ + 1) * timebase_;
+ effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0
+ / (cfg_.rc_buf_initial_sz / 1000.0 + duration_);
+ file_datarate_ = file_size_in_kb / duration_;
+ }
+ }
+
+ virtual void MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ double mismatch_psnr = compute_psnr(img1, img2);
+ mismatch_psnr_ += mismatch_psnr;
+ ++mismatch_nframes_;
+ }
+
+ unsigned int GetMismatchFrames() {
+ return mismatch_nframes_;
+ }
+
+ vpx_codec_pts_t last_pts_;
+ int64_t bits_in_buffer_model_;
+ double timebase_;
+ int frame_number_;
+ vpx_codec_pts_t first_drop_;
+ int64_t bits_total_;
+ double duration_;
+ double file_datarate_;
+ double effective_datarate_;
+ size_t bits_in_last_frame_;
+ vpx_svc_extra_cfg_t svc_params_;
+ int speed_setting_;
+ double mismatch_psnr_;
+ int mismatch_nframes_;
+};
+static void assign_layer_bitrates(vpx_codec_enc_cfg_t *const enc_cfg,
+ const vpx_svc_extra_cfg_t *svc_params,
+ int spatial_layers,
+ int temporal_layers,
+ int temporal_layering_mode,
+ unsigned int total_rate) {
+ int sl, spatial_layer_target;
+ float total = 0;
+ float alloc_ratio[VPX_MAX_LAYERS] = {0};
+ for (sl = 0; sl < spatial_layers; ++sl) {
+ if (svc_params->scaling_factor_den[sl] > 0) {
+ alloc_ratio[sl] = (float)(svc_params->scaling_factor_num[sl] *
+ 1.0 / svc_params->scaling_factor_den[sl]);
+ total += alloc_ratio[sl];
+ }
+ }
+ for (sl = 0; sl < spatial_layers; ++sl) {
+ enc_cfg->ss_target_bitrate[sl] = spatial_layer_target =
+ (unsigned int)(enc_cfg->rc_target_bitrate *
+ alloc_ratio[sl] / total);
+ const int index = sl * temporal_layers;
+ if (temporal_layering_mode == 3) {
+ enc_cfg->layer_target_bitrate[index] =
+ spatial_layer_target >> 1;
+ enc_cfg->layer_target_bitrate[index + 1] =
+ (spatial_layer_target >> 1) + (spatial_layer_target >> 2);
+ enc_cfg->layer_target_bitrate[index + 2] =
+ spatial_layer_target;
+ } else if (temporal_layering_mode == 2) {
+ enc_cfg->layer_target_bitrate[index] =
+ spatial_layer_target * 2 / 3;
+ enc_cfg->layer_target_bitrate[index + 1] =
+ spatial_layer_target;
+ }
+ }
+}
+
+// Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and
+// 3 temporal layers. Run CIF clip with 1 thread.
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_min_quantizer = 0;
+ cfg_.rc_max_quantizer = 63;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.ss_number_layers = 2;
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.g_error_resilient = 1;
+ cfg_.g_threads = 1;
+ cfg_.temporal_layering_mode = 3;
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ // TODO(wonkap/marpan): No frame drop for now, we need to implement correct
+ // frame dropping for SVC.
+ cfg_.rc_dropframe_thresh = 0;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 200);
+ // TODO(wonkap/marpan): Check that effective_datarate for each layer hits the
+ // layer target_bitrate. Also check if test can pass at lower bitrate (~200k).
+ for (int i = 400; i <= 800; i += 200) {
+ cfg_.rc_target_bitrate = i;
+ ResetModel();
+ assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
+ cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+ cfg_.rc_target_bitrate);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
+ << " The datarate for the file exceeds the target by too much!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
+ << " The datarate for the file is lower than the target by too much!";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+ }
+}
+
+// Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and
+// 3 temporal layers. Run HD clip with 4 threads.
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc4threads) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_min_quantizer = 0;
+ cfg_.rc_max_quantizer = 63;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.ss_number_layers = 2;
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.g_error_resilient = 1;
+ cfg_.g_threads = 4;
+ cfg_.temporal_layering_mode = 3;
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ // TODO(wonkap/marpan): No frame drop for now, we need to implement correct
+ // frame dropping for SVC.
+ cfg_.rc_dropframe_thresh = 0;
+ ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720,
+ 30, 1, 0, 300);
+ cfg_.rc_target_bitrate = 800;
+ ResetModel();
+ assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
+ cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+ cfg_.rc_target_bitrate);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
+ << " The datarate for the file exceeds the target by too much!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
+ << " The datarate for the file is lower than the target by too much!";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+}
+
VP8_INSTANTIATE_TEST_CASE(DatarateTestLarge, ALL_TEST_MODES);
VP9_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
::testing::Values(::libvpx_test::kOnePassGood,
- ::libvpx_test::kRealTime),
+ ::libvpx_test::kRealTime),
::testing::Range(2, 7));
+VP9_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc,
+ ::testing::Values(::libvpx_test::kRealTime),
+ ::testing::Range(5, 8));
} // namespace
|
CWE-119
| null |
denoiser_offon_test_ = 0;
denoiser_offon_period_ = -1;
|
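The assign_layer_bitrates() helper in the patch above splits the total rate across spatial layers in proportion to their scaling ratios, then, for temporal_layering_mode == 3, splits each spatial target 50/75/100% cumulatively across the three temporal layers. Below is a hand-check of that arithmetic for the 2x3 setup these tests use; the 600 kbps total is an assumed example value, not taken from the tests.

#include <cassert>

// Hand-check of assign_layer_bitrates() for scaling factors 144/288 and
// 288/288 at an assumed 600 kbps total.
int main() {
  const float ratio0 = 144.0f / 288.0f;        // alloc_ratio[0] = 0.5
  const float ratio1 = 288.0f / 288.0f;        // alloc_ratio[1] = 1.0
  const float total_ratio = ratio0 + ratio1;   // 1.5
  const unsigned sl0 = (unsigned)(600 * ratio0 / total_ratio);  // 200 kbps
  const unsigned sl1 = (unsigned)(600 * ratio1 / total_ratio);  // 400 kbps
  // temporal_layering_mode == 3: cumulative 50/75/100% of each spatial target.
  assert(sl0 / 2 == 100 && sl0 / 2 + sl0 / 4 == 150);  // SL0: TL0, TL0+TL1
  assert(sl1 / 2 == 200 && sl1 / 2 + sl1 / 4 == 300);  // SL1: TL0, TL0+TL1
  assert(sl0 == 200 && sl1 == 400);                    // top temporal layers
  return 0;
}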
150,835 |
virtual void ResetModel() {
last_pts_ = 0;
bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
frame_number_ = 0;
tot_frame_number_ = 0;
first_drop_ = 0;
num_drops_ = 0;
for (int i = 0; i < 3; ++i) {
bits_total_[i] = 0;
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void ResetModel() {
last_pts_ = 0;
bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
frame_number_ = 0;
tot_frame_number_ = 0;
first_drop_ = 0;
num_drops_ = 0;
// Denoiser is off by default.
denoiser_on_ = 0;
for (int i = 0; i < 3; ++i) {
bits_total_[i] = 0;
}
denoiser_offon_test_ = 0;
denoiser_offon_period_ = -1;
}
|
@@ -14,6 +14,7 @@
#include "test/i420_video_source.h"
#include "test/util.h"
#include "test/y4m_video_source.h"
+#include "vpx/vpx_codec.h"
namespace {
@@ -38,10 +39,25 @@
first_drop_ = 0;
bits_total_ = 0;
duration_ = 0.0;
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 0)
+ encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
+ encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
+ }
+
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
duration_ = 0;
@@ -120,9 +136,67 @@
double file_datarate_;
double effective_datarate_;
size_t bits_in_last_frame_;
+ int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
+#if CONFIG_TEMPORAL_DENOISING
+// Check basic datarate targeting, for a single bitrate, but loop over the
+// various denoiser settings.
+TEST_P(DatarateTestLarge, DenoiserLevels) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 140);
+ for (int j = 1; j < 5; ++j) {
+ // Run over the denoiser levels.
+ // For the temporal denoiser (#if CONFIG_TEMPORAL_DENOISING) the level j
+ // refers to the 4 denoiser modes: denoiserYonly, denoiserOnYUV,
+ // denoiserOnAggressive, and denoiserOnAdaptive.
+ // For the spatial denoiser (if !CONFIG_TEMPORAL_DENOISING), the level j
+  //   refers to the blur thresholds: 20, 40, 60, 80.
+ // The j = 0 case (denoiser off) is covered in the tests below.
+ denoiser_on_ = j;
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
+ << " The datarate for the file exceeds the target!";
+
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
+ << " The datarate for the file missed the target!";
+ }
+}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestLarge, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
+ << " The datarate for the file exceeds the target!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
+ << " The datarate for the file missed the target!";
+}
+#endif // CONFIG_TEMPORAL_DENOISING
+
TEST_P(DatarateTestLarge, BasicBufferModel) {
+ denoiser_on_ = 0;
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_dropframe_thresh = 1;
cfg_.rc_max_quantizer = 56;
@@ -145,7 +219,7 @@
cfg_.rc_target_bitrate = i;
ResetModel();
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
- ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_)
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
<< " The datarate for the file exceeds the target!";
ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
@@ -154,6 +228,7 @@
}
TEST_P(DatarateTestLarge, ChangingDropFrameThresh) {
+ denoiser_on_ = 0;
cfg_.rc_buf_initial_sz = 500;
cfg_.rc_max_quantizer = 36;
cfg_.rc_end_usage = VPX_CBR;
@@ -203,10 +278,14 @@
tot_frame_number_ = 0;
first_drop_ = 0;
num_drops_ = 0;
+ // Denoiser is off by default.
+ denoiser_on_ = 0;
// For testing up to 3 layers.
for (int i = 0; i < 3; ++i) {
bits_total_[i] = 0;
}
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
//
@@ -274,21 +353,30 @@
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
- if (video->frame() == 1) {
+ if (video->frame() == 0)
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
}
+
+ encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
if (cfg_.ts_number_layers > 1) {
- if (video->frame() == 1) {
+ if (video->frame() == 0) {
encoder->Control(VP9E_SET_SVC, 1);
}
- vpx_svc_layer_id_t layer_id = {0, 0};
+ vpx_svc_layer_id_t layer_id;
layer_id.spatial_layer_id = 0;
frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
layer_id.temporal_layer_id = SetLayerId(video->frame(),
cfg_.ts_number_layers);
- if (video->frame() > 0) {
- encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
- }
+ encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id);
}
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
@@ -357,6 +445,9 @@
int64_t bits_in_buffer_model_;
vpx_codec_pts_t first_drop_;
int num_drops_;
+ int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
// Check basic rate targeting,
@@ -447,7 +538,7 @@
<< " The first dropped frame for drop_thresh " << i
<< " > first dropped frame for drop_thresh "
<< i - kDropFrameThreshTestStep;
- ASSERT_GE(num_drops_, last_num_drops)
+ ASSERT_GE(num_drops_, last_num_drops * 0.90)
<< " The number of dropped frames for drop_thresh " << i
<< " < number of dropped frames for drop_thresh "
<< i - kDropFrameThreshTestStep;
@@ -473,20 +564,25 @@
cfg_.ts_rate_decimator[0] = 2;
cfg_.ts_rate_decimator[1] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
+ if (deadline_ == VPX_DL_REALTIME)
+ cfg_.g_error_resilient = 1;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 60-40 bitrate allocation for 2 temporal layers.
- cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
@@ -511,21 +607,27 @@
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
for (int i = 200; i <= 800; i += 200) {
cfg_.rc_target_bitrate = i;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
- cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+      // TODO(yaowu): Work out a more stable rc control strategy and
+      // adjust the thresholds to be tighter than 0.75.
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.75)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+      // TODO(yaowu): Work out a more stable rc control strategy and
+      // adjust the thresholds to be tighter than 1.25.
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.25)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
@@ -553,32 +655,324 @@
cfg_.ts_rate_decimator[1] = 2;
cfg_.ts_rate_decimator[2] = 1;
+ cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+
::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
30, 1, 0, 200);
cfg_.rc_target_bitrate = 200;
ResetModel();
// 40-20-40 bitrate allocation for 3 temporal layers.
- cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
- cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ cfg_.layer_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.layer_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+ ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+ ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
     // Expect some frame drops in this test: for this 200-frame test,
     // expect at least 10% and not more than 65% drops.
ASSERT_GE(num_drops_, 20);
- ASSERT_LE(num_drops_, 120);
+ ASSERT_LE(num_drops_, 130);
}
}
+#if CONFIG_VP9_TEMPORAL_DENOISING
+// Check basic datarate targeting, for a single bitrate, when denoiser is on.
+TEST_P(DatarateTestVP9Large, DenoiserLevels) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 140);
+
+ // For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING),
+  // there is only one denoiser mode: denoiserYonly (which is 1),
+  // but more modes may be added in the future.
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // Turn on the denoiser.
+ denoiser_on_ = 1;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
+ << " The datarate for the file is lower than target by too much!";
+ ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
+ << " The datarate for the file is greater than target by too much!";
+}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestVP9Large, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+
+ // For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING),
+  // there is only one denoiser mode: denoiserYonly (which is 1),
+  // but more modes may be added in the future.
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
+ << " The datarate for the file is lower than target by too much!";
+ ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
+ << " The datarate for the file is greater than target by too much!";
+}
+#endif // CONFIG_VP9_TEMPORAL_DENOISING
+
+class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+ public:
+ DatarateOnePassCbrSvc() : EncoderTest(GET_PARAM(0)) {}
+ virtual ~DatarateOnePassCbrSvc() {}
+ protected:
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ speed_setting_ = GET_PARAM(2);
+ ResetModel();
+ }
+ virtual void ResetModel() {
+ last_pts_ = 0;
+ bits_in_buffer_model_ = cfg_.rc_target_bitrate * cfg_.rc_buf_initial_sz;
+ frame_number_ = 0;
+ first_drop_ = 0;
+ bits_total_ = 0;
+ duration_ = 0.0;
+ mismatch_psnr_ = 0.0;
+ mismatch_nframes_ = 0;
+ }
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+ }
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 0) {
+ int i;
+ for (i = 0; i < 2; ++i) {
+ svc_params_.max_quantizers[i] = 63;
+ svc_params_.min_quantizers[i] = 0;
+ }
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ encoder->Control(VP9E_SET_SVC, 1);
+ encoder->Control(VP9E_SET_SVC_PARAMETERS, &svc_params_);
+ encoder->Control(VP8E_SET_CPUUSED, speed_setting_);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, 0);
+ encoder->Control(VP8E_SET_MAX_INTRA_BITRATE_PCT, 300);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, (cfg_.g_threads >> 1));
+ }
+ const vpx_rational_t tb = video->timebase();
+ timebase_ = static_cast<double>(tb.num) / tb.den;
+ duration_ = 0;
+ }
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
+ if (last_pts_ == 0)
+ duration = 1;
+ bits_in_buffer_model_ += static_cast<int64_t>(
+ duration * timebase_ * cfg_.rc_target_bitrate * 1000);
+ const bool key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY)
+ ? true: false;
+ if (!key_frame) {
+ ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
+ << pkt->data.frame.pts;
+ }
+ const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
+ bits_in_buffer_model_ -= frame_size_in_bits;
+ bits_total_ += frame_size_in_bits;
+ if (!first_drop_ && duration > 1)
+ first_drop_ = last_pts_ + 1;
+ last_pts_ = pkt->data.frame.pts;
+ bits_in_last_frame_ = frame_size_in_bits;
+ ++frame_number_;
+ }
+ virtual void EndPassHook(void) {
+ if (bits_total_) {
+ const double file_size_in_kb = bits_total_ / 1000.; // bits per kilobit
+ duration_ = (last_pts_ + 1) * timebase_;
+ effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0
+ / (cfg_.rc_buf_initial_sz / 1000.0 + duration_);
+ file_datarate_ = file_size_in_kb / duration_;
+ }
+ }
+
+ virtual void MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ double mismatch_psnr = compute_psnr(img1, img2);
+ mismatch_psnr_ += mismatch_psnr;
+ ++mismatch_nframes_;
+ }
+
+ unsigned int GetMismatchFrames() {
+ return mismatch_nframes_;
+ }
+
+ vpx_codec_pts_t last_pts_;
+ int64_t bits_in_buffer_model_;
+ double timebase_;
+ int frame_number_;
+ vpx_codec_pts_t first_drop_;
+ int64_t bits_total_;
+ double duration_;
+ double file_datarate_;
+ double effective_datarate_;
+ size_t bits_in_last_frame_;
+ vpx_svc_extra_cfg_t svc_params_;
+ int speed_setting_;
+ double mismatch_psnr_;
+ int mismatch_nframes_;
+};
+static void assign_layer_bitrates(vpx_codec_enc_cfg_t *const enc_cfg,
+ const vpx_svc_extra_cfg_t *svc_params,
+ int spatial_layers,
+ int temporal_layers,
+ int temporal_layering_mode,
+ unsigned int total_rate) {
+ int sl, spatial_layer_target;
+ float total = 0;
+ float alloc_ratio[VPX_MAX_LAYERS] = {0};
+ for (sl = 0; sl < spatial_layers; ++sl) {
+ if (svc_params->scaling_factor_den[sl] > 0) {
+ alloc_ratio[sl] = (float)(svc_params->scaling_factor_num[sl] *
+ 1.0 / svc_params->scaling_factor_den[sl]);
+ total += alloc_ratio[sl];
+ }
+ }
+ for (sl = 0; sl < spatial_layers; ++sl) {
+ enc_cfg->ss_target_bitrate[sl] = spatial_layer_target =
+ (unsigned int)(enc_cfg->rc_target_bitrate *
+ alloc_ratio[sl] / total);
+ const int index = sl * temporal_layers;
+ if (temporal_layering_mode == 3) {
+ enc_cfg->layer_target_bitrate[index] =
+ spatial_layer_target >> 1;
+ enc_cfg->layer_target_bitrate[index + 1] =
+ (spatial_layer_target >> 1) + (spatial_layer_target >> 2);
+ enc_cfg->layer_target_bitrate[index + 2] =
+ spatial_layer_target;
+ } else if (temporal_layering_mode == 2) {
+ enc_cfg->layer_target_bitrate[index] =
+ spatial_layer_target * 2 / 3;
+ enc_cfg->layer_target_bitrate[index + 1] =
+ spatial_layer_target;
+ }
+ }
+}
+
+// Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and
+// 3 temporal layers. Run CIF clip with 1 thread.
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_min_quantizer = 0;
+ cfg_.rc_max_quantizer = 63;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.ss_number_layers = 2;
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.g_error_resilient = 1;
+ cfg_.g_threads = 1;
+ cfg_.temporal_layering_mode = 3;
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ // TODO(wonkap/marpan): No frame drop for now, we need to implement correct
+ // frame dropping for SVC.
+ cfg_.rc_dropframe_thresh = 0;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 200);
+ // TODO(wonkap/marpan): Check that effective_datarate for each layer hits the
+ // layer target_bitrate. Also check if test can pass at lower bitrate (~200k).
+ for (int i = 400; i <= 800; i += 200) {
+ cfg_.rc_target_bitrate = i;
+ ResetModel();
+ assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
+ cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+ cfg_.rc_target_bitrate);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
+ << " The datarate for the file exceeds the target by too much!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
+ << " The datarate for the file is lower than the target by too much!";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+ }
+}
+
+// Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and
+// 3 temporal layers. Run HD clip with 4 threads.
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc4threads) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_min_quantizer = 0;
+ cfg_.rc_max_quantizer = 63;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.ss_number_layers = 2;
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.g_error_resilient = 1;
+ cfg_.g_threads = 4;
+ cfg_.temporal_layering_mode = 3;
+ svc_params_.scaling_factor_num[0] = 144;
+ svc_params_.scaling_factor_den[0] = 288;
+ svc_params_.scaling_factor_num[1] = 288;
+ svc_params_.scaling_factor_den[1] = 288;
+ // TODO(wonkap/marpan): No frame drop for now, we need to implement correct
+ // frame dropping for SVC.
+ cfg_.rc_dropframe_thresh = 0;
+ ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720,
+ 30, 1, 0, 300);
+ cfg_.rc_target_bitrate = 800;
+ ResetModel();
+ assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers,
+ cfg_.ts_number_layers, cfg_.temporal_layering_mode,
+ cfg_.rc_target_bitrate);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85)
+ << " The datarate for the file exceeds the target by too much!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15)
+ << " The datarate for the file is lower than the target by too much!";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+}
+
VP8_INSTANTIATE_TEST_CASE(DatarateTestLarge, ALL_TEST_MODES);
VP9_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
::testing::Values(::libvpx_test::kOnePassGood,
- ::libvpx_test::kRealTime),
+ ::libvpx_test::kRealTime),
::testing::Range(2, 7));
+VP9_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc,
+ ::testing::Values(::libvpx_test::kRealTime),
+ ::testing::Range(5, 8));
} // namespace
|
CWE-119
| null |
// Denoiser is off by default.
denoiser_on_ = 0;
denoiser_offon_test_ = 0;
denoiser_offon_period_ = -1;
|
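The recurring change across records 150,834 and 150,835 is the periodic denoiser toggle added to PreEncodeFrameHook: flip denoiser_on_ whenever (frame + 1) is a multiple of denoiser_offon_period_, then re-send the noise-sensitivity control. A compact sketch of just that schedule, with illustrative names:

// Denoiser on/off schedule used by the DenoiserOffOn tests above: with
// period 100 and an initial state of off, frame 99 turns the denoiser on,
// frame 199 turns it off again, and so on.
struct DenoiserToggle {
  int on = 0;       // denoiser_on_ is off by default
  int period = -1;  // denoiser_offon_period_ must be set positive

  // Value to pass to VP8E/VP9E_SET_NOISE_SENSITIVITY for this frame.
  int ForFrame(unsigned frame) {
    if (period > 0 && (frame + 1) % period == 0)
      on ^= 1;  // flip periodically, exactly as the hook does
    return on;
  }
};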
150,836 |
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < kNumCoeffs; ++j)
input_block[j] = rnd.Rand8() - rnd.Rand8();
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
for (int j = 0; j < kNumCoeffs; ++j)
EXPECT_EQ(output_block[j], output_ref_block[j]);
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
for (int j = 0; j < kNumCoeffs; ++j)
EXPECT_EQ(output_block[j], output_ref_block[j]);
}
}
|
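Record 150,836 fixes RunCoeffCheck by widening the coefficient buffers from int16_t to tran_low_t and driving the transform with the full [-mask_, mask_] input range. The sketch below shows why the width matters; the typedef is an assumption meant to mirror the vpx_dsp definition of that era, not copied from this document.

#include <cstdint>

// With CONFIG_VP9_HIGHBITDEPTH, 10/12-bit residuals (up to +/-4095) scaled
// by the 16x16 forward transform exceed INT16_MAX, so int16_t storage
// truncates -- the CWE-119 overflow this record patches. Assumed typedef:
#if CONFIG_VP9_HIGHBITDEPTH
typedef int32_t tran_low_t;  // room for coefficients of 12-bit input
#else
typedef int16_t tran_low_t;  // 8-bit input still fits in 16 bits
#endif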
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+ // Use maximally allowed quantization step sizes for DC and AC
+ // coefficients respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+ // The encoder skips any non-DC intra prediction modes,
+ // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
input_block[j] = rnd.Rand8() - rnd.Rand8();
REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
|
DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
// Initialize a test block with input range [-mask_, mask_].
input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
|
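The rows in this span all record the same libvpx hardening change: transform coefficient buffers move from int16_t to tran_low_t so that 10- and 12-bit intermediate values can no longer overflow 16-bit storage, which is the buffer-overflow/memory-corruption risk (CWE-119) these entries are filed under. Below is a minimal sketch of the failure mode; it assumes tran_low_t is a 32-bit type, as in libvpx's high-bit-depth build, and the residual and gain constants are illustrative rather than taken from the codec.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: in libvpx, tran_low_t is a 32-bit type when
   CONFIG_VP9_HIGHBITDEPTH is enabled; we assume int32_t here. */
typedef int32_t tran_low_t;

int main(void) {
  /* A 12-bit residual reaches +/-4095, and forward-transform stages
     amplify it further; 4095 * 16 = 65520 no longer fits in the
     int16_t range [-32768, 32767]. Constants are illustrative. */
  const int residual = 4095;
  const int gain = 16;

  const int16_t narrow = (int16_t)(residual * gain);   /* wraps to -16 */
  const tran_low_t wide = (tran_low_t)residual * gain; /* stays 65520 */

  printf("int16_t: %d  tran_low_t: %d\n", narrow, (int)wide);
  return 0;
}

With 8-bit-only builds the narrow type was sufficient; widening the storage is what lets the patched tests exercise VPX_BITS_10 and VPX_BITS_12 inputs safely.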
150,837 |
void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
|
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+ // Use maximally allowed quantization step sizes for DC and AC
+ // coefficients respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+ // The encoder skips any non-DC intra prediction modes,
+ // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
|
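One detail worth calling out from the patch above: the round-trip tolerance now scales with bit depth. EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error) budgets a squared error of 1 at 8 bits, 16 at 10 bits, and 256 at 12 bits, i.e. the allowed absolute error doubles with each extra bit of depth. A short sketch of that arithmetic follows; the round_trip_budget helper is hypothetical, written only to mirror the expression in the test.

#include <stdio.h>

/* Hypothetical helper mirroring the tolerance expression used by the
   patched tests: the squared-error budget quadruples per extra bit. */
static unsigned round_trip_budget(int bit_depth) {
  return 1u << (2 * (bit_depth - 8));
}

int main(void) {
  const int depths[] = { 8, 10, 12 };
  for (int i = 0; i < 3; ++i) {
    /* Prints 1, 16, and 256: allowed absolute error of 1, 4, 16. */
    printf("bit depth %d: squared-error budget %u\n",
           depths[i], round_trip_budget(depths[i]));
  }
  return 0;
}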
150,838 |
void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
|
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
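RunQuantCheck above drives the inverse transform with the coarsest coefficients a conformant stream can carry: each reference output is truncated to a multiple of the quantization step, and the optimized IDCT must then reconstruct pixels identical to the C reference. A sketch of that truncation, assuming tran_low_t is a plain integer type:

// Truncate a coefficient toward zero to a multiple of `step`,
// mirroring the (coeff / step) * step rounding used in RunQuantCheck.
static tran_low_t QuantizeToStep(tran_low_t coeff, int step) {
  return static_cast<tran_low_t>((coeff / step) * step);
}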
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
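CompareInvReference above targets the partial-IDCT fast paths: only the first eob positions of the default 16x16 scan carry nonzero values, and the sign alternates per block so both rounding directions get exercised. A hypothetical helper capturing that fill pattern (the name and factoring are illustrative; the test inlines it):

// Leave only the first `eob` scan positions nonzero, alternating the
// sign per block, as CompareInvReference does inline.
static void FillSparseCoeffs(tran_low_t *coeff, const int16_t *scan, int eob,
                             int thresh, int block, ACMRandom *rnd) {
  const int sign = (block % 2) ? -1 : 1;
  for (int j = 0; j < kNumCoeffs; ++j)
    coeff[scan[j]] = (j < eob) ? sign * (*rnd)(thresh) : 0;
}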
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
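Note that mask_ doubles as the maximum pixel value for the configured depth, which is why inputs are drawn as rnd.Rand16() & mask_. Because the vpx_bit_depth_t enumerators carry their numeric bit counts, the member reduces to the sketch below (illustrative, not test code):

// Maximum pixel value per depth: (1 << bits) - 1, i.e.
// VPX_BITS_8 -> 255, VPX_BITS_10 -> 1023, VPX_BITS_12 -> 4095.
static int MaxPixelValue(vpx_bit_depth_t depth) {
  return (1 << static_cast<int>(depth)) - 1;
}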
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+  // Use the maximum allowed quantization step sizes for DC and AC
+ // coefficients respectively.
+ RunQuantCheck(1336, 1828);
+}
+
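The steps 1336 and 1828 appear to match the largest entries of VP9's 8-bit dc_qlookup/ac_qlookup dequantizer tables (an assumption worth checking against vp9_quant_common.c), so the check covers the coarsest coefficients a legal bitstream can produce. As named constants (illustrative only):

// Assumed top entries of VP9's 8-bit dequantizer tables.
const int kMaxDcDequantStep = 1336;
const int kMaxAcDequantStep = 1828;
// Equivalent to the call above:
// RunQuantCheck(kMaxDcDequantStep, kMaxAcDequantStep);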
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+  // The encoder skips any non-DC intra prediction modes
+ // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
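Per the comment, hybrid transforms are only reachable while the quantization step stays at or below 988, so this variant probes with in-range steps (429 for DC, 729 for AC) rather than the global maxima. A hypothetical predicate paraphrasing that restriction (not actual encoder code):

// Hypothetical paraphrase of the encoder rule described above:
// non-DCT transform types are skipped beyond this step size.
static bool HybridTransformReachable(int q_step) { return q_step <= 988; }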
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+  }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
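The thresh value 3167 in the tuples above is chosen to straddle the documented switchover at 3155: rnd(3167) yields coefficient magnitudes in [0, 3167), so random draws land on both sides of the optimized branch. As constants (values taken from the comment above):

// rnd(kTestThresh) produces coefficients both below and above the
// magnitude where the SSE2 high-bit-depth IDCT changes code path.
const int kOptSwitchover = 3155;
const int kTestThresh = 3167;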
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
|
150,839 |
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
for (int j = 0; j < kNumCoeffs; ++j) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
in[j] = src[j] - dst[j];
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
coeff[j] = round(out_r[j]);
REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
for (int j = 0; j < kNumCoeffs; ++j) {
const uint32_t diff = dst[j] - src[j];
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
<< " at index " << j;
}
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
for (int j = 0; j < kNumCoeffs; ++j) {
if (bit_depth_ == VPX_BITS_8) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
in[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
in[j] = src16[j] - dst16[j];
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
16));
#endif // CONFIG_VP9_HIGHBITDEPTH
}
for (int j = 0; j < kNumCoeffs; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
const uint32_t diff = dst[j] - src[j];
#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
<< " at index " << j;
}
}
}
|
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
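These thin wrappers exist because the high-bit-depth kernels take a trailing bd argument that the shared IdctFunc/IhtFunc signatures lack; each wrapper bakes a depth in so the function fits the parameter tuples. The same pattern in general form (illustrative; the file deliberately uses one named wrapper per depth instead):

// General shape of the adapters above: fix `bd` at compile time so the
// instantiated function matches the depth-agnostic IdctFunc signature.
template <int bd>
void Idct16x16WithDepth(const tran_low_t *in, uint8_t *out, int stride) {
  vpx_highbd_idct16x16_256_add_c(in, out, stride, bd);
}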
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+  // Use the maximum allowed quantization step sizes for DC and AC
+ // coefficients respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+  // The encoder skips any non-DC intra prediction modes
+ // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+  }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
in[j] = src[j] - dst[j];
coeff[j] = round(out_r[j]);
REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
|
DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif // CONFIG_VP9_HIGHBITDEPTH
if (bit_depth_ == VPX_BITS_8) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
in[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
in[j] = src16[j] - dst16[j];
#endif // CONFIG_VP9_HIGHBITDEPTH
}
coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
16));
#endif // CONFIG_VP9_HIGHBITDEPTH
}
#if CONFIG_VP9_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
#endif // CONFIG_VP9_HIGHBITDEPTH
|
150,840 |
void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
|
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+ // Use maximally allowed quantization step sizes for DC and AC
+ // coefficients respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+  // The encoder skips any non-DC intra prediction modes
+ // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
|
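The two records that follow (idx 150,841 and 150,842) carry the same commit diff as the record above: the fix widens coefficient storage from int16_t to tran_low_t and parameterizes the tests over vpx_bit_depth_t, because 10/12-bit residuals can overflow a 16-bit transform buffer, hence the CWE-119 label on these rows. Below is a minimal standalone sketch of the motivating arithmetic; the 16x16 block size and the (1 << bit_depth) - 1 residual mask come from the diff, while the plain DC sum is an illustrative stand-in for libvpx's actual internal transform scaling, not its real implementation.

#include <cstdint>
#include <iostream>

// Illustrative only: treat the unnormalized DC term of a 16x16 forward DCT
// as the plain sum of all 256 residuals. RunMemCheck() in the diff drives
// every residual to +/-mask_, where mask_ = (1 << bit_depth_) - 1.
int main() {
  const std::int64_t kNumCoeffs = 16 * 16;            // block size from the diff
  const std::int64_t mask12 = (1 << 12) - 1;          // mask_ for VPX_BITS_12
  const std::int64_t worst_dc = kNumCoeffs * mask12;  // 256 * 4095 = 1048320
  std::cout << "worst-case DC sum: " << worst_dc << '\n';
  std::cout << "INT16_MAX:         " << INT16_MAX << '\n';  // 32767
  // The sum is roughly 32x INT16_MAX, so an int16_t coefficient array wraps
  // silently; tran_low_t is 32 bits wide in high-bit-depth builds.
  return 0;
}

The test's own bound, 4 * DCT_MAX_VALUE << (bit_depth_ - 8), grows the same way, which is why output_block and output_ref_block change type throughout the diff.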
150,841 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
inv_txfm_ref = iht16x16_ref;
mask_ = (1 << bit_depth_) - 1;
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth_) {
case VPX_BITS_10:
inv_txfm_ref = iht16x16_10;
break;
case VPX_BITS_12:
inv_txfm_ref = iht16x16_12;
break;
default:
inv_txfm_ref = iht16x16_ref;
break;
}
#else
inv_txfm_ref = iht16x16_ref;
#endif
}
|
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+ // Use maximally allowed quantization step sizes for DC and AC
+ // coefficients respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+  // The encoder skips any non-DC intra prediction modes
+ // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+}
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
bit_depth_ = GET_PARAM(3);
inv_txfm_ref = iht16x16_ref;
mask_ = (1 << bit_depth_) - 1;
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth_) {
case VPX_BITS_10:
inv_txfm_ref = iht16x16_10;
break;
case VPX_BITS_12:
inv_txfm_ref = iht16x16_12;
break;
default:
inv_txfm_ref = iht16x16_ref;
break;
}
#else
inv_txfm_ref = iht16x16_ref;
#endif
|
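The diff repeated in the record below also rescales the round-trip error bounds: RunAccuracyCheck() now tolerates 1u << 2 * (bit_depth_ - 8) of squared error, since an amplitude tolerance of one 8-bit step corresponds to 2^(bd - 8) steps at bd bits and the test compares diff * diff. A small sketch of the resulting bounds follows; the 8/10/12 values mirror the vpx_bit_depth_t cases in the diff.

#include <cstdint>
#include <initializer_list>
#include <iostream>

// Squared-error tolerance applied by RunAccuracyCheck() after the patch:
// the per-pixel amplitude tolerance scales by 2^(bd - 8), and squaring it
// doubles the shift.
int main() {
  for (int bd : {8, 10, 12}) {
    const std::uint32_t bound = 1u << (2 * (bd - 8));
    std::cout << bd << "-bit: max squared error = " << bound << '\n';
  }
  return 0;
}

This prints 1, 16, and 256, matching the widened EXPECT_GE checks in the diff.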
150,842 |
void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fdct16x16_c(in, out, stride);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
int /*tx_type*/) {
vpx_fdct16x16_c(in, out, stride);
}
|
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+  // Use the maximum allowed quantization step sizes for DC and AC
+  // coefficients, respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+  // The encoder skips any non-DC intra prediction modes
+  // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+  }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
vp9_fdct16x16_c(in, out, stride);
|
void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
int /*tx_type*/) {
vpx_fdct16x16_c(in, out, stride);
|
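
The surrounding records all stem from the same libvpx change: transform coefficient storage widened from int16_t to tran_low_t. A minimal sketch of the range arithmetic behind that widening, using the 4 * DCT_MAX_VALUE << (bit_depth - 8) bound that RunMemCheck() asserts in the patch below; DCT_MAX_VALUE = 16384 is assumed from vp9_entropy.h, and the exact constants are illustrative:

#include <climits>
#include <cstdio>

// For 10- and 12-bit input the asserted coefficient bound exceeds INT16_MAX
// by roughly 8x and 32x, which is why high-bit-depth builds define
// tran_low_t as a 32-bit type.
int main() {
  const long kDctMaxValue = 16384;  // assumed value of DCT_MAX_VALUE
  for (int bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
    const long bound = (4 * kDctMaxValue) << (bit_depth - 8);
    std::printf("bit depth %2d: |coeff| bound %7ld (INT16_MAX = %d)\n",
                bit_depth, bound, INT16_MAX);
  }
  return 0;
}
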
150,843 |
void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
int /*tx_type*/) {
vpx_idct16x16_256_add_c(in, dest, stride);
}
void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
|
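
The reference functions above are threaded through plain function pointers (the FhtFunc, IdctFunc, and IhtFunc typedefs in the patch that follows), so one GTest body can be instantiated against C, SSE2, NEON, and MSA implementations. A stripped-down sketch of that value-parameterized pattern, assuming the same gtest vintage the tests use; UnaryFunc and add_one are illustrative names, not from the patch:

#include "third_party/googletest/src/include/gtest/gtest.h"

// Each tuple pairs an implementation under test with expected metadata;
// gtest runs the same TEST_P body once per tuple.
typedef int (*UnaryFunc)(int);
typedef std::tr1::tuple<UnaryFunc, int> UnaryParam;

int add_one(int x) { return x + 1; }

class UnaryTest : public ::testing::TestWithParam<UnaryParam> {};

TEST_P(UnaryTest, MatchesExpected) {
  UnaryFunc fn = std::tr1::get<0>(GetParam());
  EXPECT_EQ(std::tr1::get<1>(GetParam()), fn(41));
}

INSTANTIATE_TEST_CASE_P(
    C, UnaryTest,
    ::testing::Values(std::tr1::make_tuple(&add_one, 42)));
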
@@ -13,18 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -258,32 +258,100 @@
}
}
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_16x16_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_16x16_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
+ Idct16x16Param;
-void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct16x16_c(in, out, stride);
+void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int /*tx_type*/) {
+ vpx_fdct16x16_c(in, out, stride);
}
-void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int /*tx_type*/) {
+ vpx_idct16x16_256_add_c(in, dest, stride);
+}
+
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
+ int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+}
+
+void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+}
+
+void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_10(in, out, stride);
+}
+
+void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type) {
+ idct16x16_12(in, out, stride);
+}
+
+void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+}
+
+void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+}
+
+void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -291,24 +359,49 @@
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -316,27 +409,27 @@
}
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
- EXPECT_GE(count_test_block , total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -347,63 +440,148 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
+  void RunQuantCheck(int dc_thresh, int ac_thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 100000;
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = mask_;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -mask_;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+#if CONFIG_VP9_HIGHBITDEPTH
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+#endif
+
+ // quantization with maximum allowed step sizes
+      output_ref_block[0] = (output_ref_block[0] / dc_thresh) * dc_thresh;
+      for (int j = 1; j < kNumCoeffs; ++j)
+        output_ref_block[j] = (output_ref_block[j] / ac_thresh) * ac_thresh;
+ if (bit_depth_ == VPX_BITS_8) {
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
+ tx_type_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref16[j], dst16[j]);
+#endif
+ }
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
      // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
reference_16x16_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ 16));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
EXPECT_GE(1u, error)
<< "Error: 16x16 IDCT has error " << error
@@ -411,14 +589,75 @@
}
}
}
+
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 10;
+ const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 16x16 IDCT Comparison has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FhtFunc fwd_txfm_ref;
+ IhtFunc inv_txfm_ref;
};
class Trans16x16DCT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<dct_16x16_param_t> {
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
@@ -426,21 +665,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = idct16x16_10_ref;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = idct16x16_12_ref;
+ break;
+ default:
+ inv_txfm_ref = idct16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = idct16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans16x16DCT, AccuracyCheck) {
@@ -455,13 +712,19 @@
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+  // Use the maximum allowed quantization step sizes for DC and AC
+  // coefficients, respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
class Trans16x16HT
: public Trans16x16TestBase,
- public ::testing::TestWithParam<ht_16x16_param_t> {
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
@@ -469,21 +732,39 @@
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
+ mask_ = (1 << bit_depth_) - 1;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth_) {
+ case VPX_BITS_10:
+ inv_txfm_ref = iht16x16_10;
+ break;
+ case VPX_BITS_12:
+ inv_txfm_ref = iht16x16_12;
+ break;
+ default:
+ inv_txfm_ref = iht16x16_ref;
+ break;
+ }
+#else
+ inv_txfm_ref = iht16x16_ref;
+#endif
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans16x16HT, AccuracyCheck) {
@@ -498,40 +779,162 @@
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+  // The encoder skips any non-DC intra prediction modes
+  // when the quantization step size goes beyond 988.
+ RunQuantCheck(429, 729);
+}
+
+class InvTrans16x16DCT
+ : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
+ public:
+ virtual ~InvTrans16x16DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(3);
+ pitch_ = 16;
+ mask_ = (1 << bit_depth_) - 1;
+  }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans16x16DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2),
- make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3)));
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2),
- make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
+ VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
+ &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c,
+ &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
+ VPX_BITS_8)));
+// Optimizations take effect at a threshold of 3155, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans16x16DCT,
+ ::testing::Values(
+ make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10,
+ &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12,
+ &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
int /*tx_type*/) {
vpx_idct16x16_256_add_c(in, dest, stride);
}
void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
int tx_type) {
|
150,844 |
void reference_32x32_dct_2d(const int16_t input[kNumCoeffs],
double output[kNumCoeffs]) {
for (int i = 0; i < 32; ++i) {
double temp_in[32], temp_out[32];
for (int j = 0; j < 32; ++j)
temp_in[j] = input[j*32 + i];
reference_32x32_dct_1d(temp_in, temp_out, 1);
for (int j = 0; j < 32; ++j)
output[j * 32 + i] = temp_out[j];
}
for (int i = 0; i < 32; ++i) {
double temp_in[32], temp_out[32];
for (int j = 0; j < 32; ++j)
temp_in[j] = output[j + i*32];
reference_32x32_dct_1d(temp_in, temp_out, 1);
for (int j = 0; j < 32; ++j)
output[j + i * 32] = temp_out[j] / 4;
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void reference_32x32_dct_2d(const int16_t input[kNumCoeffs],
double output[kNumCoeffs]) {
for (int i = 0; i < 32; ++i) {
double temp_in[32], temp_out[32];
for (int j = 0; j < 32; ++j)
temp_in[j] = input[j*32 + i];
reference_32x32_dct_1d(temp_in, temp_out);
for (int j = 0; j < 32; ++j)
output[j * 32 + i] = temp_out[j];
}
for (int i = 0; i < 32; ++i) {
double temp_in[32], temp_out[32];
for (int j = 0; j < 32; ++j)
temp_in[j] = output[j + i*32];
reference_32x32_dct_1d(temp_in, temp_out);
for (int j = 0; j < 32; ++j)
output[j + i * 32] = temp_out[j] / 4;
}
}
|
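
reference_32x32_dct_2d above is separable: a 1-D pass over columns, a 1-D pass over rows, then a divide by 4. The patch below shows only the signature of the 1-D kernel; a sketch of its body, reconstructed from the visible constants (kPi, kInvSqrt2) and the standard DCT-II definition, with the in-tree source authoritative:

#include <cmath>

const double kPi = 3.141592653589793238462643383279502884;

// Plain-double DCT-II over 32 samples; the DC term carries the 1/sqrt(2)
// weight that makes the basis orthogonal.
void reference_32x32_dct_1d(const double in[32], double out[32]) {
  const double kInvSqrt2 = 0.707106781186547524400844362104;
  for (int k = 0; k < 32; k++) {
    out[k] = 0.0;
    for (int n = 0; n < 32; n++)
      out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 64.0);
    if (k == 0)
      out[k] = out[k] * kInvSqrt2;
  }
}
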
@@ -13,15 +13,18 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vpx_config.h"
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
@@ -37,7 +40,7 @@
const int kNumCoeffs = 1024;
const double kPi = 3.141592653589793238462643383279502884;
-void reference_32x32_dct_1d(const double in[32], double out[32], int stride) {
+void reference_32x32_dct_1d(const double in[32], double out[32]) {
const double kInvSqrt2 = 0.707106781186547524400844362104;
for (int k = 0; k < 32; k++) {
out[k] = 0.0;
@@ -55,7 +58,7 @@
double temp_in[32], temp_out[32];
for (int j = 0; j < 32; ++j)
temp_in[j] = input[j*32 + i];
- reference_32x32_dct_1d(temp_in, temp_out, 1);
+ reference_32x32_dct_1d(temp_in, temp_out);
for (int j = 0; j < 32; ++j)
output[j * 32 + i] = temp_out[j];
}
@@ -64,19 +67,34 @@
double temp_in[32], temp_out[32];
for (int j = 0; j < 32; ++j)
temp_in[j] = output[j + i*32];
- reference_32x32_dct_1d(temp_in, temp_out, 1);
+ reference_32x32_dct_1d(temp_in, temp_out);
// Scale by some magic number
for (int j = 0; j < 32; ++j)
output[j + i * 32] = temp_out[j] / 4;
}
}
-typedef void (*fwd_txfm_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*inv_txfm_t)(const int16_t *in, uint8_t *out, int stride);
+typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<fwd_txfm_t, inv_txfm_t, int> trans_32x32_param_t;
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, int, vpx_bit_depth_t>
+ Trans32x32Param;
-class Trans32x32Test : public ::testing::TestWithParam<trans_32x32_param_t> {
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct32x32_8(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct32x32_1024_add_c(in, out, stride, 8);
+}
+
+void idct32x32_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct32x32_1024_add_c(in, out, stride, 10);
+}
+
+void idct32x32_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct32x32_1024_add_c(in, out, stride, 12);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+class Trans32x32Test : public ::testing::TestWithParam<Trans32x32Param> {
public:
virtual ~Trans32x32Test() {}
virtual void SetUp() {
@@ -84,39 +102,67 @@
inv_txfm_ = GET_PARAM(1);
version_ = GET_PARAM(2); // 0: high precision forward transform
// 1: low precision version for rd loop
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
int version_;
- fwd_txfm_t fwd_txfm_;
- inv_txfm_t inv_txfm_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ FwdTxfmFunc fwd_txfm_;
+ InvTxfmFunc inv_txfm_;
};
TEST_P(Trans32x32Test, AccuracyCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
- const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ const int count_test_block = 10000;
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(fwd_txfm_(test_input_block, test_temp_block, 32));
- REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
+ ASM_REGISTER_STATE_CHECK(fwd_txfm_(test_input_block, test_temp_block, 32));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), 32));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -129,10 +175,10 @@
total_error /= 45;
}
- EXPECT_GE(1u, max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 32x32 FDCT/IDCT has an individual round-trip error > 1";
- EXPECT_GE(count_test_block, total_error)
+ EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
<< "Error: 32x32 FDCT/IDCT has average round-trip error > 1 per block";
}
@@ -140,17 +186,17 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
const int stride = 32;
- vp9_fdct32x32_c(input_block, output_ref_block, stride);
- REGISTER_STATE_CHECK(fwd_txfm_(input_block, output_block, stride));
+ vpx_fdct32x32_c(input_block, output_ref_block, stride);
+ ASM_REGISTER_STATE_CHECK(fwd_txfm_(input_block, output_block, stride));
if (version_ == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
@@ -168,27 +214,27 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 2000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() & 1 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() & 1 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
const int stride = 32;
- vp9_fdct32x32_c(input_extreme_block, output_ref_block, stride);
- REGISTER_STATE_CHECK(fwd_txfm_(input_extreme_block, output_block, stride));
+ vpx_fdct32x32_c(input_extreme_block, output_ref_block, stride);
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_(input_extreme_block, output_block, stride));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
@@ -199,9 +245,9 @@
EXPECT_GE(6, abs(output_block[j] - output_ref_block[j]))
<< "Error: 32x32 FDCT rd has mismatched coefficients";
}
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_ref_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_ref_block[j]))
<< "Error: 32x32 FDCT C has coefficient larger than 4*DCT_MAX_VALUE";
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 32x32 FDCT has coefficient larger than "
<< "4*DCT_MAX_VALUE";
}
@@ -211,27 +257,50 @@
TEST_P(Trans32x32Test, InverseAccuracy) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
double out_r[kNumCoeffs];
    // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
reference_32x32_dct_2d(in, out_r);
for (int j = 0; j < kNumCoeffs; ++j)
- coeff[j] = round(out_r[j]);
- REGISTER_STATE_CHECK(inv_txfm_(coeff, dst, 32));
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, dst, 32));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, CONVERT_TO_BYTEPTR(dst16), 32));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
EXPECT_GE(1, error)
<< "Error: 32x32 IDCT has error " << error
@@ -242,39 +311,85 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans32x32Test,
::testing::Values(
- make_tuple(&vp9_fdct32x32_c, &vp9_idct32x32_1024_add_c, 0),
- make_tuple(&vp9_fdct32x32_rd_c, &vp9_idct32x32_1024_add_c, 1)));
+ make_tuple(&vpx_highbd_fdct32x32_c,
+ &idct32x32_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct32x32_rd_c,
+ &idct32x32_10, 1, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct32x32_c,
+ &idct32x32_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct32x32_rd_c,
+ &idct32x32_12, 1, VPX_BITS_12),
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_c,
+ &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans32x32Test,
+ ::testing::Values(
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_c,
+ &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans32x32Test,
::testing::Values(
- make_tuple(&vp9_fdct32x32_c,
- &vp9_idct32x32_1024_add_neon, 0),
- make_tuple(&vp9_fdct32x32_rd_c,
- &vp9_idct32x32_1024_add_neon, 1)));
-#endif
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_c,
+ &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans32x32Test,
::testing::Values(
- make_tuple(&vp9_fdct32x32_sse2,
- &vp9_idct32x32_1024_add_sse2, 0),
- make_tuple(&vp9_fdct32x32_rd_sse2,
- &vp9_idct32x32_1024_add_sse2, 1)));
-#endif
+ make_tuple(&vpx_fdct32x32_sse2,
+ &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_sse2,
+ &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_AVX2
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans32x32Test,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct32x32_sse2, &idct32x32_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct32x32_rd_sse2, &idct32x32_10, 1,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct32x32_sse2, &idct32x32_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct32x32_rd_sse2, &idct32x32_12, 1,
+ VPX_BITS_12),
+ make_tuple(&vpx_fdct32x32_sse2, &vpx_idct32x32_1024_add_c, 0,
+ VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_sse2, &vpx_idct32x32_1024_add_c, 1,
+ VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
AVX2, Trans32x32Test,
::testing::Values(
- make_tuple(&vp9_fdct32x32_avx2,
- &vp9_idct32x32_1024_add_sse2, 0),
- make_tuple(&vp9_fdct32x32_rd_avx2,
- &vp9_idct32x32_1024_add_sse2, 1)));
-#endif
+ make_tuple(&vpx_fdct32x32_avx2,
+ &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_avx2,
+ &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
+#endif // HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans32x32Test,
+ ::testing::Values(
+ make_tuple(&vpx_fdct32x32_msa,
+ &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_msa,
+ &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
reference_32x32_dct_1d(temp_in, temp_out, 1);
reference_32x32_dct_1d(temp_in, temp_out, 1);
|
reference_32x32_dct_1d(temp_in, temp_out);
reference_32x32_dct_1d(temp_in, temp_out);
|
150,845 |
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
vpx_codec_err_t res_dec;
InitOnce();
REGISTER_STATE_CHECK(
res_dec = vpx_codec_decode(&decoder_,
cxdata, static_cast<unsigned int>(size),
NULL, 0));
return res_dec;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
return DecodeFrame(cxdata, size, NULL);
}
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
void *user_priv) {
vpx_codec_err_t res_dec;
InitOnce();
API_REGISTER_STATE_CHECK(
res_dec = vpx_codec_decode(&decoder_,
cxdata, static_cast<unsigned int>(size),
user_priv, 0));
return res_dec;
}
|
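
The fixed DecodeFrame above keeps the old two-argument signature as a thin wrapper delegating to a new overload, so existing callers compile unchanged while tests gain the user_priv hook. A hypothetical caller-side sketch of what that hook does at the vpx_codec level, assuming vpx_image_t's user_priv field carries the pointer back out; FrameTag and decode_with_tag are illustrative names:

#include <cstddef>
#include "vpx/vpx_decoder.h"

struct FrameTag { int frame_number; };

void decode_with_tag(vpx_codec_ctx_t *decoder, const uint8_t *data,
                     size_t size, FrameTag *tag) {
  // The tag pointer rides along with this compressed frame...
  vpx_codec_decode(decoder, data, static_cast<unsigned int>(size), tag, 0);

  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;
  while ((img = vpx_codec_get_frame(decoder, &iter)) != NULL) {
    // ...and is handed back on each image the frame produced.
    const FrameTag *out = static_cast<const FrameTag *>(img->user_priv);
    (void)out;
  }
}
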
@@ -7,35 +7,95 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/register_state_check.h"
#include "test/video_source.h"
namespace libvpx_test {
+const char kVP8Name[] = "WebM Project VP8";
+
+vpx_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size,
+ vpx_codec_stream_info_t *stream_info) {
+ return vpx_codec_peek_stream_info(CodecInterface(),
+ cxdata, static_cast<unsigned int>(size),
+ stream_info);
+}
+
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
+ return DecodeFrame(cxdata, size, NULL);
+}
+
+vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
+ void *user_priv) {
vpx_codec_err_t res_dec;
InitOnce();
- REGISTER_STATE_CHECK(
+ API_REGISTER_STATE_CHECK(
res_dec = vpx_codec_decode(&decoder_,
cxdata, static_cast<unsigned int>(size),
- NULL, 0));
+ user_priv, 0));
return res_dec;
}
-void DecoderTest::RunLoop(CompressedVideoSource *video) {
- vpx_codec_dec_cfg_t dec_cfg = {0};
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+bool Decoder::IsVP8() const {
+ const char *codec_name = GetDecoderName();
+ return strncmp(kVP8Name, codec_name, sizeof(kVP8Name) - 1) == 0;
+}
+
+void DecoderTest::HandlePeekResult(Decoder *const decoder,
+ CompressedVideoSource *video,
+ const vpx_codec_err_t res_peek) {
+ const bool is_vp8 = decoder->IsVP8();
+ if (is_vp8) {
+ /* Vp8's implementation of PeekStream returns an error if the frame you
+ * pass it is not a keyframe, so we only expect VPX_CODEC_OK on the first
+ * frame, which must be a keyframe. */
+ if (video->frame_number() == 0)
+ ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
+ << vpx_codec_err_to_string(res_peek);
+ } else {
+ /* The Vp9 implementation of PeekStream returns an error only if the
+ * data passed to it isn't a valid Vp9 chunk. */
+ ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
+ << vpx_codec_err_to_string(res_peek);
+ }
+}
+
+void DecoderTest::RunLoop(CompressedVideoSource *video,
+ const vpx_codec_dec_cfg_t &dec_cfg) {
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
ASSERT_TRUE(decoder != NULL);
+ bool end_of_file = false;
// Decode frames.
- for (video->Begin(); video->cxdata(); video->Next()) {
+ for (video->Begin(); !::testing::Test::HasFailure() && !end_of_file;
+ video->Next()) {
PreDecodeFrameHook(*video, decoder);
- vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
- video->frame_size());
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+
+ vpx_codec_stream_info_t stream_info;
+ stream_info.sz = sizeof(stream_info);
+
+ if (video->cxdata() != NULL) {
+ const vpx_codec_err_t res_peek = decoder->PeekStream(video->cxdata(),
+ video->frame_size(),
+ &stream_info);
+ HandlePeekResult(decoder, video, res_peek);
+ ASSERT_FALSE(::testing::Test::HasFailure());
+
+ vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
+ video->frame_size());
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+ } else {
+ // Signal end of the file to the decoder.
+ const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+ end_of_file = true;
+ }
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img = NULL;
@@ -44,7 +104,20 @@
while ((img = dec_iter.Next()))
DecompressedFrameHook(*img, video->frame_number());
}
-
delete decoder;
}
+
+void DecoderTest::RunLoop(CompressedVideoSource *video) {
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
+ RunLoop(video, dec_cfg);
+}
+
+void DecoderTest::set_cfg(const vpx_codec_dec_cfg_t &dec_cfg) {
+ memcpy(&cfg_, &dec_cfg, sizeof(cfg_));
+}
+
+void DecoderTest::set_flags(const vpx_codec_flags_t flags) {
+ flags_ = flags;
+}
+
} // namespace libvpx_test
|
CWE-119
|
REGISTER_STATE_CHECK(
NULL, 0));
|
return DecodeFrame(cxdata, size, NULL);
}
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
void *user_priv) {
API_REGISTER_STATE_CHECK(
user_priv, 0));
|
150,846 |
void DecoderTest::RunLoop(CompressedVideoSource *video) {
vpx_codec_dec_cfg_t dec_cfg = {0};
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
ASSERT_TRUE(decoder != NULL);
for (video->Begin(); video->cxdata(); video->Next()) {
PreDecodeFrameHook(*video, decoder);
vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
video->frame_size());
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img = NULL;
while ((img = dec_iter.Next()))
DecompressedFrameHook(*img, video->frame_number());
}
delete decoder;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void DecoderTest::RunLoop(CompressedVideoSource *video) {
bool Decoder::IsVP8() const {
const char *codec_name = GetDecoderName();
return strncmp(kVP8Name, codec_name, sizeof(kVP8Name) - 1) == 0;
}
void DecoderTest::HandlePeekResult(Decoder *const decoder,
CompressedVideoSource *video,
const vpx_codec_err_t res_peek) {
const bool is_vp8 = decoder->IsVP8();
if (is_vp8) {
/* Vp8's implementation of PeekStream returns an error if the frame you
* pass it is not a keyframe, so we only expect VPX_CODEC_OK on the first
* frame, which must be a keyframe. */
if (video->frame_number() == 0)
ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
<< vpx_codec_err_to_string(res_peek);
} else {
/* The Vp9 implementation of PeekStream returns an error only if the
* data passed to it isn't a valid Vp9 chunk. */
ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
<< vpx_codec_err_to_string(res_peek);
}
}
void DecoderTest::RunLoop(CompressedVideoSource *video,
const vpx_codec_dec_cfg_t &dec_cfg) {
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
ASSERT_TRUE(decoder != NULL);
bool end_of_file = false;
for (video->Begin(); !::testing::Test::HasFailure() && !end_of_file;
video->Next()) {
PreDecodeFrameHook(*video, decoder);
vpx_codec_stream_info_t stream_info;
stream_info.sz = sizeof(stream_info);
if (video->cxdata() != NULL) {
const vpx_codec_err_t res_peek = decoder->PeekStream(video->cxdata(),
video->frame_size(),
&stream_info);
HandlePeekResult(decoder, video, res_peek);
ASSERT_FALSE(::testing::Test::HasFailure());
vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
video->frame_size());
if (!HandleDecodeResult(res_dec, *video, decoder))
break;
} else {
// Signal end of the file to the decoder.
const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
end_of_file = true;
}
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img = NULL;
while ((img = dec_iter.Next()))
DecompressedFrameHook(*img, video->frame_number());
}
delete decoder;
}
|
@@ -7,35 +7,95 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/register_state_check.h"
#include "test/video_source.h"
namespace libvpx_test {
+const char kVP8Name[] = "WebM Project VP8";
+
+vpx_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size,
+ vpx_codec_stream_info_t *stream_info) {
+ return vpx_codec_peek_stream_info(CodecInterface(),
+ cxdata, static_cast<unsigned int>(size),
+ stream_info);
+}
+
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
+ return DecodeFrame(cxdata, size, NULL);
+}
+
+vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
+ void *user_priv) {
vpx_codec_err_t res_dec;
InitOnce();
- REGISTER_STATE_CHECK(
+ API_REGISTER_STATE_CHECK(
res_dec = vpx_codec_decode(&decoder_,
cxdata, static_cast<unsigned int>(size),
- NULL, 0));
+ user_priv, 0));
return res_dec;
}
-void DecoderTest::RunLoop(CompressedVideoSource *video) {
- vpx_codec_dec_cfg_t dec_cfg = {0};
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+bool Decoder::IsVP8() const {
+ const char *codec_name = GetDecoderName();
+ return strncmp(kVP8Name, codec_name, sizeof(kVP8Name) - 1) == 0;
+}
+
+void DecoderTest::HandlePeekResult(Decoder *const decoder,
+ CompressedVideoSource *video,
+ const vpx_codec_err_t res_peek) {
+ const bool is_vp8 = decoder->IsVP8();
+ if (is_vp8) {
+ /* Vp8's implementation of PeekStream returns an error if the frame you
+ * pass it is not a keyframe, so we only expect VPX_CODEC_OK on the first
+ * frame, which must be a keyframe. */
+ if (video->frame_number() == 0)
+ ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
+ << vpx_codec_err_to_string(res_peek);
+ } else {
+ /* The Vp9 implementation of PeekStream returns an error only if the
+ * data passed to it isn't a valid Vp9 chunk. */
+ ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
+ << vpx_codec_err_to_string(res_peek);
+ }
+}
+
+void DecoderTest::RunLoop(CompressedVideoSource *video,
+ const vpx_codec_dec_cfg_t &dec_cfg) {
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
ASSERT_TRUE(decoder != NULL);
+ bool end_of_file = false;
// Decode frames.
- for (video->Begin(); video->cxdata(); video->Next()) {
+ for (video->Begin(); !::testing::Test::HasFailure() && !end_of_file;
+ video->Next()) {
PreDecodeFrameHook(*video, decoder);
- vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
- video->frame_size());
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+
+ vpx_codec_stream_info_t stream_info;
+ stream_info.sz = sizeof(stream_info);
+
+ if (video->cxdata() != NULL) {
+ const vpx_codec_err_t res_peek = decoder->PeekStream(video->cxdata(),
+ video->frame_size(),
+ &stream_info);
+ HandlePeekResult(decoder, video, res_peek);
+ ASSERT_FALSE(::testing::Test::HasFailure());
+
+ vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
+ video->frame_size());
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+ } else {
+ // Signal end of the file to the decoder.
+ const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+ end_of_file = true;
+ }
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img = NULL;
@@ -44,7 +104,20 @@
while ((img = dec_iter.Next()))
DecompressedFrameHook(*img, video->frame_number());
}
-
delete decoder;
}
+
+void DecoderTest::RunLoop(CompressedVideoSource *video) {
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
+ RunLoop(video, dec_cfg);
+}
+
+void DecoderTest::set_cfg(const vpx_codec_dec_cfg_t &dec_cfg) {
+ memcpy(&cfg_, &dec_cfg, sizeof(cfg_));
+}
+
+void DecoderTest::set_flags(const vpx_codec_flags_t flags) {
+ flags_ = flags;
+}
+
} // namespace libvpx_test
|
CWE-119
|
vpx_codec_dec_cfg_t dec_cfg = {0};
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
for (video->Begin(); video->cxdata(); video->Next()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
video->frame_size());
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
|
bool Decoder::IsVP8() const {
const char *codec_name = GetDecoderName();
return strncmp(kVP8Name, codec_name, sizeof(kVP8Name) - 1) == 0;
}
void DecoderTest::HandlePeekResult(Decoder *const decoder,
CompressedVideoSource *video,
const vpx_codec_err_t res_peek) {
const bool is_vp8 = decoder->IsVP8();
if (is_vp8) {
/* Vp8's implementation of PeekStream returns an error if the frame you
* pass it is not a keyframe, so we only expect VPX_CODEC_OK on the first
* frame, which must be a keyframe. */
if (video->frame_number() == 0)
ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
<< vpx_codec_err_to_string(res_peek);
} else {
/* The Vp9 implementation of PeekStream returns an error only if the
* data passed to it isn't a valid Vp9 chunk. */
ASSERT_EQ(VPX_CODEC_OK, res_peek) << "Peek return failed: "
<< vpx_codec_err_to_string(res_peek);
}
}
void DecoderTest::RunLoop(CompressedVideoSource *video,
const vpx_codec_dec_cfg_t &dec_cfg) {
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
bool end_of_file = false;
for (video->Begin(); !::testing::Test::HasFailure() && !end_of_file;
video->Next()) {
vpx_codec_stream_info_t stream_info;
stream_info.sz = sizeof(stream_info);
if (video->cxdata() != NULL) {
const vpx_codec_err_t res_peek = decoder->PeekStream(video->cxdata(),
video->frame_size(),
&stream_info);
HandlePeekResult(decoder, video, res_peek);
ASSERT_FALSE(::testing::Test::HasFailure());
vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
video->frame_size());
if (!HandleDecodeResult(res_dec, *video, decoder))
break;
} else {
// Signal end of the file to the decoder.
const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
end_of_file = true;
}
|
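For context on the PeekStream()/HandlePeekResult() flow in the record above: libvpx exposes vpx_codec_peek_stream_info() to inspect a compressed chunk before decoding, and the si.sz field must be set before the call. A minimal sketch, assuming a VP9 build and a caller-supplied buffer buf/buf_sz holding one compressed frame (peek_frame is an illustrative name, not part of the test driver):

#include <cstdio>
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"  // declares vpx_codec_vp9_dx() as well

// Peek at a chunk without decoding it. VP9 rejects only invalid
// chunks, while VP8 also errors on non-keyframes, which is why the
// test above special-cases frame 0 for VP8.
static bool peek_frame(const uint8_t *buf, size_t buf_sz) {
  vpx_codec_stream_info_t si;
  si.sz = sizeof(si);  // required before calling peek
  if (vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), buf,
                                 static_cast<unsigned int>(buf_sz),
                                 &si) != VPX_CODEC_OK)
    return false;
  printf("%ux%u keyframe=%u\n", si.w, si.h, si.is_kf);
  return true;
}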
150,847 |
void Encoder::Flush() {
const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
deadline_);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void Encoder::Flush() {
const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
deadline_);
if (!encoder_.priv)
ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
else
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
|
@@ -8,15 +8,59 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "test/codec_factory.h"
-#include "test/encode_test_driver.h"
-#include "test/decode_test_driver.h"
-#include "test/register_state_check.h"
-#include "test/video_source.h"
+#include <string>
+
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "test/codec_factory.h"
+#include "test/decode_test_driver.h"
+#include "test/encode_test_driver.h"
+#include "test/register_state_check.h"
+#include "test/video_source.h"
+
namespace libvpx_test {
+void Encoder::InitEncoder(VideoSource *video) {
+ vpx_codec_err_t res;
+ const vpx_image_t *img = video->img();
+
+ if (video->img() && !encoder_.priv) {
+ cfg_.g_w = img->d_w;
+ cfg_.g_h = img->d_h;
+ cfg_.g_timebase = video->timebase();
+ cfg_.rc_twopass_stats_in = stats_->buf();
+
+ res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
+ init_flags_);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+
+#if CONFIG_VP9_ENCODER
+ if (CodecInterface() == &vpx_codec_vp9_cx_algo) {
+ // Default to 1 tile column for VP9.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+#if CONFIG_VP10_ENCODER
+ if (CodecInterface() == &vpx_codec_vp10_cx_algo) {
+ // Default to 1 tile column for VP10.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+ {
+#if CONFIG_VP8_ENCODER
+ ASSERT_EQ(&vpx_codec_vp8_cx_algo, CodecInterface())
+ << "Unknown Codec Interface";
+#endif
+ }
+ }
+}
+
void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) {
if (video->img())
EncodeFrameInternal(*video, frame_flags);
@@ -39,17 +83,6 @@
vpx_codec_err_t res;
const vpx_image_t *img = video.img();
- // Handle first frame initialization
- if (!encoder_.priv) {
- cfg_.g_w = img->d_w;
- cfg_.g_h = img->d_h;
- cfg_.g_timebase = video.timebase();
- cfg_.rc_twopass_stats_in = stats_->buf();
- res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
- init_flags_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
- }
-
// Handle frame resizing
if (cfg_.g_w != img->d_w || cfg_.g_h != img->d_h) {
cfg_.g_w = img->d_w;
@@ -59,9 +92,8 @@
}
// Encode the frame
- REGISTER_STATE_CHECK(
- res = vpx_codec_encode(&encoder_,
- video.img(), video.pts(), video.duration(),
+ API_REGISTER_STATE_CHECK(
+ res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(),
frame_flags, deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -69,11 +101,15 @@
void Encoder::Flush() {
const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
deadline_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ if (!encoder_.priv)
+ ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
+ else
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
void EncoderTest::InitializeConfig() {
const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
+ dec_cfg_ = vpx_codec_dec_cfg_t();
ASSERT_EQ(VPX_CODEC_OK, res);
}
@@ -107,6 +143,7 @@
static bool compare_img(const vpx_image_t *img1,
const vpx_image_t *img2) {
bool match = (img1->fmt == img2->fmt) &&
+ (img1->cs == img2->cs) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h);
@@ -130,13 +167,13 @@
return match;
}
-void EncoderTest::MismatchHook(const vpx_image_t *img1,
- const vpx_image_t *img2) {
+void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
+ const vpx_image_t* /*img2*/) {
ASSERT_TRUE(0) << "Encode/Decode mismatch found";
}
void EncoderTest::RunLoop(VideoSource *video) {
- vpx_codec_dec_cfg_t dec_cfg = {0};
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
stats_.Reset();
@@ -155,9 +192,18 @@
Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
&stats_);
ASSERT_TRUE(encoder != NULL);
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+
+ video->Begin();
+ encoder->InitEncoder(video);
+
+ unsigned long dec_init_flags = 0; // NOLINT
+ // Use fragment decoder if encoder outputs partitions.
+ // NOTE: fragment decoder and partition encoder are only supported by VP8.
+ if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
+ dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
bool again;
- for (again = true, video->Begin(); again; video->Next()) {
+ for (again = true; again; video->Next()) {
again = (video->img() != NULL);
PreEncodeFrameHook(video);
@@ -177,7 +223,10 @@
if (decoder && DoDecode()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(
(const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+
has_dxdata = true;
}
ASSERT_GE(pkt->data.frame.pts, last_pts_);
@@ -194,6 +243,13 @@
}
}
+ // Flush the decoder when there are no more fragments.
+ if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
+ const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+ }
+
if (has_dxdata && has_cxdata) {
const vpx_image_t *img_enc = encoder->GetPreviewFrame();
DxDataIterator dec_iter = decoder->GetDxData();
|
CWE-119
|
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
|
if (!encoder_.priv)
ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
else
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
|
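On the Flush() change in this record: flushing a libvpx encoder is an encode call with a NULL image, and the patched assertion distinguishes the lazy-init case (encoder_.priv still NULL because no frame was ever submitted, so the flush fails with VPX_CODEC_ERROR) from the normal drain. A sketch of a stand-alone flush against an already-initialized context, using only documented libvpx calls (flush_encoder is an illustrative name):

#include "vpx/vpx_encoder.h"

// Drain delayed frames out of an initialized encoder. Passing a NULL
// image signals end-of-stream; remaining packets are then pulled with
// the usual iterator loop.
static vpx_codec_err_t flush_encoder(vpx_codec_ctx_t *ctx,
                                     unsigned long deadline) {
  const vpx_codec_err_t res = vpx_codec_encode(ctx, NULL, 0, 0, 0, deadline);
  if (res != VPX_CODEC_OK) return res;  // VPX_CODEC_ERROR if never inited
  vpx_codec_iter_t iter = NULL;
  const vpx_codec_cx_pkt_t *pkt;
  while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) {
    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
      // pkt->data.frame.buf / pkt->data.frame.sz hold delayed bitstream.
    }
  }
  return VPX_CODEC_OK;
}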
150,848 |
void EncoderTest::MismatchHook(const vpx_image_t *img1,
const vpx_image_t *img2) {
ASSERT_TRUE(0) << "Encode/Decode mismatch found";
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
const vpx_image_t* /*img2*/) {
ASSERT_TRUE(0) << "Encode/Decode mismatch found";
}
|
@@ -8,15 +8,59 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "test/codec_factory.h"
-#include "test/encode_test_driver.h"
-#include "test/decode_test_driver.h"
-#include "test/register_state_check.h"
-#include "test/video_source.h"
+#include <string>
+
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "test/codec_factory.h"
+#include "test/decode_test_driver.h"
+#include "test/encode_test_driver.h"
+#include "test/register_state_check.h"
+#include "test/video_source.h"
+
namespace libvpx_test {
+void Encoder::InitEncoder(VideoSource *video) {
+ vpx_codec_err_t res;
+ const vpx_image_t *img = video->img();
+
+ if (video->img() && !encoder_.priv) {
+ cfg_.g_w = img->d_w;
+ cfg_.g_h = img->d_h;
+ cfg_.g_timebase = video->timebase();
+ cfg_.rc_twopass_stats_in = stats_->buf();
+
+ res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
+ init_flags_);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+
+#if CONFIG_VP9_ENCODER
+ if (CodecInterface() == &vpx_codec_vp9_cx_algo) {
+ // Default to 1 tile column for VP9.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+#if CONFIG_VP10_ENCODER
+ if (CodecInterface() == &vpx_codec_vp10_cx_algo) {
+ // Default to 1 tile column for VP10.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+ {
+#if CONFIG_VP8_ENCODER
+ ASSERT_EQ(&vpx_codec_vp8_cx_algo, CodecInterface())
+ << "Unknown Codec Interface";
+#endif
+ }
+ }
+}
+
void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) {
if (video->img())
EncodeFrameInternal(*video, frame_flags);
@@ -39,17 +83,6 @@
vpx_codec_err_t res;
const vpx_image_t *img = video.img();
- // Handle first frame initialization
- if (!encoder_.priv) {
- cfg_.g_w = img->d_w;
- cfg_.g_h = img->d_h;
- cfg_.g_timebase = video.timebase();
- cfg_.rc_twopass_stats_in = stats_->buf();
- res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
- init_flags_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
- }
-
// Handle frame resizing
if (cfg_.g_w != img->d_w || cfg_.g_h != img->d_h) {
cfg_.g_w = img->d_w;
@@ -59,9 +92,8 @@
}
// Encode the frame
- REGISTER_STATE_CHECK(
- res = vpx_codec_encode(&encoder_,
- video.img(), video.pts(), video.duration(),
+ API_REGISTER_STATE_CHECK(
+ res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(),
frame_flags, deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -69,11 +101,15 @@
void Encoder::Flush() {
const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
deadline_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ if (!encoder_.priv)
+ ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
+ else
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
void EncoderTest::InitializeConfig() {
const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
+ dec_cfg_ = vpx_codec_dec_cfg_t();
ASSERT_EQ(VPX_CODEC_OK, res);
}
@@ -107,6 +143,7 @@
static bool compare_img(const vpx_image_t *img1,
const vpx_image_t *img2) {
bool match = (img1->fmt == img2->fmt) &&
+ (img1->cs == img2->cs) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h);
@@ -130,13 +167,13 @@
return match;
}
-void EncoderTest::MismatchHook(const vpx_image_t *img1,
- const vpx_image_t *img2) {
+void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
+ const vpx_image_t* /*img2*/) {
ASSERT_TRUE(0) << "Encode/Decode mismatch found";
}
void EncoderTest::RunLoop(VideoSource *video) {
- vpx_codec_dec_cfg_t dec_cfg = {0};
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
stats_.Reset();
@@ -155,9 +192,18 @@
Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
&stats_);
ASSERT_TRUE(encoder != NULL);
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+
+ video->Begin();
+ encoder->InitEncoder(video);
+
+ unsigned long dec_init_flags = 0; // NOLINT
+ // Use fragment decoder if encoder outputs partitions.
+ // NOTE: fragment decoder and partition encoder are only supported by VP8.
+ if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
+ dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
bool again;
- for (again = true, video->Begin(); again; video->Next()) {
+ for (again = true; again; video->Next()) {
again = (video->img() != NULL);
PreEncodeFrameHook(video);
@@ -177,7 +223,10 @@
if (decoder && DoDecode()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(
(const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+
has_dxdata = true;
}
ASSERT_GE(pkt->data.frame.pts, last_pts_);
@@ -194,6 +243,13 @@
}
}
+ // Flush the decoder when there are no more fragments.
+ if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
+ const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+ }
+
if (has_dxdata && has_cxdata) {
const vpx_image_t *img_enc = encoder->GetPreviewFrame();
DxDataIterator dec_iter = decoder->GetDxData();
|
CWE-119
|
const vpx_image_t *img2) {
|
void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
const vpx_image_t* /*img2*/) {
|
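The only substantive change in this record is the commented-out parameter names, a standard C++ way to keep a virtual signature intact while silencing unused-parameter warnings. A generic illustration (OnMismatch is a hypothetical function, not from the test driver):

// The parameter types stay in the signature, so overriding and
// overload resolution are unaffected; only the names are elided,
// which tells -Wunused-parameter the arguments are deliberately unused.
void OnMismatch(const vpx_image_t * /*img_enc*/,
                const vpx_image_t * /*img_dec*/) {
  // Deliberately empty: the default hook only needs to fail the test.
}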
150,849 |
void EncoderTest::RunLoop(VideoSource *video) {
vpx_codec_dec_cfg_t dec_cfg = {0};
stats_.Reset();
ASSERT_TRUE(passes_ == 1 || passes_ == 2);
for (unsigned int pass = 0; pass < passes_; pass++) {
last_pts_ = 0;
if (passes_ == 1)
cfg_.g_pass = VPX_RC_ONE_PASS;
else if (pass == 0)
cfg_.g_pass = VPX_RC_FIRST_PASS;
else
cfg_.g_pass = VPX_RC_LAST_PASS;
BeginPassHook(pass);
Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
&stats_);
ASSERT_TRUE(encoder != NULL);
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
bool again;
for (again = true, video->Begin(); again; video->Next()) {
again = (video->img() != NULL);
PreEncodeFrameHook(video);
PreEncodeFrameHook(video, encoder);
encoder->EncodeFrame(video, frame_flags_);
CxDataIterator iter = encoder->GetCxData();
bool has_cxdata = false;
bool has_dxdata = false;
while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
pkt = MutateEncoderOutputHook(pkt);
again = true;
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT:
has_cxdata = true;
if (decoder && DoDecode()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(
(const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
has_dxdata = true;
}
ASSERT_GE(pkt->data.frame.pts, last_pts_);
last_pts_ = pkt->data.frame.pts;
FramePktHook(pkt);
break;
case VPX_CODEC_PSNR_PKT:
PSNRPktHook(pkt);
break;
default:
break;
}
}
if (has_dxdata && has_cxdata) {
const vpx_image_t *img_enc = encoder->GetPreviewFrame();
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img_dec = dec_iter.Next();
if (img_enc && img_dec) {
const bool res = compare_img(img_enc, img_dec);
if (!res) { // Mismatch
MismatchHook(img_enc, img_dec);
}
}
if (img_dec)
DecompressedFrameHook(*img_dec, video->pts());
}
if (!Continue())
break;
}
EndPassHook();
if (decoder)
delete decoder;
delete encoder;
if (!Continue())
break;
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void EncoderTest::RunLoop(VideoSource *video) {
vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
stats_.Reset();
ASSERT_TRUE(passes_ == 1 || passes_ == 2);
for (unsigned int pass = 0; pass < passes_; pass++) {
last_pts_ = 0;
if (passes_ == 1)
cfg_.g_pass = VPX_RC_ONE_PASS;
else if (pass == 0)
cfg_.g_pass = VPX_RC_FIRST_PASS;
else
cfg_.g_pass = VPX_RC_LAST_PASS;
BeginPassHook(pass);
Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
&stats_);
ASSERT_TRUE(encoder != NULL);
video->Begin();
encoder->InitEncoder(video);
unsigned long dec_init_flags = 0; // NOLINT
// Use fragment decoder if encoder outputs partitions.
// NOTE: fragment decoder and partition encoder are only supported by VP8.
if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
bool again;
for (again = true; again; video->Next()) {
again = (video->img() != NULL);
PreEncodeFrameHook(video);
PreEncodeFrameHook(video, encoder);
encoder->EncodeFrame(video, frame_flags_);
CxDataIterator iter = encoder->GetCxData();
bool has_cxdata = false;
bool has_dxdata = false;
while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
pkt = MutateEncoderOutputHook(pkt);
again = true;
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT:
has_cxdata = true;
if (decoder && DoDecode()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(
(const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
if (!HandleDecodeResult(res_dec, *video, decoder))
break;
has_dxdata = true;
}
ASSERT_GE(pkt->data.frame.pts, last_pts_);
last_pts_ = pkt->data.frame.pts;
FramePktHook(pkt);
break;
case VPX_CODEC_PSNR_PKT:
PSNRPktHook(pkt);
break;
default:
break;
}
}
// Flush the decoder when there are no more fragments.
if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
if (!HandleDecodeResult(res_dec, *video, decoder))
break;
}
if (has_dxdata && has_cxdata) {
const vpx_image_t *img_enc = encoder->GetPreviewFrame();
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img_dec = dec_iter.Next();
if (img_enc && img_dec) {
const bool res = compare_img(img_enc, img_dec);
if (!res) { // Mismatch
MismatchHook(img_enc, img_dec);
}
}
if (img_dec)
DecompressedFrameHook(*img_dec, video->pts());
}
if (!Continue())
break;
}
EndPassHook();
if (decoder)
delete decoder;
delete encoder;
if (!Continue())
break;
}
}
|
@@ -8,15 +8,59 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "test/codec_factory.h"
-#include "test/encode_test_driver.h"
-#include "test/decode_test_driver.h"
-#include "test/register_state_check.h"
-#include "test/video_source.h"
+#include <string>
+
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "test/codec_factory.h"
+#include "test/decode_test_driver.h"
+#include "test/encode_test_driver.h"
+#include "test/register_state_check.h"
+#include "test/video_source.h"
+
namespace libvpx_test {
+void Encoder::InitEncoder(VideoSource *video) {
+ vpx_codec_err_t res;
+ const vpx_image_t *img = video->img();
+
+ if (video->img() && !encoder_.priv) {
+ cfg_.g_w = img->d_w;
+ cfg_.g_h = img->d_h;
+ cfg_.g_timebase = video->timebase();
+ cfg_.rc_twopass_stats_in = stats_->buf();
+
+ res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
+ init_flags_);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+
+#if CONFIG_VP9_ENCODER
+ if (CodecInterface() == &vpx_codec_vp9_cx_algo) {
+ // Default to 1 tile column for VP9.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+#if CONFIG_VP10_ENCODER
+ if (CodecInterface() == &vpx_codec_vp10_cx_algo) {
+ // Default to 1 tile column for VP10.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+ {
+#if CONFIG_VP8_ENCODER
+ ASSERT_EQ(&vpx_codec_vp8_cx_algo, CodecInterface())
+ << "Unknown Codec Interface";
+#endif
+ }
+ }
+}
+
void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) {
if (video->img())
EncodeFrameInternal(*video, frame_flags);
@@ -39,17 +83,6 @@
vpx_codec_err_t res;
const vpx_image_t *img = video.img();
- // Handle first frame initialization
- if (!encoder_.priv) {
- cfg_.g_w = img->d_w;
- cfg_.g_h = img->d_h;
- cfg_.g_timebase = video.timebase();
- cfg_.rc_twopass_stats_in = stats_->buf();
- res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
- init_flags_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
- }
-
// Handle frame resizing
if (cfg_.g_w != img->d_w || cfg_.g_h != img->d_h) {
cfg_.g_w = img->d_w;
@@ -59,9 +92,8 @@
}
// Encode the frame
- REGISTER_STATE_CHECK(
- res = vpx_codec_encode(&encoder_,
- video.img(), video.pts(), video.duration(),
+ API_REGISTER_STATE_CHECK(
+ res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(),
frame_flags, deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -69,11 +101,15 @@
void Encoder::Flush() {
const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
deadline_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ if (!encoder_.priv)
+ ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
+ else
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
void EncoderTest::InitializeConfig() {
const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
+ dec_cfg_ = vpx_codec_dec_cfg_t();
ASSERT_EQ(VPX_CODEC_OK, res);
}
@@ -107,6 +143,7 @@
static bool compare_img(const vpx_image_t *img1,
const vpx_image_t *img2) {
bool match = (img1->fmt == img2->fmt) &&
+ (img1->cs == img2->cs) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h);
@@ -130,13 +167,13 @@
return match;
}
-void EncoderTest::MismatchHook(const vpx_image_t *img1,
- const vpx_image_t *img2) {
+void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
+ const vpx_image_t* /*img2*/) {
ASSERT_TRUE(0) << "Encode/Decode mismatch found";
}
void EncoderTest::RunLoop(VideoSource *video) {
- vpx_codec_dec_cfg_t dec_cfg = {0};
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
stats_.Reset();
@@ -155,9 +192,18 @@
Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
&stats_);
ASSERT_TRUE(encoder != NULL);
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+
+ video->Begin();
+ encoder->InitEncoder(video);
+
+ unsigned long dec_init_flags = 0; // NOLINT
+ // Use fragment decoder if encoder outputs partitions.
+ // NOTE: fragment decoder and partition encoder are only supported by VP8.
+ if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
+ dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
bool again;
- for (again = true, video->Begin(); again; video->Next()) {
+ for (again = true; again; video->Next()) {
again = (video->img() != NULL);
PreEncodeFrameHook(video);
@@ -177,7 +223,10 @@
if (decoder && DoDecode()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(
(const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+
has_dxdata = true;
}
ASSERT_GE(pkt->data.frame.pts, last_pts_);
@@ -194,6 +243,13 @@
}
}
+ // Flush the decoder when there are no more fragments.
+ if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
+ const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+ }
+
if (has_dxdata && has_cxdata) {
const vpx_image_t *img_enc = encoder->GetPreviewFrame();
DxDataIterator dec_iter = decoder->GetDxData();
|
CWE-119
|
vpx_codec_dec_cfg_t dec_cfg = {0};
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
for (again = true, video->Begin(); again; video->Next()) {
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
|
vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
video->Begin();
encoder->InitEncoder(video);
unsigned long dec_init_flags = 0; // NOLINT
// Use fragment decoder if encoder outputs partitions.
// NOTE: fragment decoder and partition encoder are only supported by VP8.
if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
for (again = true; again; video->Next()) {
if (!HandleDecodeResult(res_dec, *video, decoder))
break;
// Flush the decoder when there are no more fragments.
if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
if (!HandleDecodeResult(res_dec, *video, decoder))
break;
}
|
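For context on the dec_init_flags logic added above: VPX_CODEC_USE_OUTPUT_PARTITION makes the VP8 encoder emit one packet per token partition, and such a stream must be fed to a decoder initialized with VPX_CODEC_USE_INPUT_FRAGMENTS, with a NULL/0 decode call closing each frame, which is exactly the flush the patch adds. A sketch of the pairing, assuming VP8 and caller-filled configs (init_partitioned_pair is an illustrative name):

#include "vpx/vpx_decoder.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"  // vpx_codec_vp8_cx()
#include "vpx/vp8dx.h"  // vpx_codec_vp8_dx()

// Initialize an encoder/decoder pair for partitioned output (VP8 only).
static vpx_codec_err_t init_partitioned_pair(vpx_codec_ctx_t *enc,
                                             vpx_codec_ctx_t *dec,
                                             vpx_codec_enc_cfg_t *enc_cfg,
                                             vpx_codec_dec_cfg_t *dec_cfg) {
  const vpx_codec_err_t res = vpx_codec_enc_init(
      enc, vpx_codec_vp8_cx(), enc_cfg, VPX_CODEC_USE_OUTPUT_PARTITION);
  if (res != VPX_CODEC_OK) return res;
  return vpx_codec_dec_init(dec, vpx_codec_vp8_dx(), dec_cfg,
                            VPX_CODEC_USE_INPUT_FRAGMENTS);
}
// After all fragments of a frame have been passed to vpx_codec_decode(),
// signal end-of-frame the same way the patched loop does:
//   vpx_codec_decode(dec, NULL, 0, NULL, 0);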
150,850 |
static bool compare_img(const vpx_image_t *img1,
const vpx_image_t *img2) {
bool match = (img1->fmt == img2->fmt) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h);
const unsigned int width_y = img1->d_w;
const unsigned int height_y = img1->d_h;
unsigned int i;
for (i = 0; i < height_y; ++i)
match = (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
width_y) == 0) && match;
const unsigned int width_uv = (img1->d_w + 1) >> 1;
const unsigned int height_uv = (img1->d_h + 1) >> 1;
for (i = 0; i < height_uv; ++i)
match = (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
width_uv) == 0) && match;
for (i = 0; i < height_uv; ++i)
match = (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
width_uv) == 0) && match;
return match;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static bool compare_img(const vpx_image_t *img1,
const vpx_image_t *img2) {
bool match = (img1->fmt == img2->fmt) &&
(img1->cs == img2->cs) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h);
const unsigned int width_y = img1->d_w;
const unsigned int height_y = img1->d_h;
unsigned int i;
for (i = 0; i < height_y; ++i)
match = (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
width_y) == 0) && match;
const unsigned int width_uv = (img1->d_w + 1) >> 1;
const unsigned int height_uv = (img1->d_h + 1) >> 1;
for (i = 0; i < height_uv; ++i)
match = (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
width_uv) == 0) && match;
for (i = 0; i < height_uv; ++i)
match = (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
width_uv) == 0) && match;
return match;
}
|
@@ -8,15 +8,59 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "test/codec_factory.h"
-#include "test/encode_test_driver.h"
-#include "test/decode_test_driver.h"
-#include "test/register_state_check.h"
-#include "test/video_source.h"
+#include <string>
+
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "test/codec_factory.h"
+#include "test/decode_test_driver.h"
+#include "test/encode_test_driver.h"
+#include "test/register_state_check.h"
+#include "test/video_source.h"
+
namespace libvpx_test {
+void Encoder::InitEncoder(VideoSource *video) {
+ vpx_codec_err_t res;
+ const vpx_image_t *img = video->img();
+
+ if (video->img() && !encoder_.priv) {
+ cfg_.g_w = img->d_w;
+ cfg_.g_h = img->d_h;
+ cfg_.g_timebase = video->timebase();
+ cfg_.rc_twopass_stats_in = stats_->buf();
+
+ res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
+ init_flags_);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+
+#if CONFIG_VP9_ENCODER
+ if (CodecInterface() == &vpx_codec_vp9_cx_algo) {
+ // Default to 1 tile column for VP9.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+#if CONFIG_VP10_ENCODER
+ if (CodecInterface() == &vpx_codec_vp10_cx_algo) {
+ // Default to 1 tile column for VP10.
+ const int log2_tile_columns = 0;
+ res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
+ log2_tile_columns);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ } else
+#endif
+ {
+#if CONFIG_VP8_ENCODER
+ ASSERT_EQ(&vpx_codec_vp8_cx_algo, CodecInterface())
+ << "Unknown Codec Interface";
+#endif
+ }
+ }
+}
+
void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) {
if (video->img())
EncodeFrameInternal(*video, frame_flags);
@@ -39,17 +83,6 @@
vpx_codec_err_t res;
const vpx_image_t *img = video.img();
- // Handle first frame initialization
- if (!encoder_.priv) {
- cfg_.g_w = img->d_w;
- cfg_.g_h = img->d_h;
- cfg_.g_timebase = video.timebase();
- cfg_.rc_twopass_stats_in = stats_->buf();
- res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
- init_flags_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
- }
-
// Handle frame resizing
if (cfg_.g_w != img->d_w || cfg_.g_h != img->d_h) {
cfg_.g_w = img->d_w;
@@ -59,9 +92,8 @@
}
// Encode the frame
- REGISTER_STATE_CHECK(
- res = vpx_codec_encode(&encoder_,
- video.img(), video.pts(), video.duration(),
+ API_REGISTER_STATE_CHECK(
+ res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(),
frame_flags, deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -69,11 +101,15 @@
void Encoder::Flush() {
const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
deadline_);
- ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ if (!encoder_.priv)
+ ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
+ else
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
void EncoderTest::InitializeConfig() {
const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
+ dec_cfg_ = vpx_codec_dec_cfg_t();
ASSERT_EQ(VPX_CODEC_OK, res);
}
@@ -107,6 +143,7 @@
static bool compare_img(const vpx_image_t *img1,
const vpx_image_t *img2) {
bool match = (img1->fmt == img2->fmt) &&
+ (img1->cs == img2->cs) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h);
@@ -130,13 +167,13 @@
return match;
}
-void EncoderTest::MismatchHook(const vpx_image_t *img1,
- const vpx_image_t *img2) {
+void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
+ const vpx_image_t* /*img2*/) {
ASSERT_TRUE(0) << "Encode/Decode mismatch found";
}
void EncoderTest::RunLoop(VideoSource *video) {
- vpx_codec_dec_cfg_t dec_cfg = {0};
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
stats_.Reset();
@@ -155,9 +192,18 @@
Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
&stats_);
ASSERT_TRUE(encoder != NULL);
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+
+ video->Begin();
+ encoder->InitEncoder(video);
+
+ unsigned long dec_init_flags = 0; // NOLINT
+ // Use fragment decoder if encoder outputs partitions.
+ // NOTE: fragment decoder and partition encoder are only supported by VP8.
+ if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION)
+ dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS;
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0);
bool again;
- for (again = true, video->Begin(); again; video->Next()) {
+ for (again = true; again; video->Next()) {
again = (video->img() != NULL);
PreEncodeFrameHook(video);
@@ -177,7 +223,10 @@
if (decoder && DoDecode()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(
(const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+
has_dxdata = true;
}
ASSERT_GE(pkt->data.frame.pts, last_pts_);
@@ -194,6 +243,13 @@
}
}
+ // Flush the decoder when there are no more fragments.
+ if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
+ const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
+ if (!HandleDecodeResult(res_dec, *video, decoder))
+ break;
+ }
+
if (has_dxdata && has_cxdata) {
const vpx_image_t *img_enc = encoder->GetPreviewFrame();
DxDataIterator dec_iter = decoder->GetDxData();
|
CWE-119
| null |
(img1->cs == img2->cs) &&
|
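On the chroma dimensions used by compare_img() above: the test images are I420, so the U and V planes are subsampled 2x2 with the size rounded up, e.g. d_w = 353 gives width_uv = (353 + 1) >> 1 = 177. A minimal sketch of the per-plane, stride-aware comparison the function performs (plane_matches is an illustrative helper, not from the test driver):

#include <cstring>
#include "vpx/vpx_image.h"

// Compare one plane row by row. Strides can differ between the two
// images and exceed the visible width, so memcmp is limited to w
// bytes per row rather than comparing whole buffers.
static bool plane_matches(const vpx_image_t *a, const vpx_image_t *b,
                          int plane, unsigned int w, unsigned int h) {
  for (unsigned int i = 0; i < h; ++i) {
    if (memcmp(a->planes[plane] + i * a->stride[plane],
               b->planes[plane] + i * b->stride[plane], w) != 0)
      return false;  // unlike compare_img(), stop at the first mismatch
  }
  return true;
}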
150,851 |
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video) {
frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
if (droppable_nframes_ > 0 &&
(cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
std::cout << " Encoding droppable frame: "
<< droppable_frames_[i] << "\n";
frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
return;
}
}
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
//
// Frame flags and layer id for temporal layers.
// For two layers, test pattern is:
// 1 3
// 0 2 .....
// LAST is updated on base/layer 0, GOLDEN updated on layer 1.
// Non-zero pattern_switch parameter means pattern will switch to
// not using LAST for frame_num >= pattern_switch.
int SetFrameFlags(int frame_num,
int num_temp_layers,
int pattern_switch) {
int frame_flags = 0;
if (num_temp_layers == 2) {
if (frame_num % 2 == 0) {
if (frame_num < pattern_switch || pattern_switch == 0) {
// Layer 0: predict from LAST and ARF, update LAST.
frame_flags = VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
} else {
// Layer 0: predict from GF and ARF, update GF.
frame_flags = VP8_EFLAG_NO_REF_LAST |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
}
} else {
if (frame_num < pattern_switch || pattern_switch == 0) {
// Layer 1: predict from L, GF, and ARF, update GF.
frame_flags = VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST;
} else {
// Layer 1: predict from GF and ARF, update GF.
frame_flags = VP8_EFLAG_NO_REF_LAST |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
}
}
}
return frame_flags;
}
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
// For temporal layer case.
if (cfg_.ts_number_layers > 1) {
frame_flags_ = SetFrameFlags(video->frame(),
cfg_.ts_number_layers,
pattern_switch_);
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
std::cout << "Encoding droppable frame: "
<< droppable_frames_[i] << "\n";
}
}
} else {
if (droppable_nframes_ > 0 &&
(cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
std::cout << "Encoding droppable frame: "
<< droppable_frames_[i] << "\n";
frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
return;
}
}
}
}
}
|
@@ -37,6 +37,7 @@
void Reset() {
error_nframes_ = 0;
droppable_nframes_ = 0;
+ pattern_switch_ = 0;
}
virtual void SetUp() {
@@ -56,22 +57,77 @@
nframes_++;
}
- virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video) {
+ //
+ // Frame flags and layer id for temporal layers.
+ // For two layers, test pattern is:
+ // 1 3
+ // 0 2 .....
+ // LAST is updated on base/layer 0, GOLDEN updated on layer 1.
+ // Non-zero pattern_switch parameter means pattern will switch to
+ // not using LAST for frame_num >= pattern_switch.
+ int SetFrameFlags(int frame_num,
+ int num_temp_layers,
+ int pattern_switch) {
+ int frame_flags = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ if (frame_num < pattern_switch || pattern_switch == 0) {
+ // Layer 0: predict from LAST and ARF, update LAST.
+ frame_flags = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+ } else {
+ // Layer 0: predict from GF and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+ }
+ } else {
+ if (frame_num < pattern_switch || pattern_switch == 0) {
+ // Layer 1: predict from L, GF, and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+ } else {
+ // Layer 1: predict from GF and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+ }
+ }
+ }
+ return frame_flags;
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
- if (droppable_nframes_ > 0 &&
- (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ // For temporal layer case.
+ if (cfg_.ts_number_layers > 1) {
+ frame_flags_ = SetFrameFlags(video->frame(),
+ cfg_.ts_number_layers,
+ pattern_switch_);
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
- std::cout << " Encoding droppable frame: "
+ std::cout << "Encoding droppable frame: "
<< droppable_frames_[i] << "\n";
- frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF);
- return;
}
}
+ } else {
+ if (droppable_nframes_ > 0 &&
+ (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ for (unsigned int i = 0; i < droppable_nframes_; ++i) {
+ if (droppable_frames_[i] == video->frame()) {
+ std::cout << "Encoding droppable frame: "
+ << droppable_frames_[i] << "\n";
+ frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF);
+ return;
+ }
+ }
+ }
}
}
@@ -133,11 +189,16 @@
return mismatch_nframes_;
}
+ void SetPatternSwitch(int frame_switch) {
+ pattern_switch_ = frame_switch;
+ }
+
private:
double psnr_;
unsigned int nframes_;
unsigned int error_nframes_;
unsigned int droppable_nframes_;
+ unsigned int pattern_switch_;
double mismatch_psnr_;
unsigned int mismatch_nframes_;
unsigned int error_frames_[kMaxErrorFrames];
@@ -236,7 +297,291 @@
#endif
}
-VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
-VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+// Check for successful decoding and no encoder/decoder mismatch
+// if we lose (i.e., drop before decoding) the enhancement layer frames for a
+// two layer temporal pattern. The base layer does not predict from the top
+// layer, so successful decoding is expected.
+TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.rc_end_usage = VPX_CBR;
+ // 2 Temporal layers, no spatial layers, CBR mode.
+ cfg_.ss_number_layers = 1;
+ cfg_.ts_number_layers = 2;
+ cfg_.ts_rate_decimator[0] = 2;
+ cfg_.ts_rate_decimator[1] = 1;
+ cfg_.ts_periodicity = 2;
+ cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 40);
+
+ // Error resilient mode ON.
+ cfg_.g_error_resilient = 1;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ SetPatternSwitch(0);
+
+ // The odd frames are the enhancement layer for 2 layer pattern, so set
+ // those frames as droppable. Drop the last 7 frames.
+ unsigned int num_droppable_frames = 7;
+ unsigned int droppable_frame_list[] = {27, 29, 31, 33, 35, 37, 39};
+ SetDroppableFrames(num_droppable_frames, droppable_frame_list);
+ SetErrorFrames(num_droppable_frames, droppable_frame_list);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ // Test that no mismatches have been found
+ std::cout << " Mismatch frames: "
+ << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+
+  // Reset the previously set error/droppable frames.
+ Reset();
+}
+
+// Check for successful decoding and no encoder/decoder mismatch
+// for a two layer temporal pattern, where at some point in the
+// sequence, the LAST ref is not used anymore.
+TEST_P(ErrorResilienceTestLarge, 2LayersNoRefLast) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ cfg_.g_lag_in_frames = 0;
+
+ cfg_.rc_end_usage = VPX_CBR;
+ // 2 Temporal layers, no spatial layers, CBR mode.
+ cfg_.ss_number_layers = 1;
+ cfg_.ts_number_layers = 2;
+ cfg_.ts_rate_decimator[0] = 2;
+ cfg_.ts_rate_decimator[1] = 1;
+ cfg_.ts_periodicity = 2;
+ cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 100);
+
+ // Error resilient mode ON.
+ cfg_.g_error_resilient = 1;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ SetPatternSwitch(60);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ // Test that no mismatches have been found
+ std::cout << " Mismatch frames: "
+ << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+
+  // Reset the previously set error/droppable frames.
+ Reset();
+}
+
+class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ ErrorResilienceTestLargeCodecControls()
+ : EncoderTest(GET_PARAM(0)),
+ encoding_mode_(GET_PARAM(1)) {
+ Reset();
+ }
+
+ virtual ~ErrorResilienceTestLargeCodecControls() {}
+
+ void Reset() {
+ last_pts_ = 0;
+ tot_frame_number_ = 0;
+ // For testing up to 3 layers.
+ for (int i = 0; i < 3; ++i) {
+ bits_total_[i] = 0;
+ }
+ duration_ = 0.0;
+ }
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(encoding_mode_);
+ }
+
+ //
+ // Frame flags and layer id for temporal layers.
+ //
+
+ // For two layers, test pattern is:
+ // 1 3
+ // 0 2 .....
+ // For three layers, test pattern is:
+ // 1 3 5 7
+ // 2 6
+ // 0 4 ....
+ // LAST is always update on base/layer 0, GOLDEN is updated on layer 1,
+ // and ALTREF is updated on top layer for 3 layer pattern.
+ int SetFrameFlags(int frame_num, int num_temp_layers) {
+ int frame_flags = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ // Layer 0: predict from L and ARF, update L.
+ frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+ } else {
+ // Layer 1: predict from L, G and ARF, and update G.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+ }
+ } else if (num_temp_layers == 3) {
+ if (frame_num % 4 == 0) {
+ // Layer 0: predict from L, update L.
+ frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ } else if ((frame_num - 2) % 4 == 0) {
+ // Layer 1: predict from L, G, update G.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_REF_ARF;
+ } else if ((frame_num - 1) % 2 == 0) {
+        // Layer 2: predict from L, G, ARF; update ARF.
+ frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST;
+ }
+ }
+ return frame_flags;
+ }
+
+ int SetLayerId(int frame_num, int num_temp_layers) {
+ int layer_id = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ layer_id = 0;
+ } else {
+ layer_id = 1;
+ }
+ } else if (num_temp_layers == 3) {
+ if (frame_num % 4 == 0) {
+ layer_id = 0;
+ } else if ((frame_num - 2) % 4 == 0) {
+ layer_id = 1;
+ } else if ((frame_num - 1) % 2 == 0) {
+ layer_id = 2;
+ }
+ }
+ return layer_id;
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (cfg_.ts_number_layers > 1) {
+ int layer_id = SetLayerId(video->frame(), cfg_.ts_number_layers);
+ int frame_flags = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
+ if (video->frame() > 0) {
+ encoder->Control(VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+ encoder->Control(VP8E_SET_FRAME_FLAGS, frame_flags);
+ }
+ const vpx_rational_t tb = video->timebase();
+ timebase_ = static_cast<double>(tb.num) / tb.den;
+ duration_ = 0;
+ return;
+ }
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ // Time since last timestamp = duration.
+ vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
+ if (duration > 1) {
+ // Update counter for total number of frames (#frames input to encoder).
+ // Needed for setting the proper layer_id below.
+ tot_frame_number_ += static_cast<int>(duration - 1);
+ }
+ int layer = SetLayerId(tot_frame_number_, cfg_.ts_number_layers);
+ const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
+ // Update the total encoded bits. For temporal layers, update the cumulative
+ // encoded bits per layer.
+ for (int i = layer; i < static_cast<int>(cfg_.ts_number_layers); ++i) {
+ bits_total_[i] += frame_size_in_bits;
+ }
+ // Update the most recent pts.
+ last_pts_ = pkt->data.frame.pts;
+ ++tot_frame_number_;
+ }
+
+ virtual void EndPassHook(void) {
+ duration_ = (last_pts_ + 1) * timebase_;
+ if (cfg_.ts_number_layers > 1) {
+ for (int layer = 0; layer < static_cast<int>(cfg_.ts_number_layers);
+ ++layer) {
+ if (bits_total_[layer]) {
+ // Effective file datarate:
+ effective_datarate_[layer] = (bits_total_[layer] / 1000.0) / duration_;
+ }
+ }
+ }
+ }
+
+ double effective_datarate_[3];
+ private:
+ libvpx_test::TestMode encoding_mode_;
+ vpx_codec_pts_t last_pts_;
+ double timebase_;
+ int64_t bits_total_[3];
+ double duration_;
+ int tot_frame_number_;
+ };
+
+// Check two codec controls used for:
+// (1) for setting temporal layer id, and (2) for settings encoder flags.
+// This test invokes those controls for each frame, and verifies encoder/decoder
+// mismatch and basic rate control response.
+// TODO(marpan): Maybe move this test to datarate_test.cc.
+TEST_P(ErrorResilienceTestLargeCodecControls, CodecControl3TemporalLayers) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ cfg_.g_error_resilient = 1;
+
+ // 3 Temporal layers. Framerate decimation (4, 2, 1).
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.ts_periodicity = 4;
+ cfg_.ts_layer_id[0] = 0;
+ cfg_.ts_layer_id[1] = 2;
+ cfg_.ts_layer_id[2] = 1;
+ cfg_.ts_layer_id[3] = 2;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 200);
+ for (int i = 200; i <= 800; i += 200) {
+ cfg_.rc_target_bitrate = i;
+ Reset();
+ // 40-20-40 bitrate allocation for 3 temporal layers.
+ cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
+ ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.75)
+ << " The datarate for the file is lower than target by too much, "
+ "for layer: " << j;
+ ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.25)
+ << " The datarate for the file is greater than target by too much, "
+ "for layer: " << j;
+ }
+ }
+}
+
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLargeCodecControls,
+ ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+VP10_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
} // namespace
|
CWE-119
|
if (droppable_nframes_ > 0 &&
(cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
std::cout << " Encoding droppable frame: "
frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
return;
|
//
// Frame flags and layer id for temporal layers.
// For two layers, test pattern is:
// 1 3
// 0 2 .....
// LAST is updated on base/layer 0, GOLDEN updated on layer 1.
// Non-zero pattern_switch parameter means pattern will switch to
// not using LAST for frame_num >= pattern_switch.
int SetFrameFlags(int frame_num,
int num_temp_layers,
int pattern_switch) {
int frame_flags = 0;
if (num_temp_layers == 2) {
if (frame_num % 2 == 0) {
if (frame_num < pattern_switch || pattern_switch == 0) {
// Layer 0: predict from LAST and ARF, update LAST.
frame_flags = VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF;
} else {
// Layer 0: predict from GF and ARF, update GF.
frame_flags = VP8_EFLAG_NO_REF_LAST |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
}
} else {
if (frame_num < pattern_switch || pattern_switch == 0) {
// Layer 1: predict from L, GF, and ARF, update GF.
frame_flags = VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST;
} else {
// Layer 1: predict from GF and ARF, update GF.
frame_flags = VP8_EFLAG_NO_REF_LAST |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ARF;
}
}
}
return frame_flags;
}
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
// For temporal layer case.
if (cfg_.ts_number_layers > 1) {
frame_flags_ = SetFrameFlags(video->frame(),
cfg_.ts_number_layers,
pattern_switch_);
std::cout << "Encoding droppable frame: "
} else {
if (droppable_nframes_ > 0 &&
(cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
std::cout << "Encoding droppable frame: "
<< droppable_frames_[i] << "\n";
frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
return;
}
}
}
|
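To make the two-layer pattern in the record above concrete: with num_temp_layers = 2 and pattern_switch = 0, SetFrameFlags() alternates between two masks, and a non-zero pattern_switch flips both layers to the no-LAST masks from that frame onward. A small driver sketch, assuming SetFrameFlags() from the record has been lifted to file scope (in the test it is a class member):

#include <cstdio>
#include "vpx/vp8cx.h"  // VP8_EFLAG_* bit definitions

// Expected pattern for pattern_switch = 0:
//   even frames (layer 0): VP8_EFLAG_NO_REF_GF | NO_UPD_GF | NO_UPD_ARF
//   odd frames  (layer 1): VP8_EFLAG_NO_UPD_ARF | NO_UPD_LAST
// For frame_num >= pattern_switch (when non-zero), both layers use
// VP8_EFLAG_NO_REF_LAST | NO_UPD_LAST | NO_UPD_ARF instead.
int main() {
  for (int f = 0; f < 4; ++f)
    printf("frame %d -> flags 0x%x\n", f,
           static_cast<unsigned>(SetFrameFlags(f, 2, 0)));
  return 0;
}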
150,852 |
void Reset() {
error_nframes_ = 0;
droppable_nframes_ = 0;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void Reset() {
error_nframes_ = 0;
droppable_nframes_ = 0;
pattern_switch_ = 0;
}
|
@@ -37,6 +37,7 @@
void Reset() {
error_nframes_ = 0;
droppable_nframes_ = 0;
+ pattern_switch_ = 0;
}
virtual void SetUp() {
@@ -56,22 +57,77 @@
nframes_++;
}
- virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video) {
+ //
+ // Frame flags and layer id for temporal layers.
+ // For two layers, test pattern is:
+ // 1 3
+ // 0 2 .....
+ // LAST is updated on base/layer 0, GOLDEN updated on layer 1.
+ // Non-zero pattern_switch parameter means pattern will switch to
+ // not using LAST for frame_num >= pattern_switch.
+ int SetFrameFlags(int frame_num,
+ int num_temp_layers,
+ int pattern_switch) {
+ int frame_flags = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ if (frame_num < pattern_switch || pattern_switch == 0) {
+ // Layer 0: predict from LAST and ARF, update LAST.
+ frame_flags = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+ } else {
+ // Layer 0: predict from GF and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+ }
+ } else {
+ if (frame_num < pattern_switch || pattern_switch == 0) {
+ // Layer 1: predict from L, GF, and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+ } else {
+ // Layer 1: predict from GF and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+ }
+ }
+ }
+ return frame_flags;
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
- if (droppable_nframes_ > 0 &&
- (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ // For temporal layer case.
+ if (cfg_.ts_number_layers > 1) {
+ frame_flags_ = SetFrameFlags(video->frame(),
+ cfg_.ts_number_layers,
+ pattern_switch_);
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
- std::cout << " Encoding droppable frame: "
+ std::cout << "Encoding droppable frame: "
<< droppable_frames_[i] << "\n";
- frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF);
- return;
}
}
+ } else {
+ if (droppable_nframes_ > 0 &&
+ (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ for (unsigned int i = 0; i < droppable_nframes_; ++i) {
+ if (droppable_frames_[i] == video->frame()) {
+ std::cout << "Encoding droppable frame: "
+ << droppable_frames_[i] << "\n";
+ frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF);
+ return;
+ }
+ }
+ }
}
}
@@ -133,11 +189,16 @@
return mismatch_nframes_;
}
+ void SetPatternSwitch(int frame_switch) {
+ pattern_switch_ = frame_switch;
+ }
+
private:
double psnr_;
unsigned int nframes_;
unsigned int error_nframes_;
unsigned int droppable_nframes_;
+ unsigned int pattern_switch_;
double mismatch_psnr_;
unsigned int mismatch_nframes_;
unsigned int error_frames_[kMaxErrorFrames];
@@ -236,7 +297,291 @@
#endif
}
-VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
-VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+// Check for successful decoding and no encoder/decoder mismatch
+// if we lose (i.e., drop before decoding) the enhancement layer frames for a
+// two layer temporal pattern. The base layer does not predict from the top
+// layer, so successful decoding is expected.
+TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.rc_end_usage = VPX_CBR;
+ // 2 Temporal layers, no spatial layers, CBR mode.
+ cfg_.ss_number_layers = 1;
+ cfg_.ts_number_layers = 2;
+ cfg_.ts_rate_decimator[0] = 2;
+ cfg_.ts_rate_decimator[1] = 1;
+ cfg_.ts_periodicity = 2;
+ cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 40);
+
+ // Error resilient mode ON.
+ cfg_.g_error_resilient = 1;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ SetPatternSwitch(0);
+
+ // The odd frames are the enhancement layer for 2 layer pattern, so set
+ // those frames as droppable. Drop the last 7 frames.
+ unsigned int num_droppable_frames = 7;
+ unsigned int droppable_frame_list[] = {27, 29, 31, 33, 35, 37, 39};
+ SetDroppableFrames(num_droppable_frames, droppable_frame_list);
+ SetErrorFrames(num_droppable_frames, droppable_frame_list);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ // Test that no mismatches have been found
+ std::cout << " Mismatch frames: "
+ << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+
+ // Reset previously set of error/droppable frames.
+ Reset();
+}
+
+// Check for successful decoding and no encoder/decoder mismatch
+// for a two layer temporal pattern, where at some point in the
+// sequence, the LAST ref is not used anymore.
+TEST_P(ErrorResilienceTestLarge, 2LayersNoRefLast) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ cfg_.g_lag_in_frames = 0;
+
+ cfg_.rc_end_usage = VPX_CBR;
+ // 2 Temporal layers, no spatial layers, CBR mode.
+ cfg_.ss_number_layers = 1;
+ cfg_.ts_number_layers = 2;
+ cfg_.ts_rate_decimator[0] = 2;
+ cfg_.ts_rate_decimator[1] = 1;
+ cfg_.ts_periodicity = 2;
+ cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 100);
+
+ // Error resilient mode ON.
+ cfg_.g_error_resilient = 1;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ SetPatternSwitch(60);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ // Test that no mismatches have been found
+ std::cout << " Mismatch frames: "
+ << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+
+ // Reset previously set of error/droppable frames.
+ Reset();
+}
+
+class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ ErrorResilienceTestLargeCodecControls()
+ : EncoderTest(GET_PARAM(0)),
+ encoding_mode_(GET_PARAM(1)) {
+ Reset();
+ }
+
+ virtual ~ErrorResilienceTestLargeCodecControls() {}
+
+ void Reset() {
+ last_pts_ = 0;
+ tot_frame_number_ = 0;
+ // For testing up to 3 layers.
+ for (int i = 0; i < 3; ++i) {
+ bits_total_[i] = 0;
+ }
+ duration_ = 0.0;
+ }
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(encoding_mode_);
+ }
+
+ //
+ // Frame flags and layer id for temporal layers.
+ //
+
+ // For two layers, test pattern is:
+ // 1 3
+ // 0 2 .....
+ // For three layers, test pattern is:
+ // 1 3 5 7
+ // 2 6
+ // 0 4 ....
+ // LAST is always update on base/layer 0, GOLDEN is updated on layer 1,
+ // and ALTREF is updated on top layer for 3 layer pattern.
+ int SetFrameFlags(int frame_num, int num_temp_layers) {
+ int frame_flags = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ // Layer 0: predict from L and ARF, update L.
+ frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+ } else {
+ // Layer 1: predict from L, G and ARF, and update G.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+ }
+ } else if (num_temp_layers == 3) {
+ if (frame_num % 4 == 0) {
+ // Layer 0: predict from L, update L.
+ frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ } else if ((frame_num - 2) % 4 == 0) {
+ // Layer 1: predict from L, G, update G.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_REF_ARF;
+ } else if ((frame_num - 1) % 2 == 0) {
+ // Layer 2: predict from L, G, ARF; update ARG.
+ frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST;
+ }
+ }
+ return frame_flags;
+ }
+
+ int SetLayerId(int frame_num, int num_temp_layers) {
+ int layer_id = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ layer_id = 0;
+ } else {
+ layer_id = 1;
+ }
+ } else if (num_temp_layers == 3) {
+ if (frame_num % 4 == 0) {
+ layer_id = 0;
+ } else if ((frame_num - 2) % 4 == 0) {
+ layer_id = 1;
+ } else if ((frame_num - 1) % 2 == 0) {
+ layer_id = 2;
+ }
+ }
+ return layer_id;
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (cfg_.ts_number_layers > 1) {
+ int layer_id = SetLayerId(video->frame(), cfg_.ts_number_layers);
+ int frame_flags = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
+ if (video->frame() > 0) {
+ encoder->Control(VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+ encoder->Control(VP8E_SET_FRAME_FLAGS, frame_flags);
+ }
+ const vpx_rational_t tb = video->timebase();
+ timebase_ = static_cast<double>(tb.num) / tb.den;
+ duration_ = 0;
+ return;
+ }
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ // Time since last timestamp = duration.
+ vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
+ if (duration > 1) {
+ // Update counter for total number of frames (#frames input to encoder).
+ // Needed for setting the proper layer_id below.
+ tot_frame_number_ += static_cast<int>(duration - 1);
+ }
+ int layer = SetLayerId(tot_frame_number_, cfg_.ts_number_layers);
+ const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
+ // Update the total encoded bits. For temporal layers, update the cumulative
+ // encoded bits per layer.
+ for (int i = layer; i < static_cast<int>(cfg_.ts_number_layers); ++i) {
+ bits_total_[i] += frame_size_in_bits;
+ }
+ // Update the most recent pts.
+ last_pts_ = pkt->data.frame.pts;
+ ++tot_frame_number_;
+ }
+
+ virtual void EndPassHook(void) {
+ duration_ = (last_pts_ + 1) * timebase_;
+ if (cfg_.ts_number_layers > 1) {
+ for (int layer = 0; layer < static_cast<int>(cfg_.ts_number_layers);
+ ++layer) {
+ if (bits_total_[layer]) {
+ // Effective file datarate:
+ effective_datarate_[layer] = (bits_total_[layer] / 1000.0) / duration_;
+ }
+ }
+ }
+ }
+
+ double effective_datarate_[3];
+ private:
+ libvpx_test::TestMode encoding_mode_;
+ vpx_codec_pts_t last_pts_;
+ double timebase_;
+ int64_t bits_total_[3];
+ double duration_;
+ int tot_frame_number_;
+ };
+
+// Check two codec controls used for:
+// (1) for setting temporal layer id, and (2) for settings encoder flags.
+// This test invokes those controls for each frame, and verifies encoder/decoder
+// mismatch and basic rate control response.
+// TODO(marpan): Maybe move this test to datarate_test.cc.
+TEST_P(ErrorResilienceTestLargeCodecControls, CodecControl3TemporalLayers) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ cfg_.g_error_resilient = 1;
+
+ // 3 Temporal layers. Framerate decimation (4, 2, 1).
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.ts_periodicity = 4;
+ cfg_.ts_layer_id[0] = 0;
+ cfg_.ts_layer_id[1] = 2;
+ cfg_.ts_layer_id[2] = 1;
+ cfg_.ts_layer_id[3] = 2;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 200);
+ for (int i = 200; i <= 800; i += 200) {
+ cfg_.rc_target_bitrate = i;
+ Reset();
+ // 40-20-40 bitrate allocation for 3 temporal layers.
+ cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
+ ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.75)
+ << " The datarate for the file is lower than target by too much, "
+ "for layer: " << j;
+ ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.25)
+ << " The datarate for the file is greater than target by too much, "
+ "for layer: " << j;
+ }
+ }
+}
+
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLargeCodecControls,
+ ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+VP10_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
} // namespace
|
CWE-119
| null |
pattern_switch_ = 0;
|
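A minimal sketch of the stale-state bug this record fixes: Reset() runs between test configurations, so any member it omits (here pattern_switch_) carries its previous value into the next run. Harness is a hypothetical reduction of the test class, not the real fixture:

#include <cassert>

class Harness {
 public:
  Harness() { Reset(); }
  void Reset() {
    error_nframes_ = 0;
    droppable_nframes_ = 0;
    pattern_switch_ = 0;  // the added line: without it, stale state leaks
  }
  void SetPatternSwitch(int frame_switch) { pattern_switch_ = frame_switch; }
  unsigned int pattern_switch() const { return pattern_switch_; }

 private:
  unsigned int error_nframes_;
  unsigned int droppable_nframes_;
  unsigned int pattern_switch_;
};

int main() {
  Harness h;
  h.SetPatternSwitch(60);  // first test run switches the pattern at frame 60
  h.Reset();               // second run must start from a clean slate
  assert(h.pattern_switch() == 0);
  return 0;
}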
150,853 |
int GetFreeFrameBuffer(size_t min_size, vpx_codec_frame_buffer_t *fb) {
EXPECT_TRUE(fb != NULL);
const int idx = FindFreeBufferIndex();
if (idx == num_buffers_)
return -1;
if (ext_fb_list_[idx].size < min_size) {
delete [] ext_fb_list_[idx].data;
ext_fb_list_[idx].data = new uint8_t[min_size];
ext_fb_list_[idx].size = min_size;
}
SetFrameBuffer(idx, fb);
return 0;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int GetFreeFrameBuffer(size_t min_size, vpx_codec_frame_buffer_t *fb) {
EXPECT_TRUE(fb != NULL);
const int idx = FindFreeBufferIndex();
if (idx == num_buffers_)
return -1;
if (ext_fb_list_[idx].size < min_size) {
delete [] ext_fb_list_[idx].data;
ext_fb_list_[idx].data = new uint8_t[min_size];
memset(ext_fb_list_[idx].data, 0, min_size);
ext_fb_list_[idx].size = min_size;
}
SetFrameBuffer(idx, fb);
return 0;
}
|
@@ -10,13 +10,16 @@
#include <string>
+#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/ivf_video_source.h"
#include "test/md5_helper.h"
#include "test/test_vectors.h"
#include "test/util.h"
+#if CONFIG_WEBM_IO
#include "test/webm_video_source.h"
+#endif
namespace {
@@ -68,6 +71,7 @@
if (ext_fb_list_[idx].size < min_size) {
delete [] ext_fb_list_[idx].data;
ext_fb_list_[idx].data = new uint8_t[min_size];
+ memset(ext_fb_list_[idx].data, 0, min_size);
ext_fb_list_[idx].size = min_size;
}
@@ -93,13 +97,19 @@
return 0;
}
- // Marks the external frame buffer that |fb| is pointing too as free.
+ // Marks the external frame buffer that |fb| is pointing to as free.
// Returns < 0 on an error.
int ReturnFrameBuffer(vpx_codec_frame_buffer_t *fb) {
- EXPECT_TRUE(fb != NULL);
+ if (fb == NULL) {
+ EXPECT_TRUE(fb != NULL);
+ return -1;
+ }
ExternalFrameBuffer *const ext_fb =
reinterpret_cast<ExternalFrameBuffer*>(fb->priv);
- EXPECT_TRUE(ext_fb != NULL);
+ if (ext_fb == NULL) {
+ EXPECT_TRUE(ext_fb != NULL);
+ return -1;
+ }
EXPECT_EQ(1, ext_fb->in_use);
ext_fb->in_use = 0;
return 0;
@@ -267,6 +277,7 @@
ExternalFrameBufferList fb_list_;
};
+#if CONFIG_WEBM_IO
// Class for testing passing in external frame buffers to libvpx.
class ExternalFrameBufferTest : public ::testing::Test {
protected:
@@ -281,7 +292,7 @@
video_->Init();
video_->Begin();
- vpx_codec_dec_cfg_t cfg = {0};
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
decoder_ = new libvpx_test::VP9Decoder(cfg, 0);
ASSERT_TRUE(decoder_ != NULL);
}
@@ -340,6 +351,7 @@
int num_buffers_;
ExternalFrameBufferList fb_list_;
};
+#endif // CONFIG_WEBM_IO
// This test runs through the set of test vectors, and decodes them.
// Libvpx will call into the application to allocate a frame buffer when
@@ -366,7 +378,13 @@
if (filename.substr(filename.length() - 3, 3) == "ivf") {
video = new libvpx_test::IVFVideoSource(filename);
} else {
+#if CONFIG_WEBM_IO
video = new libvpx_test::WebMVideoSource(filename);
+#else
+ fprintf(stderr, "WebM IO is disabled, skipping test vector %s\n",
+ filename.c_str());
+ return;
+#endif
}
ASSERT_TRUE(video != NULL);
video->Init();
@@ -380,6 +398,7 @@
delete video;
}
+#if CONFIG_WEBM_IO
TEST_F(ExternalFrameBufferTest, MinFrameBuffers) {
// Minimum number of external frame buffers for VP9 is
// #VP9_MAXIMUM_REF_BUFFERS + #VPX_MAXIMUM_WORK_BUFFERS.
@@ -460,6 +479,7 @@
SetFrameBufferFunctions(
num_buffers, get_vp9_frame_buffer, release_vp9_frame_buffer));
}
+#endif // CONFIG_WEBM_IO
VP9_INSTANTIATE_TEST_CASE(ExternalFrameBufferMD5Test,
::testing::ValuesIn(libvpx_test::kVP9TestVectors,
|
CWE-119
| null |
memset(ext_fb_list_[idx].data, 0, min_size);
|
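A minimal sketch of the fix in this record: when a frame buffer is grown, the fresh allocation is zeroed before the decoder may read from it, since operator new[] leaves the bytes indeterminate. Buffer and EnsureCapacity are hypothetical stand-ins for the test's types:

#include <cstddef>
#include <cstdint>
#include <cstring>

struct Buffer {
  uint8_t *data;
  size_t size;
};

void EnsureCapacity(Buffer *buf, size_t min_size) {
  if (buf->size >= min_size) return;
  delete[] buf->data;
  buf->data = new uint8_t[min_size];
  // Without this memset the new bytes are indeterminate; a decoder that
  // reads the buffer before fully writing it would consume garbage.
  std::memset(buf->data, 0, min_size);
  buf->size = min_size;
}

int main() {
  Buffer b = {0, 0};
  EnsureCapacity(&b, 4096);  // b.data now holds 4096 zero bytes
  delete[] b.data;
  return 0;
}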
150,854 |
int ReturnFrameBuffer(vpx_codec_frame_buffer_t *fb) {
EXPECT_TRUE(fb != NULL);
ExternalFrameBuffer *const ext_fb =
reinterpret_cast<ExternalFrameBuffer*>(fb->priv);
EXPECT_TRUE(ext_fb != NULL);
EXPECT_EQ(1, ext_fb->in_use);
ext_fb->in_use = 0;
return 0;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int ReturnFrameBuffer(vpx_codec_frame_buffer_t *fb) {
if (fb == NULL) {
EXPECT_TRUE(fb != NULL);
return -1;
}
ExternalFrameBuffer *const ext_fb =
reinterpret_cast<ExternalFrameBuffer*>(fb->priv);
if (ext_fb == NULL) {
EXPECT_TRUE(ext_fb != NULL);
return -1;
}
EXPECT_EQ(1, ext_fb->in_use);
ext_fb->in_use = 0;
return 0;
}
|
@@ -10,13 +10,16 @@
#include <string>
+#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/ivf_video_source.h"
#include "test/md5_helper.h"
#include "test/test_vectors.h"
#include "test/util.h"
+#if CONFIG_WEBM_IO
#include "test/webm_video_source.h"
+#endif
namespace {
@@ -68,6 +71,7 @@
if (ext_fb_list_[idx].size < min_size) {
delete [] ext_fb_list_[idx].data;
ext_fb_list_[idx].data = new uint8_t[min_size];
+ memset(ext_fb_list_[idx].data, 0, min_size);
ext_fb_list_[idx].size = min_size;
}
@@ -93,13 +97,19 @@
return 0;
}
- // Marks the external frame buffer that |fb| is pointing too as free.
+ // Marks the external frame buffer that |fb| is pointing to as free.
// Returns < 0 on an error.
int ReturnFrameBuffer(vpx_codec_frame_buffer_t *fb) {
- EXPECT_TRUE(fb != NULL);
+ if (fb == NULL) {
+ EXPECT_TRUE(fb != NULL);
+ return -1;
+ }
ExternalFrameBuffer *const ext_fb =
reinterpret_cast<ExternalFrameBuffer*>(fb->priv);
- EXPECT_TRUE(ext_fb != NULL);
+ if (ext_fb == NULL) {
+ EXPECT_TRUE(ext_fb != NULL);
+ return -1;
+ }
EXPECT_EQ(1, ext_fb->in_use);
ext_fb->in_use = 0;
return 0;
@@ -267,6 +277,7 @@
ExternalFrameBufferList fb_list_;
};
+#if CONFIG_WEBM_IO
// Class for testing passing in external frame buffers to libvpx.
class ExternalFrameBufferTest : public ::testing::Test {
protected:
@@ -281,7 +292,7 @@
video_->Init();
video_->Begin();
- vpx_codec_dec_cfg_t cfg = {0};
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
decoder_ = new libvpx_test::VP9Decoder(cfg, 0);
ASSERT_TRUE(decoder_ != NULL);
}
@@ -340,6 +351,7 @@
int num_buffers_;
ExternalFrameBufferList fb_list_;
};
+#endif // CONFIG_WEBM_IO
// This test runs through the set of test vectors, and decodes them.
// Libvpx will call into the application to allocate a frame buffer when
@@ -366,7 +378,13 @@
if (filename.substr(filename.length() - 3, 3) == "ivf") {
video = new libvpx_test::IVFVideoSource(filename);
} else {
+#if CONFIG_WEBM_IO
video = new libvpx_test::WebMVideoSource(filename);
+#else
+ fprintf(stderr, "WebM IO is disabled, skipping test vector %s\n",
+ filename.c_str());
+ return;
+#endif
}
ASSERT_TRUE(video != NULL);
video->Init();
@@ -380,6 +398,7 @@
delete video;
}
+#if CONFIG_WEBM_IO
TEST_F(ExternalFrameBufferTest, MinFrameBuffers) {
// Minimum number of external frame buffers for VP9 is
// #VP9_MAXIMUM_REF_BUFFERS + #VPX_MAXIMUM_WORK_BUFFERS.
@@ -460,6 +479,7 @@
SetFrameBufferFunctions(
num_buffers, get_vp9_frame_buffer, release_vp9_frame_buffer));
}
+#endif // CONFIG_WEBM_IO
VP9_INSTANTIATE_TEST_CASE(ExternalFrameBufferMD5Test,
::testing::ValuesIn(libvpx_test::kVP9TestVectors,
|
CWE-119
|
EXPECT_TRUE(fb != NULL);
EXPECT_TRUE(ext_fb != NULL);
|
if (fb == NULL) {
EXPECT_TRUE(fb != NULL);
return -1;
}
if (ext_fb == NULL) {
EXPECT_TRUE(ext_fb != NULL);
return -1;
}
|
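A minimal sketch of the hardening pattern in this record: googletest's EXPECT_TRUE records a failure but lets execution continue, so the very next statement would still dereference the null pointer. Guarding with an early return keeps the failure report and skips the dereference. FrameBuffer and ReleaseBuffer below are hypothetical stand-ins, with the EXPECT replaced by a plain diagnostic so the sketch runs without gtest:

#include <cstdio>

struct FrameBuffer { int in_use; };

int ReleaseBuffer(FrameBuffer *fb) {
  if (fb == 0) {
    // In the real test this is EXPECT_TRUE(fb != NULL), which records the
    // failure; the early return is what prevents the null dereference below.
    std::fprintf(stderr, "failure recorded: fb != NULL\n");
    return -1;
  }
  fb->in_use = 0;
  return 0;
}

int main() {
  FrameBuffer fb = {1};
  ReleaseBuffer(&fb);                            // returns 0
  return ReleaseBuffer(0) == -1 ? 0 : 1;         // returns -1, no crash
}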
150,855 |
virtual void SetUp() {
video_ = new libvpx_test::WebMVideoSource(kVP9TestFile);
ASSERT_TRUE(video_ != NULL);
video_->Init();
video_->Begin();
vpx_codec_dec_cfg_t cfg = {0};
decoder_ = new libvpx_test::VP9Decoder(cfg, 0);
ASSERT_TRUE(decoder_ != NULL);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
video_ = new libvpx_test::WebMVideoSource(kVP9TestFile);
ASSERT_TRUE(video_ != NULL);
video_->Init();
video_->Begin();
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
decoder_ = new libvpx_test::VP9Decoder(cfg, 0);
ASSERT_TRUE(decoder_ != NULL);
}
|
@@ -10,13 +10,16 @@
#include <string>
+#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/ivf_video_source.h"
#include "test/md5_helper.h"
#include "test/test_vectors.h"
#include "test/util.h"
+#if CONFIG_WEBM_IO
#include "test/webm_video_source.h"
+#endif
namespace {
@@ -68,6 +71,7 @@
if (ext_fb_list_[idx].size < min_size) {
delete [] ext_fb_list_[idx].data;
ext_fb_list_[idx].data = new uint8_t[min_size];
+ memset(ext_fb_list_[idx].data, 0, min_size);
ext_fb_list_[idx].size = min_size;
}
@@ -93,13 +97,19 @@
return 0;
}
- // Marks the external frame buffer that |fb| is pointing too as free.
+ // Marks the external frame buffer that |fb| is pointing to as free.
// Returns < 0 on an error.
int ReturnFrameBuffer(vpx_codec_frame_buffer_t *fb) {
- EXPECT_TRUE(fb != NULL);
+ if (fb == NULL) {
+ EXPECT_TRUE(fb != NULL);
+ return -1;
+ }
ExternalFrameBuffer *const ext_fb =
reinterpret_cast<ExternalFrameBuffer*>(fb->priv);
- EXPECT_TRUE(ext_fb != NULL);
+ if (ext_fb == NULL) {
+ EXPECT_TRUE(ext_fb != NULL);
+ return -1;
+ }
EXPECT_EQ(1, ext_fb->in_use);
ext_fb->in_use = 0;
return 0;
@@ -267,6 +277,7 @@
ExternalFrameBufferList fb_list_;
};
+#if CONFIG_WEBM_IO
// Class for testing passing in external frame buffers to libvpx.
class ExternalFrameBufferTest : public ::testing::Test {
protected:
@@ -281,7 +292,7 @@
video_->Init();
video_->Begin();
- vpx_codec_dec_cfg_t cfg = {0};
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
decoder_ = new libvpx_test::VP9Decoder(cfg, 0);
ASSERT_TRUE(decoder_ != NULL);
}
@@ -340,6 +351,7 @@
int num_buffers_;
ExternalFrameBufferList fb_list_;
};
+#endif // CONFIG_WEBM_IO
// This test runs through the set of test vectors, and decodes them.
// Libvpx will call into the application to allocate a frame buffer when
@@ -366,7 +378,13 @@
if (filename.substr(filename.length() - 3, 3) == "ivf") {
video = new libvpx_test::IVFVideoSource(filename);
} else {
+#if CONFIG_WEBM_IO
video = new libvpx_test::WebMVideoSource(filename);
+#else
+ fprintf(stderr, "WebM IO is disabled, skipping test vector %s\n",
+ filename.c_str());
+ return;
+#endif
}
ASSERT_TRUE(video != NULL);
video->Init();
@@ -380,6 +398,7 @@
delete video;
}
+#if CONFIG_WEBM_IO
TEST_F(ExternalFrameBufferTest, MinFrameBuffers) {
// Minimum number of external frame buffers for VP9 is
// #VP9_MAXIMUM_REF_BUFFERS + #VPX_MAXIMUM_WORK_BUFFERS.
@@ -460,6 +479,7 @@
SetFrameBufferFunctions(
num_buffers, get_vp9_frame_buffer, release_vp9_frame_buffer));
}
+#endif // CONFIG_WEBM_IO
VP9_INSTANTIATE_TEST_CASE(ExternalFrameBufferMD5Test,
::testing::ValuesIn(libvpx_test::kVP9TestVectors,
|
CWE-119
|
vpx_codec_dec_cfg_t cfg = {0};
|
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
|
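A minimal sketch of the initialization change in this record: T() value-initializes every member of the config struct, while "= {0}" spells out only the first member (the rest are still zero-initialized for an aggregate, but the form trips -Wmissing-field-initializers on some compilers and can fail to compile if the type gains constructors). DummyCfg is a hypothetical stand-in for vpx_codec_dec_cfg_t:

#include <cassert>

struct DummyCfg {
  unsigned threads;
  unsigned width;
  unsigned height;
};

int main() {
  DummyCfg a = DummyCfg();  // every member value-initialized to zero
  DummyCfg b = {0};         // first member explicit, rest zero-initialized,
                            // but this form may warn on missing initializers
  assert(a.threads == 0 && a.width == 0 && a.height == 0);
  assert(b.width == 0 && b.height == 0);
  return 0;
}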
150,856 |
void RunAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
for (int j = 0; j < kNumCoeffs; ++j) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
}
REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
test_temp_block, pitch_));
REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
for (int j = 0; j < kNumCoeffs; ++j) {
const uint32_t diff = dst[j] - src[j];
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
EXPECT_GE(1u, max_error)
<< "Error: 4x4 FHT/IHT has an individual round trip error > 1";
EXPECT_GE(count_test_block , total_error)
<< "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
for (int j = 0; j < kNumCoeffs; ++j) {
if (bit_depth_ == VPX_BITS_8) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
test_input_block[j] = src16[j] - dst16[j];
#endif
}
}
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
test_temp_block, pitch_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
for (int j = 0; j < kNumCoeffs; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
EXPECT_GE(static_cast<uint32_t>(limit), max_error)
<< "Error: 4x4 FHT/IHT has an individual round trip error > "
<< limit;
EXPECT_GE(count_test_block * limit, total_error)
<< "Error: 4x4 FHT/IHT has average round trip error > " << limit
<< " per block";
}
|
@@ -13,74 +13,140 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
const int kNumCoeffs = 16;
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_4x4_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_4x4_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
-void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct4x4_c(in, out, stride);
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vpx_fdct4x4_c(in, out, stride);
}
-void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vp9_fwht4x4_c(in, out, stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+}
+
+void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+}
+
+void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+}
+
+void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+}
+
+void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+}
+
+void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans4x4TestBase {
public:
virtual ~Trans4x4TestBase() {}
protected:
- virtual void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) = 0;
- void RunAccuracyCheck() {
+ void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -88,27 +154,29 @@
}
}
- EXPECT_GE(1u, max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > 1";
+ EXPECT_GE(static_cast<uint32_t>(limit), max_error)
+ << "Error: 4x4 FHT/IHT has an individual round trip error > "
+ << limit;
- EXPECT_GE(count_test_block , total_error)
- << "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
+ EXPECT_GE(count_test_block * limit, total_error)
+ << "Error: 4x4 FHT/IHT has average round trip error > " << limit
+ << " per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -119,62 +187,85 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
- << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
+ << "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
- void RunInvAccuracyCheck() {
+ void RunInvAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
fwd_txfm_ref(in, coeff, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
+ EXPECT_GE(static_cast<uint32_t>(limit), error)
+ << "Error: 4x4 IDCT has error " << error
<< " at index " << j;
}
}
@@ -182,12 +273,14 @@
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class Trans4x4DCT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<dct_4x4_param_t> {
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
@@ -197,23 +290,25 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4DCT, CoeffCheck) {
@@ -225,12 +320,12 @@
}
TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
class Trans4x4HT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<ht_4x4_param_t> {
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
@@ -240,24 +335,26 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4HT, CoeffCheck) {
@@ -269,51 +366,189 @@
}
TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
+class Trans4x4WHT
+ : public Trans4x4TestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
+ public:
+ virtual ~Trans4x4WHT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
+ fwd_txfm_ref = fwht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
+};
+
+TEST_P(Trans4x4WHT, AccuracyCheck) {
+ RunAccuracyCheck(0);
+}
+
+TEST_P(Trans4x4WHT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans4x4WHT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans4x4WHT, InvAccuracyCheck) {
+ RunInvAccuracyCheck(0);
+}
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3)));
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c,
- &vp9_idct4x4_16_add_neon, 0)));
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, Trans4x4HT,
+ NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3)));
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MMX, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+#endif
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_sse2,
- &vp9_idct4x4_16_add_sse2, 0)));
+ make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
test_temp_block, pitch_));
REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
EXPECT_GE(1u, max_error)
<< "Error: 4x4 FHT/IHT has an individual round trip error > 1";
EXPECT_GE(count_test_block , total_error)
<< "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
|
void RunAccuracyCheck(int limit) {
DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
if (bit_depth_ == VPX_BITS_8) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
test_input_block[j] = src16[j] - dst16[j];
#endif
}
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
test_temp_block, pitch_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
#if CONFIG_VP9_HIGHBITDEPTH
const uint32_t diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
ASSERT_EQ(VPX_BITS_8, bit_depth_);
#endif
EXPECT_GE(static_cast<uint32_t>(limit), max_error)
<< "Error: 4x4 FHT/IHT has an individual round trip error > "
<< limit;
EXPECT_GE(count_test_block * limit, total_error)
<< "Error: 4x4 FHT/IHT has average round trip error > " << limit
<< " per block";
|
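A minimal sketch of the parameterized round-trip check introduced in this record: passing limit lets the lossless WHT (limit 0) and the lossy DCT/HT (limit 1) share one harness, bounding both the worst single-sample error and the average error per block. The identity transforms below are trivial stand-ins for the real vpx kernels:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

const int kNumCoeffs = 16;

void FwdIdentity(const int16_t *in, int32_t *out) {
  for (int j = 0; j < kNumCoeffs; ++j) out[j] = in[j];
}
void InvIdentity(const int32_t *in, int16_t *out) {
  for (int j = 0; j < kNumCoeffs; ++j) out[j] = static_cast<int16_t>(in[j]);
}

bool RoundTripWithinLimit(int limit, int blocks) {
  uint32_t max_error = 0;
  int64_t total_error = 0;
  for (int i = 0; i < blocks; ++i) {
    int16_t input[kNumCoeffs];
    int32_t coeff[kNumCoeffs];
    int16_t output[kNumCoeffs];
    // Test blocks in the input range [-255, 255], as in the real check.
    for (int j = 0; j < kNumCoeffs; ++j)
      input[j] = static_cast<int16_t>(std::rand() % 511 - 255);
    FwdIdentity(input, coeff);
    InvIdentity(coeff, output);
    for (int j = 0; j < kNumCoeffs; ++j) {
      const int32_t d = output[j] - input[j];
      const uint32_t error = static_cast<uint32_t>(d * d);
      if (error > max_error) max_error = error;
      total_error += error;
    }
  }
  // Same acceptance rule as the test: max per-sample error <= limit and
  // average error <= limit per block.
  return max_error <= static_cast<uint32_t>(limit) &&
         total_error <= static_cast<int64_t>(blocks) * limit;
}

int main() {
  std::printf("within limit: %d\n", RoundTripWithinLimit(0, 1000) ? 1 : 0);
  return 0;
}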
150,857 |
void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
|
@@ -13,74 +13,140 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
const int kNumCoeffs = 16;
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_4x4_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_4x4_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
-void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct4x4_c(in, out, stride);
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vpx_fdct4x4_c(in, out, stride);
}
-void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vp9_fwht4x4_c(in, out, stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+}
+
+void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+}
+
+void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+}
+
+void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+}
+
+void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+}
+
+void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans4x4TestBase {
public:
virtual ~Trans4x4TestBase() {}
protected:
- virtual void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) = 0;
- void RunAccuracyCheck() {
+ void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -88,27 +154,29 @@
}
}
- EXPECT_GE(1u, max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > 1";
+ EXPECT_GE(static_cast<uint32_t>(limit), max_error)
+ << "Error: 4x4 FHT/IHT has an individual round trip error > "
+ << limit;
- EXPECT_GE(count_test_block , total_error)
- << "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
+ EXPECT_GE(count_test_block * limit, total_error)
+ << "Error: 4x4 FHT/IHT has average round trip error > " << limit
+ << " per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -119,62 +187,85 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
- << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
+ << "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
- void RunInvAccuracyCheck() {
+ void RunInvAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
fwd_txfm_ref(in, coeff, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
+ EXPECT_GE(static_cast<uint32_t>(limit), error)
+ << "Error: 4x4 IDCT has error " << error
<< " at index " << j;
}
}
@@ -182,12 +273,14 @@
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class Trans4x4DCT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<dct_4x4_param_t> {
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
@@ -197,23 +290,25 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4DCT, CoeffCheck) {
@@ -225,12 +320,12 @@
}
TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
class Trans4x4HT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<ht_4x4_param_t> {
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
@@ -240,24 +335,26 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4HT, CoeffCheck) {
@@ -269,51 +366,189 @@
}
TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
+class Trans4x4WHT
+ : public Trans4x4TestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
+ public:
+ virtual ~Trans4x4WHT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
+ fwd_txfm_ref = fwht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
+};
+
+TEST_P(Trans4x4WHT, AccuracyCheck) {
+ RunAccuracyCheck(0);
+}
+
+TEST_P(Trans4x4WHT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans4x4WHT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans4x4WHT, InvAccuracyCheck) {
+ RunInvAccuracyCheck(0);
+}
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3)));
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c,
- &vp9_idct4x4_16_add_neon, 0)));
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, Trans4x4HT,
+ NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3)));
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MMX, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+#endif
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_sse2,
- &vp9_idct4x4_16_add_sse2, 0)));
+ make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
|
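The CWE-119 records in this run all trace to the same libvpx change: transform outputs widen from int16_t to tran_low_t so that 10/12-bit residuals cannot overflow coefficient storage. A standalone toy model of the failure mode (rough arithmetic, not libvpx code; the real typedef sits behind CONFIG_VP9_HIGHBITDEPTH):

#include <cstdint>
#include <cstdio>

// Stand-in for libvpx's coefficient type: int32_t when
// CONFIG_VP9_HIGHBITDEPTH is set, int16_t otherwise.
typedef int32_t tran_low_t;

int main() {
  for (int bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
    const int mask = (1 << bit_depth) - 1;  // mask_ in the tests
    // Rough model: a 4x4 transform can fold all 16 residuals of
    // magnitude mask into a single coefficient.
    const tran_low_t worst = 16 * mask;
    std::printf("bit depth %2d: worst-case ~%ld, fits int16_t: %s\n",
                bit_depth, static_cast<long>(worst),
                worst <= INT16_MAX ? "yes" : "no");
  }
  return 0;
}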
150,858 |
void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
|
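The same patch migrates DECLARE_ALIGNED_ARRAY(16, type, name, count) to DECLARE_ALIGNED(16, type, name[count]). A plausible stand-in for that macro, for readers without the tree (the real definition lives in vpx_ports/mem.h, which the patch newly includes):

#include <cstdint>

#if defined(_MSC_VER)
#define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val
#else
#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))
#endif

typedef int32_t tran_low_t;  // widened coefficient type (assumption)
const int kNumCoeffs = 16;

int main() {
  // Usage as in the updated tests: a 16-byte-aligned coefficient array.
  DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
  output_block[0] = 0;
  return static_cast<int>(output_block[0]);
}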
|
CWE-119
| null |
void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
|
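The fix recorded here arrives as part of a wider reparameterization: every test tuple gains a vpx_bit_depth_t element, and SetUp derives mask_ = (1 << bit_depth_) - 1 from it. A reduced, self-contained sketch of that pattern (illustrative names; the real tests use std::tr1::tuple and the fixtures shown in the patch):

#include <tuple>
#include "gtest/gtest.h"

enum vpx_bit_depth_t { VPX_BITS_8 = 8, VPX_BITS_10 = 10, VPX_BITS_12 = 12 };
typedef std::tuple<int, vpx_bit_depth_t> ToyParam;  // (tx_type, bit depth)

class ToyTrans4x4 : public ::testing::TestWithParam<ToyParam> {
 protected:
  virtual void SetUp() {
    tx_type_ = std::get<0>(GetParam());
    bit_depth_ = std::get<1>(GetParam());
    mask_ = (1 << bit_depth_) - 1;  // largest legal pixel value
  }
  int tx_type_;
  vpx_bit_depth_t bit_depth_;
  int mask_;
};

TEST_P(ToyTrans4x4, MaskTracksDepth) {
  EXPECT_LE(mask_, 4095);             // 12-bit ceiling
  EXPECT_EQ(mask_ & (mask_ + 1), 0);  // mask is 2^n - 1
}

INSTANTIATE_TEST_CASE_P(
    Depths, ToyTrans4x4,
    ::testing::Values(std::make_tuple(0, VPX_BITS_8),
                      std::make_tuple(0, VPX_BITS_10),
                      std::make_tuple(0, VPX_BITS_12)));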
150,859 |
void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
|
|
CWE-119
| null |
void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
|
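Note that the patch's new Trans4x4WHT fixture passes limit 0 to the accuracy checks where the DCT/HT fixtures pass 1. That is consistent with the Walsh-Hadamard transform being exact in integer arithmetic; a 1-D 4-point toy (not the libvpx kernels) demonstrating the self-inverse-up-to-scale property:

#include <cassert>

// 1-D 4-point Walsh-Hadamard; the matrix is symmetric with H*H = 4*I,
// so applying it twice recovers the input times 4 -- exactly, in
// integer arithmetic.
void wht4(const int x[4], int y[4]) {
  y[0] = x[0] + x[1] + x[2] + x[3];
  y[1] = x[0] - x[1] + x[2] - x[3];
  y[2] = x[0] + x[1] - x[2] - x[3];
  y[3] = x[0] - x[1] - x[2] + x[3];
}

int main() {
  const int x[4] = {3, -7, 12, 255};
  int y[4], z[4];
  wht4(x, y);  // "forward"
  wht4(y, z);  // "inverse" (self-inverse up to the factor of 4)
  for (int i = 0; i < 4; ++i) assert(z[i] == 4 * x[i]);
  return 0;
}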
150,860 |
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < kNumCoeffs; ++j) {
input_block[j] = rnd.Rand8() - rnd.Rand8();
input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
}
if (i == 0)
for (int j = 0; j < kNumCoeffs; ++j)
input_extreme_block[j] = 255;
if (i == 1)
for (int j = 0; j < kNumCoeffs; ++j)
input_extreme_block[j] = -255;
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
output_block, pitch_));
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
input_extreme_block[j] = mask_;
} else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
input_extreme_block[j] = -mask_;
}
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
output_block, pitch_));
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
|
@@ -13,74 +13,140 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
const int kNumCoeffs = 16;
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_4x4_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_4x4_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
-void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct4x4_c(in, out, stride);
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vpx_fdct4x4_c(in, out, stride);
}
-void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vp9_fwht4x4_c(in, out, stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+}
+
+void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+}
+
+void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+}
+
+void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+}
+
+void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+}
+
+void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans4x4TestBase {
public:
virtual ~Trans4x4TestBase() {}
protected:
- virtual void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) = 0;
- void RunAccuracyCheck() {
+ void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -88,27 +154,29 @@
}
}
- EXPECT_GE(1u, max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > 1";
+ EXPECT_GE(static_cast<uint32_t>(limit), max_error)
+ << "Error: 4x4 FHT/IHT has an individual round trip error > "
+ << limit;
- EXPECT_GE(count_test_block , total_error)
- << "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
+ EXPECT_GE(count_test_block * limit, total_error)
+ << "Error: 4x4 FHT/IHT has average round trip error > " << limit
+ << " per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -119,62 +187,85 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
- << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
+ << "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
- void RunInvAccuracyCheck() {
+ void RunInvAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
fwd_txfm_ref(in, coeff, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
+ EXPECT_GE(static_cast<uint32_t>(limit), error)
+ << "Error: 4x4 IDCT has error " << error
<< " at index " << j;
}
}
@@ -182,12 +273,14 @@
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class Trans4x4DCT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<dct_4x4_param_t> {
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
@@ -197,23 +290,25 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4DCT, CoeffCheck) {
@@ -225,12 +320,12 @@
}
TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
class Trans4x4HT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<ht_4x4_param_t> {
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
@@ -240,24 +335,26 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4HT, CoeffCheck) {
@@ -269,51 +366,189 @@
}
TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
+class Trans4x4WHT
+ : public Trans4x4TestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
+ public:
+ virtual ~Trans4x4WHT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
+ fwd_txfm_ref = fwht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
+};
+
+TEST_P(Trans4x4WHT, AccuracyCheck) {
+ RunAccuracyCheck(0);
+}
+
+TEST_P(Trans4x4WHT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans4x4WHT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans4x4WHT, InvAccuracyCheck) {
+ RunInvAccuracyCheck(0);
+}
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3)));
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c,
- &vp9_idct4x4_16_add_neon, 0)));
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, Trans4x4HT,
+ NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3)));
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MMX, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+#endif
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_sse2,
- &vp9_idct4x4_16_add_sse2, 0)));
+ make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
input_block[j] = rnd.Rand8() - rnd.Rand8();
input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
if (i == 0)
input_extreme_block[j] = 255;
if (i == 1)
input_extreme_block[j] = -255;
REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
output_block, pitch_));
EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
<< "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
|
DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
// Initialize a test block with input range [-mask_, mask_].
input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
if (i == 0) {
input_extreme_block[j] = mask_;
} else if (i == 1) {
input_extreme_block[j] = -mask_;
}
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
output_block, pitch_));
EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
<< "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
|
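This record and the two that follow carry the same upstream libvpx patch: the 4x4 transform tests gain a bit-depth tuple parameter, coefficient buffers widen from int16_t to tran_low_t, and the FDCT magnitude bound scales with bit depth. Below is a minimal standalone sketch of that bound arithmetic; it is illustrative only, and kDctMaxValue = 16384 is an assumption matching vp9_entropy.h's DCT_MAX_VALUE, not a value taken from this dataset.

#include <cstdio>

int main() {
  const int kDctMaxValue = 16384;  // assumed value of libvpx's DCT_MAX_VALUE
  for (int bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
    // Input samples are masked to [-mask, mask], as in the patched RunMemCheck().
    const int mask = (1 << bit_depth) - 1;
    // Per-coefficient cap checked by the patched EXPECT_GE.
    const int bound = 4 * kDctMaxValue << (bit_depth - 8);
    std::printf("bit_depth=%d mask=%d bound=%d\n", bit_depth, mask, bound);
  }
  return 0;
}

At 10 and 12 bits the admissible coefficient range no longer fits in int16_t, which is the CWE-119 overflow risk the tran_low_t storage change guards against.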
150,861 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
}
|
@@ -13,74 +13,140 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
const int kNumCoeffs = 16;
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_4x4_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_4x4_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
-void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct4x4_c(in, out, stride);
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vpx_fdct4x4_c(in, out, stride);
}
-void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vp9_fwht4x4_c(in, out, stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+}
+
+void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+}
+
+void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+}
+
+void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+}
+
+void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+}
+
+void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans4x4TestBase {
public:
virtual ~Trans4x4TestBase() {}
protected:
- virtual void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) = 0;
- void RunAccuracyCheck() {
+ void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -88,27 +154,29 @@
}
}
- EXPECT_GE(1u, max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > 1";
+ EXPECT_GE(static_cast<uint32_t>(limit), max_error)
+ << "Error: 4x4 FHT/IHT has an individual round trip error > "
+ << limit;
- EXPECT_GE(count_test_block , total_error)
- << "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
+ EXPECT_GE(count_test_block * limit, total_error)
+ << "Error: 4x4 FHT/IHT has average round trip error > " << limit
+ << " per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -119,62 +187,85 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
- << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
+ << "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
- void RunInvAccuracyCheck() {
+ void RunInvAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
fwd_txfm_ref(in, coeff, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
+ EXPECT_GE(static_cast<uint32_t>(limit), error)
+ << "Error: 4x4 IDCT has error " << error
<< " at index " << j;
}
}
@@ -182,12 +273,14 @@
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class Trans4x4DCT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<dct_4x4_param_t> {
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
@@ -197,23 +290,25 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4DCT, CoeffCheck) {
@@ -225,12 +320,12 @@
}
TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
class Trans4x4HT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<ht_4x4_param_t> {
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
@@ -240,24 +335,26 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4HT, CoeffCheck) {
@@ -269,51 +366,189 @@
}
TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
+class Trans4x4WHT
+ : public Trans4x4TestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
+ public:
+ virtual ~Trans4x4WHT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
+ fwd_txfm_ref = fwht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
+};
+
+TEST_P(Trans4x4WHT, AccuracyCheck) {
+ RunAccuracyCheck(0);
+}
+
+TEST_P(Trans4x4WHT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans4x4WHT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans4x4WHT, InvAccuracyCheck) {
+ RunInvAccuracyCheck(0);
+}
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3)));
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c,
- &vp9_idct4x4_16_add_neon, 0)));
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, Trans4x4HT,
+ NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3)));
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MMX, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+#endif
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_sse2,
- &vp9_idct4x4_16_add_sse2, 0)));
+ make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
|
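The record above and the one that follows are the SetUp() halves of the same patch: each test fixture now reads a fourth tuple element of type vpx_bit_depth_t and derives an input mask from it. The mask expression works because the enum values equal the numeric depths (VPX_BITS_8 = 8 and so on, as declared in vpx/vpx_codec.h). A hypothetical standalone sketch, not a dataset row:

#include <cstdio>
#include <tuple>

// Mirrors the vpx_bit_depth_t enum the patch relies on.
enum vpx_bit_depth_t { VPX_BITS_8 = 8, VPX_BITS_10 = 10, VPX_BITS_12 = 12 };
typedef std::tuple<int, vpx_bit_depth_t> ParamSketch;  // (tx_type, bit depth)

int main() {
  const ParamSketch param(3, VPX_BITS_10);
  const int bit_depth = std::get<1>(param);  // GET_PARAM(3) analogue
  const int mask = (1 << bit_depth) - 1;     // 1023, the 10-bit sample range
  std::printf("tx_type=%d mask=%d\n", std::get<0>(param), mask);
  return 0;
}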
150,862 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
}
|
@@ -13,74 +13,140 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
const int kNumCoeffs = 16;
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_4x4_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_4x4_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
-void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct4x4_c(in, out, stride);
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vpx_fdct4x4_c(in, out, stride);
}
-void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vp9_fwht4x4_c(in, out, stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+}
+
+void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+}
+
+void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+}
+
+void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+}
+
+void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+}
+
+void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans4x4TestBase {
public:
virtual ~Trans4x4TestBase() {}
protected:
- virtual void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) = 0;
- void RunAccuracyCheck() {
+ void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -88,27 +154,29 @@
}
}
- EXPECT_GE(1u, max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > 1";
+ EXPECT_GE(static_cast<uint32_t>(limit), max_error)
+ << "Error: 4x4 FHT/IHT has an individual round trip error > "
+ << limit;
- EXPECT_GE(count_test_block , total_error)
- << "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
+ EXPECT_GE(count_test_block * limit, total_error)
+ << "Error: 4x4 FHT/IHT has average round trip error > " << limit
+ << " per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -119,62 +187,85 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
- << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
+ << "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
- void RunInvAccuracyCheck() {
+ void RunInvAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
fwd_txfm_ref(in, coeff, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
+ EXPECT_GE(static_cast<uint32_t>(limit), error)
+ << "Error: 4x4 IDCT has error " << error
<< " at index " << j;
}
}
@@ -182,12 +273,14 @@
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class Trans4x4DCT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<dct_4x4_param_t> {
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
@@ -197,23 +290,25 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4DCT, CoeffCheck) {
@@ -225,12 +320,12 @@
}
TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
class Trans4x4HT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<ht_4x4_param_t> {
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
@@ -240,24 +335,26 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4HT, CoeffCheck) {
@@ -269,51 +366,189 @@
}
TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
+class Trans4x4WHT
+ : public Trans4x4TestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
+ public:
+ virtual ~Trans4x4WHT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
+ fwd_txfm_ref = fwht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
+};
+
+TEST_P(Trans4x4WHT, AccuracyCheck) {
+ RunAccuracyCheck(0);
+}
+
+TEST_P(Trans4x4WHT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans4x4WHT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans4x4WHT, InvAccuracyCheck) {
+ RunInvAccuracyCheck(0);
+}
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3)));
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c,
- &vp9_idct4x4_16_add_neon, 0)));
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, Trans4x4HT,
+ NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3)));
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MMX, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+#endif
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_sse2,
- &vp9_idct4x4_16_add_sse2, 0)));
+ make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
|
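The patches in the surrounding rows all make the same two changes to the libvpx transform tests: coefficient buffers widen from int16_t to tran_low_t, and random test inputs are masked to the pixel range of the bit depth under test (mask_ = (1 << bit_depth_) - 1). A minimal sketch of that pattern follows, assuming tran_low_t follows the libvpx convention of widening to 32 bits when CONFIG_VP9_HIGHBITDEPTH is set; the helper name clamp_to_depth is hypothetical and only mirrors the "rnd.Rand16() & mask_" idiom in the patched tests.

#include <cstdint>

#if CONFIG_VP9_HIGHBITDEPTH
typedef int32_t tran_low_t;  // wide enough for 10/12-bit residual coefficients
#else
typedef int16_t tran_low_t;  // 8-bit-only builds keep the narrow type
#endif

// Hypothetical helper: clamp a random 16-bit draw to the pixel range of the
// current bit depth, as the patched tests do with rnd.Rand16() & mask_.
static inline int16_t clamp_to_depth(uint16_t r, int bit_depth) {
  const int mask = (1 << bit_depth) - 1;  // 255, 1023, or 4095
  return static_cast<int16_t>(r & mask);
}
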
150,863 |
void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fdct4x4_c(in, out, stride);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
int tx_type) {
vpx_fdct4x4_c(in, out, stride);
}
|
@@ -13,74 +13,140 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
const int kNumCoeffs = 16;
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_4x4_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_4x4_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
-void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct4x4_c(in, out, stride);
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vpx_fdct4x4_c(in, out, stride);
}
-void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vp9_fwht4x4_c(in, out, stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+}
+
+void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+}
+
+void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+}
+
+void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+}
+
+void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+}
+
+void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans4x4TestBase {
public:
virtual ~Trans4x4TestBase() {}
protected:
- virtual void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) = 0;
- void RunAccuracyCheck() {
+ void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -88,27 +154,29 @@
}
}
- EXPECT_GE(1u, max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > 1";
+ EXPECT_GE(static_cast<uint32_t>(limit), max_error)
+ << "Error: 4x4 FHT/IHT has an individual round trip error > "
+ << limit;
- EXPECT_GE(count_test_block , total_error)
- << "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
+ EXPECT_GE(count_test_block * limit, total_error)
+ << "Error: 4x4 FHT/IHT has average round trip error > " << limit
+ << " per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -119,62 +187,85 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
- << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
+ << "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
- void RunInvAccuracyCheck() {
+ void RunInvAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
fwd_txfm_ref(in, coeff, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
+ EXPECT_GE(static_cast<uint32_t>(limit), error)
+ << "Error: 4x4 IDCT has error " << error
<< " at index " << j;
}
}
@@ -182,12 +273,14 @@
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class Trans4x4DCT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<dct_4x4_param_t> {
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
@@ -197,23 +290,25 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4DCT, CoeffCheck) {
@@ -225,12 +320,12 @@
}
TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
class Trans4x4HT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<ht_4x4_param_t> {
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
@@ -240,24 +335,26 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4HT, CoeffCheck) {
@@ -269,51 +366,189 @@
}
TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
+class Trans4x4WHT
+ : public Trans4x4TestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
+ public:
+ virtual ~Trans4x4WHT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
+ fwd_txfm_ref = fwht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
+};
+
+TEST_P(Trans4x4WHT, AccuracyCheck) {
+ RunAccuracyCheck(0);
+}
+
+TEST_P(Trans4x4WHT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans4x4WHT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans4x4WHT, InvAccuracyCheck) {
+ RunInvAccuracyCheck(0);
+}
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3)));
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c,
- &vp9_idct4x4_16_add_neon, 0)));
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, Trans4x4HT,
+ NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3)));
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MMX, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+#endif
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_sse2,
- &vp9_idct4x4_16_add_sse2, 0)));
+ make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
vp9_fdct4x4_c(in, out, stride);
|
void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
int tx_type) {
vpx_fdct4x4_c(in, out, stride);
|
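Several hunks in the patch above route uint16_t pixel buffers (dst16, src16) through the uint8_t* inverse-transform interface via CONVERT_TO_BYTEPTR, so one IDCT signature serves both the 8-bit and high-bit-depth paths. The real libvpx macros encode the conversion in the pointer value itself; the sketch below is not the actual macro definition, it only models the round-trip contract with plain casts, and both helper names are hypothetical.

#include <cstdint>

// Hypothetical stand-ins: the caller hands a uint16_t buffer across a
// uint8_t* API boundary, and the high-bit-depth callee recovers it.
static inline uint8_t *convert_to_byteptr(uint16_t *p) {
  return reinterpret_cast<uint8_t *>(p);
}
static inline uint16_t *convert_to_shortptr(uint8_t *p) {
  return reinterpret_cast<uint16_t *>(p);
}
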
150,864 |
void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
|
@@ -13,74 +13,140 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
const int kNumCoeffs = 16;
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_4x4_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_4x4_param_t;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht4x4Param;
-void fdct4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct4x4_c(in, out, stride);
+void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vpx_fdct4x4_c(in, out, stride);
}
-void fht4x4_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht4x4_c(in, out, stride, tx_type);
}
+void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type) {
+ vp9_fwht4x4_c(in, out, stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+}
+
+void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+}
+
+void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+}
+
+void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+}
+
+void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+}
+
+void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class Trans4x4TestBase {
public:
virtual ~Trans4x4TestBase() {}
protected:
- virtual void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) = 0;
+ virtual void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) = 0;
- virtual void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) = 0;
- void RunAccuracyCheck() {
+ void RunAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
uint32_t max_error = 0;
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
- REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
+ CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ ASSERT_EQ(VPX_BITS_8, bit_depth_);
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
if (max_error < error)
max_error = error;
@@ -88,27 +154,29 @@
}
}
- EXPECT_GE(1u, max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > 1";
+ EXPECT_GE(static_cast<uint32_t>(limit), max_error)
+ << "Error: 4x4 FHT/IHT has an individual round trip error > "
+ << limit;
- EXPECT_GE(count_test_block , total_error)
- << "Error: 4x4 FHT/IHT has average round trip error > 1 per block";
+ EXPECT_GE(count_test_block * limit, total_error)
+ << "Error: 4x4 FHT/IHT has average round trip error > " << limit
+ << " per block";
}
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j)
@@ -119,62 +187,85 @@
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
- if (i == 0)
+ if (i == 0) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = 255;
- if (i == 1)
+ input_extreme_block[j] = mask_;
+ } else if (i == 1) {
for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -255;
+ input_extreme_block[j] = -mask_;
+ }
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
EXPECT_EQ(output_block[j], output_ref_block[j]);
- EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
- << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
+ << "Error: 4x4 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
}
}
}
- void RunInvAccuracyCheck() {
+ void RunInvAccuracyCheck(int limit) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- in[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
}
fwd_txfm_ref(in, coeff, pitch_, tx_type_);
- REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const uint32_t diff = dst[j] - src[j];
+#endif
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
+ EXPECT_GE(static_cast<uint32_t>(limit), error)
+ << "Error: 4x4 IDCT has error " << error
<< " at index " << j;
}
}
@@ -182,12 +273,14 @@
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class Trans4x4DCT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<dct_4x4_param_t> {
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
@@ -197,23 +290,25 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4DCT, CoeffCheck) {
@@ -225,12 +320,12 @@
}
TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
class Trans4x4HT
: public Trans4x4TestBase,
- public ::testing::TestWithParam<ht_4x4_param_t> {
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
@@ -240,24 +335,26 @@
tx_type_ = GET_PARAM(2);
pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(const int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(const int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck();
+ RunAccuracyCheck(1);
}
TEST_P(Trans4x4HT, CoeffCheck) {
@@ -269,51 +366,189 @@
}
TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
+ RunInvAccuracyCheck(1);
}
+class Trans4x4WHT
+ : public Trans4x4TestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
+ public:
+ virtual ~Trans4x4WHT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
+ fwd_txfm_ref = fwht4x4_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
+};
+
+TEST_P(Trans4x4WHT, AccuracyCheck) {
+ RunAccuracyCheck(0);
+}
+
+TEST_P(Trans4x4WHT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans4x4WHT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans4x4WHT, InvAccuracyCheck) {
+ RunInvAccuracyCheck(0);
+}
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_c, 0)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3)));
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_c,
- &vp9_idct4x4_16_add_neon, 0)));
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, Trans4x4HT,
+ NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2),
- make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3)));
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MMX, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4WHT,
+ ::testing::Values(
+ make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8)));
+#endif
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vp9_fdct4x4_sse2,
- &vp9_idct4x4_16_add_sse2, 0)));
+ make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2),
- make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3)));
-#endif
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans4x4HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht4x4_msa, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
| null |
void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
|
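The func_after in the next row scales the round-trip tolerance with bit depth: if the per-pixel error may grow by a factor of 2^(bit_depth - 8), the squared error grows by the square of that factor, which is exactly the 1 << 2 * (bit_depth_ - 8) bound in the patched RunExtremalCheck. A quick standalone check of that identity (max_sq_error_bound is a hypothetical name for illustration):

#include <cassert>

int max_sq_error_bound(int bit_depth) {  // bit_depth in {8, 10, 12}
  return 1 << (2 * (bit_depth - 8));     // (2^(b-8))^2 = 2^(2(b-8))
}

int main() {
  assert(max_sq_error_bound(8) == 1);    // 8-bit keeps the old bound of 1
  assert(max_sq_error_bound(10) == 16);
  assert(max_sq_error_bound(12) == 256);
  return 0;
}
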
150,865 |
void RunExtremalCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < 64; ++j) {
src[j] = rnd.Rand8() % 2 ? 255 : 0;
dst[j] = src[j] > 0 ? 0 : 255;
test_input_block[j] = src[j] - dst[j];
}
REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
for (int j = 0; j < 64; ++j) {
const int diff = dst[j] - src[j];
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
EXPECT_GE(1, max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
EXPECT_GE(count_test_block/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunExtremalCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
int total_coeff_error = 0;
const int count_test_block = 100000;
DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
DECLARE_ALIGNED(16, uint8_t, dst[64]);
DECLARE_ALIGNED(16, uint8_t, src[64]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[64]);
DECLARE_ALIGNED(16, uint16_t, src16[64]);
#endif
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
if (bit_depth_ == VPX_BITS_8) {
if (i == 0) {
src[j] = 255;
dst[j] = 0;
} else if (i == 1) {
src[j] = 0;
dst[j] = 255;
} else {
src[j] = rnd.Rand8() % 2 ? 255 : 0;
dst[j] = rnd.Rand8() % 2 ? 255 : 0;
}
test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
if (i == 0) {
src16[j] = mask_;
dst16[j] = 0;
} else if (i == 1) {
src16[j] = 0;
dst16[j] = mask_;
} else {
src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
}
test_input_block[j] = src16[j] - dst16[j];
#endif
}
}
ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
ASM_REGISTER_STATE_CHECK(
fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
for (int j = 0; j < 64; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
const int diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
const int diff = dst[j] - src[j];
#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
total_coeff_error += abs(coeff_diff);
}
EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
EXPECT_EQ(0, total_coeff_error)
<< "Error: Extremal 8x8 FDCT/FHT has"
<< "overflow issues in the intermediate steps > 1";
}
}
|
@@ -13,52 +13,139 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_8x8_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_8x8_param_t;
+const int kNumCoeffs = 64;
+const double kPi = 3.141592653589793238462643383279502884;
-void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct8x8_c(in, out, stride);
+const int kSignBiasMaxDiff255 = 1500;
+const int kSignBiasMaxDiff15 = 10000;
+
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
+
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
+
+void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
}
-void fht8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
+ double output[kNumCoeffs]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 8; ++j)
+ output[j * 8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ // Scale by some magic number
+ for (int j = 0; j < 8; ++j)
+ output[j + i * 8] = temp_out[j] * 2;
+ }
+}
+
+
+void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+ vpx_fdct8x8_c(in, out, stride);
+}
+
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
+}
+
+void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 12);
+}
+
+void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct8x8_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 12);
+}
+
+void idct8x8_64_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class FwdTrans8x8TestBase {
public:
virtual ~FwdTrans8x8TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunSignBiasCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_output_block[64]);
int count_sign_block[64][2];
const int count_test_block = 100000;
@@ -67,8 +154,9 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() >> (16 - bit_depth_)) & mask_) -
+ ((rnd.Rand16() >> (16 - bit_depth_)) & mask_);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -81,8 +169,8 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 1125;
- EXPECT_LT(diff, max_diff)
+ const int max_diff = kSignBiasMaxDiff255;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
<< "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-255, 255] at index " << j
@@ -94,10 +182,11 @@
memset(count_sign_block, 0, sizeof(count_sign_block));
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-15, 15].
+ // Initialize a test block with input range [-mask_ / 16, mask_ / 16].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) -
+ ((rnd.Rand16() & mask_) >> 4);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -110,9 +199,9 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 10000;
- EXPECT_LT(diff, max_diff)
- << "Error: 4x4 FDCT/FHT has a sign bias > "
+ const int max_diff = kSignBiasMaxDiff15;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
+ << "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-15, 15] at index " << j
<< " count0: " << count_sign_block[j][0]
@@ -126,20 +215,32 @@
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
if (test_temp_block[j] > 0) {
@@ -152,11 +253,23 @@
test_temp_block[j] *= 4;
}
}
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
@@ -164,11 +277,11 @@
}
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
@@ -177,51 +290,247 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
+ int total_coeff_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8() % 2 ? 255 : 0;
- dst[j] = src[j] > 0 ? 0 : 255;
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ if (i == 0) {
+ src[j] = 255;
+ dst[j] = 0;
+ } else if (i == 1) {
+ src[j] = 0;
+ dst[j] = 255;
+ } else {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = rnd.Rand8() % 2 ? 255 : 0;
+ }
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ if (i == 0) {
+ src16[j] = mask_;
+ dst16[j] = 0;
+ } else if (i == 1) {
+ src16[j] = 0;
+ dst16[j] = mask_;
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ }
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
+
+ const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
+ total_coeff_error += abs(coeff_diff);
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
+
+ EXPECT_EQ(0, total_coeff_error)
+ << "Error: Extremal 8x8 FDCT/FHT has"
+ << "overflow issues in the intermediate steps > 1";
}
}
+ void RunInvAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = src16[j] > 0 ? 0 : mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
+ }
+
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ const uint32_t diff = dst[j] - src[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+ void RunFwdAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff_r[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j)
+ in[j] = rnd.Rand8() % 2 == 0 ? mask_ : -mask_;
+
+ RunFwdTxfm(in, coeff, pitch_);
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff_r[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = coeff[j] - coeff_r[j];
+ const uint32_t error = diff * diff;
+ EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 DCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 12;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+ const int16_t *scan = vp9_default_scan_orders[TX_8X8].scan;
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1-2*(i%2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class FwdTrans8x8DCT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<dct_8x8_param_t> {
+ public ::testing::TestWithParam<Dct8x8Param> {
public:
virtual ~FwdTrans8x8DCT() {}
@@ -231,20 +540,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(FwdTrans8x8DCT, SignBiasCheck) {
@@ -259,9 +570,17 @@
RunExtremalCheck();
}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
+ RunFwdAccuracyCheck();
+}
+
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
+ RunInvAccuracyCheck();
+}
+
class FwdTrans8x8HT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<ht_8x8_param_t> {
+ public ::testing::TestWithParam<Ht8x8Param> {
public:
virtual ~FwdTrans8x8HT() {}
@@ -271,20 +590,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(FwdTrans8x8HT, SignBiasCheck) {
@@ -299,45 +620,170 @@
RunExtremalCheck();
}
+class InvTrans8x8DCT
+ : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Idct8x8Param> {
+ public:
+ virtual ~InvTrans8x8DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ pitch_ = 8;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+ void RunFwdTxfm(int16_t *out, tran_low_t *dst, int stride) {}
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans8x8DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0)));
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3)));
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_neon, 0)));
-INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3)));
-#endif
+ make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0,
+ VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ NEON, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0)));
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3)));
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+
+// Optimizations take effect at a threshold of 6201, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&idct8x8_10_add_10_c,
+ &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10,
+ &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10_add_12_c,
+ &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12),
+ make_tuple(&idct8x8_12,
+ &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
+ !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSSE3, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0,
+ VPX_BITS_8)));
#endif
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
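The sign-bias checks in the patch above count, per coefficient position, how often the forward transform produces a positive versus a negative output over many random residual blocks; a well-behaved transform keeps the two counts close. A hedged standalone sketch of that methodology, reusing the patch's floating-point 1-D reference DCT; the rand()-based input and the 10000-block count are illustrative stand-ins, not the test's ACMRandom settings or thresholds.

#include <cmath>
#include <cstdio>
#include <cstdlib>

const double kPi = 3.141592653589793238462643383279502884;

// Copied from the patch: orthonormal 1-D 8-point reference DCT.
void reference_dct_1d(const double in[8], double out[8]) {
  const double kInvSqrt2 = 0.707106781186547524400844362104;
  for (int k = 0; k < 8; ++k) {
    out[k] = 0.0;
    for (int n = 0; n < 8; ++n)
      out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
    if (k == 0) out[k] *= kInvSqrt2;
  }
}

int main() {
  int count_sign[8][2] = {};
  srand(0);
  for (int i = 0; i < 10000; ++i) {
    double in[8], out[8];
    for (int j = 0; j < 8; ++j)
      in[j] = (rand() % 511) - 255;  // residual in [-255, 255]
    reference_dct_1d(in, out);
    ++count_sign[0][out[0] > 0 ? 0 : 1];  // zero counted as non-positive
    for (int j = 1; j < 8; ++j)
      ++count_sign[j][out[j] > 0 ? 0 : 1];
  }
  // A biased transform would show a large positive/negative imbalance
  // at some position; the real test asserts |diff| < a per-range bound.
  for (int j = 0; j < 8; ++j)
    printf("coeff %d: +%d / -%d\n", j, count_sign[j][0], count_sign[j][1]);
  return 0;
}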
CWE-119
|
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
src[j] = rnd.Rand8() % 2 ? 255 : 0;
dst[j] = src[j] > 0 ? 0 : 255;
test_input_block[j] = src[j] - dst[j];
REGISTER_STATE_CHECK(
REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
EXPECT_GE(1, max_error)
EXPECT_GE(count_test_block/5, total_error)
|
int total_coeff_error = 0;
DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
DECLARE_ALIGNED(16, uint8_t, dst[64]);
DECLARE_ALIGNED(16, uint8_t, src[64]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[64]);
DECLARE_ALIGNED(16, uint16_t, src16[64]);
#endif
// Initialize a test block with input range [-mask_, mask_].
if (bit_depth_ == VPX_BITS_8) {
if (i == 0) {
src[j] = 255;
dst[j] = 0;
} else if (i == 1) {
src[j] = 0;
dst[j] = 255;
} else {
src[j] = rnd.Rand8() % 2 ? 255 : 0;
dst[j] = rnd.Rand8() % 2 ? 255 : 0;
}
test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
if (i == 0) {
src16[j] = mask_;
dst16[j] = 0;
} else if (i == 1) {
src16[j] = 0;
dst16[j] = mask_;
} else {
src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
}
test_input_block[j] = src16[j] - dst16[j];
#endif
}
ASM_REGISTER_STATE_CHECK(
ASM_REGISTER_STATE_CHECK(
fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
#if CONFIG_VP9_HIGHBITDEPTH
const int diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
#endif
const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
total_coeff_error += abs(coeff_diff);
EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
EXPECT_EQ(0, total_coeff_error)
<< "Error: Extremal 8x8 FDCT/FHT has"
<< "overflow issues in the intermediate steps > 1";
|
150,866 |
void RunRoundTripErrorCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < 64; ++j) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
}
REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
if (test_temp_block[j] > 0) {
test_temp_block[j] += 2;
test_temp_block[j] /= 4;
test_temp_block[j] *= 4;
} else {
test_temp_block[j] -= 2;
test_temp_block[j] /= 4;
test_temp_block[j] *= 4;
}
}
REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
for (int j = 0; j < 64; ++j) {
const int diff = dst[j] - src[j];
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
EXPECT_GE(1, max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
EXPECT_GE(count_test_block/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void RunRoundTripErrorCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
DECLARE_ALIGNED(16, uint8_t, dst[64]);
DECLARE_ALIGNED(16, uint8_t, src[64]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[64]);
DECLARE_ALIGNED(16, uint16_t, src16[64]);
#endif
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
if (bit_depth_ == VPX_BITS_8) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
test_input_block[j] = src16[j] - dst16[j];
#endif
}
}
ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
if (test_temp_block[j] > 0) {
test_temp_block[j] += 2;
test_temp_block[j] /= 4;
test_temp_block[j] *= 4;
} else {
test_temp_block[j] -= 2;
test_temp_block[j] /= 4;
test_temp_block[j] *= 4;
}
}
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
for (int j = 0; j < 64; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
const int diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
const int diff = dst[j] - src[j];
#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
|
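The +2 / /4 / *4 (and -2 / /4 / *4) sequence in the round-trip check above emulates a coarse quantizer: each coefficient is snapped to the nearest multiple of 4, with ties rounded away from zero. A minimal sketch of that step in isolation; the sample values are arbitrary.

#include <cstdio>

// Isolated sketch of the quantization emulation in the round-trip test:
// snap a transform coefficient to the nearest multiple of 4, rounding
// ties away from zero, exactly as the +/-2, /4, *4 sequence does.
int SnapToMultipleOf4(int coeff) {
  if (coeff > 0)
    return (coeff + 2) / 4 * 4;
  return (coeff - 2) / 4 * 4;  // also handles 0, since -2/4 truncates to 0
}

int main() {
  const int samples[] = {-9, -6, -3, -1, 0, 1, 2, 3, 6, 9};
  for (int c : samples)
    printf("%3d -> %3d\n", c, SnapToMultipleOf4(c));
  return 0;
}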
@@ -13,52 +13,139 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_8x8_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_8x8_param_t;
+const int kNumCoeffs = 64;
+const double kPi = 3.141592653589793238462643383279502884;
-void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct8x8_c(in, out, stride);
+const int kSignBiasMaxDiff255 = 1500;
+const int kSignBiasMaxDiff15 = 10000;
+
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
+
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
+
+void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
}
-void fht8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
+ double output[kNumCoeffs]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 8; ++j)
+ output[j * 8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ // Scale by some magic number
+ for (int j = 0; j < 8; ++j)
+ output[j + i * 8] = temp_out[j] * 2;
+ }
+}
+
+
+void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+ vpx_fdct8x8_c(in, out, stride);
+}
+
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
+}
+
+void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 12);
+}
+
+void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct8x8_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 12);
+}
+
+void idct8x8_64_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class FwdTrans8x8TestBase {
public:
virtual ~FwdTrans8x8TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunSignBiasCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_output_block[64]);
int count_sign_block[64][2];
const int count_test_block = 100000;
@@ -67,8 +154,9 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() >> (16 - bit_depth_)) & mask_) -
+ ((rnd.Rand16() >> (16 - bit_depth_)) & mask_);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -81,8 +169,8 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 1125;
- EXPECT_LT(diff, max_diff)
+ const int max_diff = kSignBiasMaxDiff255;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
<< "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-255, 255] at index " << j
@@ -94,10 +182,11 @@
memset(count_sign_block, 0, sizeof(count_sign_block));
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-15, 15].
+ // Initialize a test block with input range [-mask_ / 16, mask_ / 16].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) -
+ ((rnd.Rand16() & mask_) >> 4);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -110,9 +199,9 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 10000;
- EXPECT_LT(diff, max_diff)
- << "Error: 4x4 FDCT/FHT has a sign bias > "
+ const int max_diff = kSignBiasMaxDiff15;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
+ << "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-15, 15] at index " << j
<< " count0: " << count_sign_block[j][0]
@@ -126,20 +215,32 @@
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
if (test_temp_block[j] > 0) {
@@ -152,11 +253,23 @@
test_temp_block[j] *= 4;
}
}
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
@@ -164,11 +277,11 @@
}
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
@@ -177,51 +290,247 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
+ int total_coeff_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8() % 2 ? 255 : 0;
- dst[j] = src[j] > 0 ? 0 : 255;
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ if (i == 0) {
+ src[j] = 255;
+ dst[j] = 0;
+ } else if (i == 1) {
+ src[j] = 0;
+ dst[j] = 255;
+ } else {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = rnd.Rand8() % 2 ? 255 : 0;
+ }
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ if (i == 0) {
+ src16[j] = mask_;
+ dst16[j] = 0;
+ } else if (i == 1) {
+ src16[j] = 0;
+ dst16[j] = mask_;
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ }
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
+
+ const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
+ total_coeff_error += abs(coeff_diff);
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
+
+ EXPECT_EQ(0, total_coeff_error)
+ << "Error: Extremal 8x8 FDCT/FHT has"
+ << "overflow issues in the intermediate steps > 1";
}
}
+ void RunInvAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = src16[j] > 0 ? 0 : mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
+ }
+
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ const uint32_t diff = dst[j] - src[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+ void RunFwdAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff_r[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j)
+ in[j] = rnd.Rand8() % 2 == 0 ? mask_ : -mask_;
+
+ RunFwdTxfm(in, coeff, pitch_);
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff_r[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = coeff[j] - coeff_r[j];
+ const uint32_t error = diff * diff;
+ EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 DCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 12;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+ const int16_t *scan = vp9_default_scan_orders[TX_8X8].scan;
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1-2*(i%2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class FwdTrans8x8DCT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<dct_8x8_param_t> {
+ public ::testing::TestWithParam<Dct8x8Param> {
public:
virtual ~FwdTrans8x8DCT() {}
@@ -231,20 +540,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(FwdTrans8x8DCT, SignBiasCheck) {
@@ -259,9 +570,17 @@
RunExtremalCheck();
}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
+ RunFwdAccuracyCheck();
+}
+
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
+ RunInvAccuracyCheck();
+}
+
class FwdTrans8x8HT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<ht_8x8_param_t> {
+ public ::testing::TestWithParam<Ht8x8Param> {
public:
virtual ~FwdTrans8x8HT() {}
@@ -271,20 +590,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(FwdTrans8x8HT, SignBiasCheck) {
@@ -299,45 +620,170 @@
RunExtremalCheck();
}
+class InvTrans8x8DCT
+ : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Idct8x8Param> {
+ public:
+ virtual ~InvTrans8x8DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ pitch_ = 8;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+ void RunFwdTxfm(int16_t *out, tran_low_t *dst, int stride) {}
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans8x8DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0)));
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3)));
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_neon, 0)));
-INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3)));
-#endif
+ make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0,
+ VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ NEON, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0)));
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3)));
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+
+// Optimizations take effect at a threshold of 6201, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&idct8x8_10_add_10_c,
+ &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10,
+ &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10_add_12_c,
+ &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12),
+ make_tuple(&idct8x8_12,
+ &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
+ !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSSE3, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0,
+ VPX_BITS_8)));
#endif
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
REGISTER_STATE_CHECK(
REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
EXPECT_GE(1, max_error)
EXPECT_GE(count_test_block/5, total_error)
|
DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
DECLARE_ALIGNED(16, uint8_t, dst[64]);
DECLARE_ALIGNED(16, uint8_t, src[64]);
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, dst16[64]);
DECLARE_ALIGNED(16, uint16_t, src16[64]);
#endif
// Initialize a test block with input range [-mask_, mask_].
if (bit_depth_ == VPX_BITS_8) {
src[j] = rnd.Rand8();
dst[j] = rnd.Rand8();
test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src16[j] = rnd.Rand16() & mask_;
dst16[j] = rnd.Rand16() & mask_;
test_input_block[j] = src16[j] - dst16[j];
#endif
}
ASM_REGISTER_STATE_CHECK(
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
#if CONFIG_VP9_HIGHBITDEPTH
const int diff =
bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
#endif
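// Tolerances below scale by 4 per extra bit of depth: sample amplitudes
// double per bit, so squared errors quadruple.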
EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
|
150,867 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
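// mask_ is the peak sample value at this depth: 255, 1023, or 4095 for
// VPX_BITS_8, VPX_BITS_10, and VPX_BITS_12 respectively.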
}
|
@@ -13,52 +13,139 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_8x8_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_8x8_param_t;
+const int kNumCoeffs = 64;
+const double kPi = 3.141592653589793238462643383279502884;
-void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct8x8_c(in, out, stride);
+const int kSignBiasMaxDiff255 = 1500;
+const int kSignBiasMaxDiff15 = 10000;
+
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
+
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
+
+void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
+}
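+
+// A companion sketch, for illustration only (not exercised by any test in
+// this file): the exact inverse of reference_8x8_dct_1d under the scaling
+// above, handy for sanity-checking the forward reference in doubles. It
+// relies only on cos() and the kPi constant already used above.
+void reference_8x8_idct_1d(const double in[8], double out[8]) {
+  const double kInvSqrt2 = 0.707106781186547524400844362104;
+  for (int n = 0; n < 8; n++) {
+    out[n] = 0.0;
+    for (int k = 0; k < 8; k++) {
+      const double scale = (k == 0) ? kInvSqrt2 : 1.0;
+      out[n] += scale * in[k] * cos(kPi * (2 * n + 1) * k / 16.0);
+    }
+    out[n] /= 4.0;  // The scaled basis rows have squared norm N/2 = 4.
+  }
+}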
-void fht8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
+ double output[kNumCoeffs]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 8; ++j)
+ output[j * 8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ // Scale by some magic number
+ for (int j = 0; j < 8; ++j)
+ output[j + i * 8] = temp_out[j] * 2;
+ }
+}
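+// The two passes above exploit separability: an 8x8 DCT is eight column
+// DCTs followed by eight row DCTs. The trailing factor of 2 puts this
+// double-precision reference on the same output scale as the fixed-point
+// forward transform, which is what lets RunFwdAccuracyCheck compare the
+// two coefficient sets directly.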
+
+
+void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+ vpx_fdct8x8_c(in, out, stride);
+}
+
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
+}
+
+void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 12);
+}
+
+void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct8x8_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 12);
+}
+
+void idct8x8_64_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
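+
+// The wrappers above pin the bit depth so the high-bit-depth kernels, which
+// take an extra bd argument, fit the IdctFunc/IhtFunc signatures used by
+// the parameter tuples below.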
+
class FwdTrans8x8TestBase {
public:
virtual ~FwdTrans8x8TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunSignBiasCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_output_block[64]);
int count_sign_block[64][2];
const int count_test_block = 100000;
@@ -67,8 +154,9 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() >> (16 - bit_depth_)) & mask_) -
+ ((rnd.Rand16() >> (16 - bit_depth_)) & mask_);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -81,8 +169,8 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 1125;
- EXPECT_LT(diff, max_diff)
+ const int max_diff = kSignBiasMaxDiff255;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
<< "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-255, 255] at index " << j
@@ -94,10 +182,11 @@
memset(count_sign_block, 0, sizeof(count_sign_block));
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-15, 15].
+ // Initialize a test block with input range [-mask_ / 16, mask_ / 16].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) -
+ ((rnd.Rand16() & mask_) >> 4);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -110,9 +199,9 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 10000;
- EXPECT_LT(diff, max_diff)
- << "Error: 4x4 FDCT/FHT has a sign bias > "
+ const int max_diff = kSignBiasMaxDiff15;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
+ << "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-15, 15] at index " << j
<< " count0: " << count_sign_block[j][0]
@@ -126,20 +215,32 @@
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
if (test_temp_block[j] > 0) {
@@ -152,11 +253,23 @@
test_temp_block[j] *= 4;
}
}
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
@@ -164,11 +277,11 @@
}
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
@@ -177,51 +290,247 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
+ int total_coeff_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8() % 2 ? 255 : 0;
- dst[j] = src[j] > 0 ? 0 : 255;
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ if (i == 0) {
+ src[j] = 255;
+ dst[j] = 0;
+ } else if (i == 1) {
+ src[j] = 0;
+ dst[j] = 255;
+ } else {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = rnd.Rand8() % 2 ? 255 : 0;
+ }
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ if (i == 0) {
+ src16[j] = mask_;
+ dst16[j] = 0;
+ } else if (i == 1) {
+ src16[j] = 0;
+ dst16[j] = mask_;
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ }
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
+
+ const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
+ total_coeff_error += abs(coeff_diff);
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
+
+ EXPECT_EQ(0, total_coeff_error)
+      << "Error: Extremal 8x8 FDCT/FHT has "
+ << "overflow issues in the intermediate steps > 1";
}
}
+ void RunInvAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = src16[j] > 0 ? 0 : mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
+ }
+
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ const uint32_t diff = dst[j] - src[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+ void RunFwdAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff_r[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j)
+ in[j] = rnd.Rand8() % 2 == 0 ? mask_ : -mask_;
+
+ RunFwdTxfm(in, coeff, pitch_);
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff_r[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = coeff[j] - coeff_r[j];
+ const uint32_t error = diff * diff;
+ EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 DCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
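+  // (RunFwdAccuracyCheck caps the squared deviation from the double
+  // reference at 9 << 2 * (bit_depth_ - 8), i.e. an absolute error of 3
+  // at 8-bit depth, scaling with amplitude.)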
+
+void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 12;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+ const int16_t *scan = vp9_default_scan_orders[TX_8X8].scan;
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1-2*(i%2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
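+  // CompareInvReference drives only the first eob (12) positions in scan
+  // order, with magnitudes below thresh and signs alternating per block via
+  // (1 - 2 * (i % 2)); the remaining coefficients stay zero so a partial
+  // inverse-transform shortcut can kick in against the full reference.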
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class FwdTrans8x8DCT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<dct_8x8_param_t> {
+ public ::testing::TestWithParam<Dct8x8Param> {
public:
virtual ~FwdTrans8x8DCT() {}
@@ -231,20 +540,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(FwdTrans8x8DCT, SignBiasCheck) {
@@ -259,9 +570,17 @@
RunExtremalCheck();
}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
+ RunFwdAccuracyCheck();
+}
+
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
+ RunInvAccuracyCheck();
+}
+
class FwdTrans8x8HT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<ht_8x8_param_t> {
+ public ::testing::TestWithParam<Ht8x8Param> {
public:
virtual ~FwdTrans8x8HT() {}
@@ -271,20 +590,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(FwdTrans8x8HT, SignBiasCheck) {
@@ -299,45 +620,170 @@
RunExtremalCheck();
}
+class InvTrans8x8DCT
+ : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Idct8x8Param> {
+ public:
+ virtual ~InvTrans8x8DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ pitch_ = 8;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+ void RunFwdTxfm(int16_t *out, tran_low_t *dst, int stride) {}
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans8x8DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0)));
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3)));
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_neon, 0)));
-INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3)));
-#endif
+ make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0,
+ VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ NEON, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0)));
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3)));
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+
+// Optimizations take effect at a threshold of 6201, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&idct8x8_10_add_10_c,
+ &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10,
+ &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10_add_12_c,
+ &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12),
+ make_tuple(&idct8x8_12,
+ &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
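+// With thresh = 6225, rnd(thresh) produces draws on both sides of the 6201
+// cutoff noted above, so each run covers both branches.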
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
+ !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSSE3, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0,
+ VPX_BITS_8)));
#endif
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
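
A small standalone sketch of how the tolerances above grow with bit depth (illustrative only, not part of the test file; it simply mirrors mask_ = (1 << bit_depth_) - 1 and the 1 << 2 * (bit_depth_ - 8) round-trip bound):

#include <cstdio>

int main() {
  const int depths[] = {8, 10, 12};
  for (int i = 0; i < 3; ++i) {
    const int d = depths[i];
    // Peak sample value and allowed per-sample squared round-trip error.
    std::printf("bits=%d  mask=%d  max_roundtrip_err=%d\n",
                d, (1 << d) - 1, 1 << (2 * (d - 8)));
  }
  return 0;
}

At 8/10/12 bits this prints masks 255/1023/4095 and error caps 1/16/256, matching the EXPECT_GE bounds in the checks above.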
|
CWE-119
| null |
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
|
150,868 |
void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fdct8x8_c(in, out, stride);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
const int kSignBiasMaxDiff255 = 1500;
const int kSignBiasMaxDiff15 = 10000;
typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
int tx_type);
typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
int tx_type);
typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
const double kInvSqrt2 = 0.707106781186547524400844362104;
for (int k = 0; k < 8; k++) {
out[k] = 0.0;
for (int n = 0; n < 8; n++)
out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
if (k == 0)
out[k] = out[k] * kInvSqrt2;
}
}
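// Note: the stride argument is unused here; the reference always operates on
// contiguous 8-sample vectors.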
|
@@ -13,52 +13,139 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_8x8_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_8x8_param_t;
+const int kNumCoeffs = 64;
+const double kPi = 3.141592653589793238462643383279502884;
-void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct8x8_c(in, out, stride);
+const int kSignBiasMaxDiff255 = 1500;
+const int kSignBiasMaxDiff15 = 10000;
+
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
+
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
+
+void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
+}
-void fht8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
+ double output[kNumCoeffs]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 8; ++j)
+ output[j * 8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ // Scale by some magic number
+ for (int j = 0; j < 8; ++j)
+ output[j + i * 8] = temp_out[j] * 2;
+ }
+}
+
+
+void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+ vpx_fdct8x8_c(in, out, stride);
+}
+
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
+}
+
+void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 12);
+}
+
+void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct8x8_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 12);
+}
+
+void idct8x8_64_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class FwdTrans8x8TestBase {
public:
virtual ~FwdTrans8x8TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunSignBiasCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_output_block[64]);
int count_sign_block[64][2];
const int count_test_block = 100000;
@@ -67,8 +154,9 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() >> (16 - bit_depth_)) & mask_) -
+ ((rnd.Rand16() >> (16 - bit_depth_)) & mask_);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -81,8 +169,8 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 1125;
- EXPECT_LT(diff, max_diff)
+ const int max_diff = kSignBiasMaxDiff255;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
<< "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-255, 255] at index " << j
@@ -94,10 +182,11 @@
memset(count_sign_block, 0, sizeof(count_sign_block));
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-15, 15].
+ // Initialize a test block with input range [-mask_ / 16, mask_ / 16].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) -
+ ((rnd.Rand16() & mask_) >> 4);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -110,9 +199,9 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 10000;
- EXPECT_LT(diff, max_diff)
- << "Error: 4x4 FDCT/FHT has a sign bias > "
+ const int max_diff = kSignBiasMaxDiff15;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
+ << "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-15, 15] at index " << j
<< " count0: " << count_sign_block[j][0]
@@ -126,20 +215,32 @@
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
if (test_temp_block[j] > 0) {
@@ -152,11 +253,23 @@
test_temp_block[j] *= 4;
}
}
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
@@ -164,11 +277,11 @@
}
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
@@ -177,51 +290,247 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
+ int total_coeff_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8() % 2 ? 255 : 0;
- dst[j] = src[j] > 0 ? 0 : 255;
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ if (i == 0) {
+ src[j] = 255;
+ dst[j] = 0;
+ } else if (i == 1) {
+ src[j] = 0;
+ dst[j] = 255;
+ } else {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = rnd.Rand8() % 2 ? 255 : 0;
+ }
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ if (i == 0) {
+ src16[j] = mask_;
+ dst16[j] = 0;
+ } else if (i == 1) {
+ src16[j] = 0;
+ dst16[j] = mask_;
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ }
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
+
+ const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
+ total_coeff_error += abs(coeff_diff);
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
+
+ EXPECT_EQ(0, total_coeff_error)
+      << "Error: Extremal 8x8 FDCT/FHT has "
+ << "overflow issues in the intermediate steps > 1";
}
}
+ void RunInvAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = src16[j] > 0 ? 0 : mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
+ }
+
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ const uint32_t diff = dst[j] - src[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+ void RunFwdAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff_r[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j)
+ in[j] = rnd.Rand8() % 2 == 0 ? mask_ : -mask_;
+
+ RunFwdTxfm(in, coeff, pitch_);
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff_r[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = coeff[j] - coeff_r[j];
+ const uint32_t error = diff * diff;
+ EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 DCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 12;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+ const int16_t *scan = vp9_default_scan_orders[TX_8X8].scan;
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1-2*(i%2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class FwdTrans8x8DCT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<dct_8x8_param_t> {
+ public ::testing::TestWithParam<Dct8x8Param> {
public:
virtual ~FwdTrans8x8DCT() {}
@@ -231,20 +540,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(FwdTrans8x8DCT, SignBiasCheck) {
@@ -259,9 +570,17 @@
RunExtremalCheck();
}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
+ RunFwdAccuracyCheck();
+}
+
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
+ RunInvAccuracyCheck();
+}
+
class FwdTrans8x8HT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<ht_8x8_param_t> {
+ public ::testing::TestWithParam<Ht8x8Param> {
public:
virtual ~FwdTrans8x8HT() {}
@@ -271,20 +590,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(FwdTrans8x8HT, SignBiasCheck) {
@@ -299,45 +620,170 @@
RunExtremalCheck();
}
+class InvTrans8x8DCT
+ : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Idct8x8Param> {
+ public:
+ virtual ~InvTrans8x8DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ pitch_ = 8;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+ void RunFwdTxfm(int16_t *out, tran_low_t *dst, int stride) {}
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans8x8DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0)));
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3)));
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_neon, 0)));
-INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3)));
-#endif
+ make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0,
+ VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ NEON, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0)));
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3)));
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+
+// Optimizations take effect at a threshold of 6201, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&idct8x8_10_add_10_c,
+ &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10,
+ &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10_add_12_c,
+ &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12),
+ make_tuple(&idct8x8_12,
+ &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
+ !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSSE3, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0,
+ VPX_BITS_8)));
#endif
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
CWE-119
|
vp9_fdct8x8_c(in, out, stride);
|
const int kSignBiasMaxDiff255 = 1500;
const int kSignBiasMaxDiff15 = 10000;
typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
int tx_type);
typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
int tx_type);
typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
const double kInvSqrt2 = 0.707106781186547524400844362104;
for (int k = 0; k < 8; k++) {
out[k] = 0.0;
for (int n = 0; n < 8; n++)
out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
if (k == 0)
out[k] = out[k] * kInvSqrt2;
}
|
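For reference, the loop in reference_8x8_dct_1d above implements an unnormalized 8-point DCT-II (the stride argument is unused):

$$X_k = c_k \sum_{n=0}^{7} x_n \cos\!\left(\frac{(2n+1)k\pi}{16}\right), \qquad c_0 = \frac{1}{\sqrt{2}}, \quad c_k = 1 \text{ for } k > 0.$$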
150,869 |
void fht8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
double output[kNumCoeffs]) {
// First transform columns
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
for (int j = 0; j < 8; ++j)
temp_in[j] = input[j*8 + i];
reference_8x8_dct_1d(temp_in, temp_out, 1);
for (int j = 0; j < 8; ++j)
output[j * 8 + i] = temp_out[j];
}
// Then transform rows
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
for (int j = 0; j < 8; ++j)
temp_in[j] = output[j + i*8];
reference_8x8_dct_1d(temp_in, temp_out, 1);
// Scale by some magic number
for (int j = 0; j < 8; ++j)
output[j + i * 8] = temp_out[j] * 2;
}
}
void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vpx_fdct8x8_c(in, out, stride);
}
void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
|
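A note on the "magic number" in reference_8x8_dct_2d above: each 1-D pass as coded is exactly twice the orthonormal DCT-II, since the orthonormal form carries an extra factor of $\sqrt{2/8} = 1/2$. Two passes therefore leave the float reference at $4\times$ the orthonormal 2-D transform, and the final multiplication by 2 brings it to $8\times$. Presumably this matches the output scale of the fixed-point vpx_fdct8x8_c that the tests compare against, though the source itself only ever calls it a magic number.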
@@ -13,52 +13,139 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-
-extern "C" {
-void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *output, int pitch);
-}
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
namespace {
-typedef void (*fdct_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*idct_t)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*fht_t) (const int16_t *in, int16_t *out, int stride,
- int tx_type);
-typedef void (*iht_t) (const int16_t *in, uint8_t *out, int stride,
- int tx_type);
-typedef std::tr1::tuple<fdct_t, idct_t, int> dct_8x8_param_t;
-typedef std::tr1::tuple<fht_t, iht_t, int> ht_8x8_param_t;
+const int kNumCoeffs = 64;
+const double kPi = 3.141592653589793238462643383279502884;
-void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
- vp9_fdct8x8_c(in, out, stride);
+const int kSignBiasMaxDiff255 = 1500;
+const int kSignBiasMaxDiff15 = 10000;
+
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
+ int tx_type);
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+ int tx_type);
+
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t> Idct8x8Param;
+
+void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
}
-void fht8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
+ double output[kNumCoeffs]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 8; ++j)
+ output[j * 8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ // Scale by some magic number
+ for (int j = 0; j < 8; ++j)
+ output[j + i * 8] = temp_out[j] * 2;
+ }
+}
+
+
+void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+ vpx_fdct8x8_c(in, out, stride);
+}
+
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
+}
+
+void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_c(in, out, stride, 12);
+}
+
+void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+}
+
+void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_c(in, out, stride, 12);
+}
+
+#if HAVE_SSE2
+void idct8x8_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_10_add_sse2(in, out, stride, 12);
+}
+
+void idct8x8_64_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
+}
+
+void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
+ vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
+}
+#endif // HAVE_SSE2
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
class FwdTrans8x8TestBase {
public:
virtual ~FwdTrans8x8TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunSignBiasCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_output_block[64]);
int count_sign_block[64][2];
const int count_test_block = 100000;
@@ -67,8 +154,9 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() >> (16 - bit_depth_)) & mask_) -
+ ((rnd.Rand16() >> (16 - bit_depth_)) & mask_);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -81,8 +169,8 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 1125;
- EXPECT_LT(diff, max_diff)
+ const int max_diff = kSignBiasMaxDiff255;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
<< "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-255, 255] at index " << j
@@ -94,10 +182,11 @@
memset(count_sign_block, 0, sizeof(count_sign_block));
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-15, 15].
+ // Initialize a test block with input range [-mask_ / 16, mask_ / 16].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
- REGISTER_STATE_CHECK(
+ test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) -
+ ((rnd.Rand16() & mask_) >> 4);
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
for (int j = 0; j < 64; ++j) {
@@ -110,9 +199,9 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
- const int max_diff = 10000;
- EXPECT_LT(diff, max_diff)
- << "Error: 4x4 FDCT/FHT has a sign bias > "
+ const int max_diff = kSignBiasMaxDiff15;
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
+ << "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-15, 15] at index " << j
<< " count0: " << count_sign_block[j][0]
@@ -126,20 +215,32 @@
int max_error = 0;
int total_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
if (test_temp_block[j] > 0) {
@@ -152,11 +253,23 @@
test_temp_block[j] *= 4;
}
}
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
@@ -164,11 +277,11 @@
}
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
@@ -177,51 +290,247 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
int total_error = 0;
+ int total_coeff_error = 0;
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
+ DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
+ DECLARE_ALIGNED(16, uint8_t, dst[64]);
+ DECLARE_ALIGNED(16, uint8_t, src[64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[64]);
+ DECLARE_ALIGNED(16, uint16_t, src16[64]);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8() % 2 ? 255 : 0;
- dst[j] = src[j] > 0 ? 0 : 255;
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ if (i == 0) {
+ src[j] = 255;
+ dst[j] = 0;
+ } else if (i == 1) {
+ src[j] = 0;
+ dst[j] = 255;
+ } else {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = rnd.Rand8() % 2 ? 255 : 0;
+ }
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ if (i == 0) {
+ src16[j] = mask_;
+ dst16[j] = 0;
+ } else if (i == 1) {
+ src16[j] = 0;
+ dst16[j] = mask_;
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ }
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
- REGISTER_STATE_CHECK(
+ ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
- REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
+
+ const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
+ total_coeff_error += abs(coeff_diff);
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
+
+ EXPECT_EQ(0, total_coeff_error)
+ << "Error: Extremal 8x8 FDCT/FHT has"
+ << "overflow issues in the intermediate steps > 1";
}
}
+ void RunInvAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = src16[j] > 0 ? 0 : mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
+ }
+
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ const uint32_t diff = dst[j] - src[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+ void RunFwdAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff_r[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j)
+ in[j] = rnd.Rand8() % 2 == 0 ? mask_ : -mask_;
+
+ RunFwdTxfm(in, coeff, pitch_);
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff_r[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = coeff[j] - coeff_r[j];
+ const uint32_t error = diff * diff;
+ EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 DCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ const int eob = 12;
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
+#endif
+ const int16_t *scan = vp9_default_scan_orders[TX_8X8].scan;
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (j < eob) {
+ // Random values less than the threshold, either positive or negative
+ coeff[scan[j]] = rnd(thresh) * (1-2*(i%2));
+ } else {
+ coeff[scan[j]] = 0;
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ dst[j] = 0;
+ ref[j] = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ dst16[j] = 0;
+ ref16[j] = 0;
+#endif
+ }
+ }
+ if (bit_depth_ == VPX_BITS_8) {
+ ref_txfm(coeff, ref, pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
+#else
+ const uint32_t diff = dst[j] - ref[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_EQ(0u, error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
int pitch_;
int tx_type_;
- fht_t fwd_txfm_ref;
+ FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class FwdTrans8x8DCT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<dct_8x8_param_t> {
+ public ::testing::TestWithParam<Dct8x8Param> {
public:
virtual ~FwdTrans8x8DCT() {}
@@ -231,20 +540,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
- fdct_t fwd_txfm_;
- idct_t inv_txfm_;
+ FdctFunc fwd_txfm_;
+ IdctFunc inv_txfm_;
};
TEST_P(FwdTrans8x8DCT, SignBiasCheck) {
@@ -259,9 +570,17 @@
RunExtremalCheck();
}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
+ RunFwdAccuracyCheck();
+}
+
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
+ RunInvAccuracyCheck();
+}
+
class FwdTrans8x8HT
: public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<ht_8x8_param_t> {
+ public ::testing::TestWithParam<Ht8x8Param> {
public:
virtual ~FwdTrans8x8HT() {}
@@ -271,20 +590,22 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
- fht_t fwd_txfm_;
- iht_t inv_txfm_;
+ FhtFunc fwd_txfm_;
+ IhtFunc inv_txfm_;
};
TEST_P(FwdTrans8x8HT, SignBiasCheck) {
@@ -299,45 +620,170 @@
RunExtremalCheck();
}
+class InvTrans8x8DCT
+ : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Idct8x8Param> {
+ public:
+ virtual ~InvTrans8x8DCT() {}
+
+ virtual void SetUp() {
+ ref_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ thresh_ = GET_PARAM(2);
+ pitch_ = 8;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
+ }
+
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride);
+ }
+ void RunFwdTxfm(int16_t *out, tran_low_t *dst, int stride) {}
+
+ IdctFunc ref_txfm_;
+ IdctFunc inv_txfm_;
+ int thresh_;
+};
+
+TEST_P(InvTrans8x8DCT, CompareReference) {
+ CompareInvReference(ref_txfm_, thresh_);
+}
+
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0)));
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3)));
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_NEON
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_neon, 0)));
-INSTANTIATE_TEST_CASE_P(
- DISABLED_NEON, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3)));
-#endif
+ make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0,
+ VPX_BITS_8)));
+#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ NEON, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3, VPX_BITS_8)));
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0)));
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3)));
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
+#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+
+// Optimizations take effect at a threshold of 6201, so we use a value close to
+// that to test both branches.
+INSTANTIATE_TEST_CASE_P(
+ SSE2, InvTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&idct8x8_10_add_10_c,
+ &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10,
+ &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10_add_12_c,
+ &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12),
+ make_tuple(&idct8x8_12,
+ &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
+#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
+ !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSSE3, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0,
+ VPX_BITS_8)));
#endif
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
|
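The bit-depth-dependent bounds introduced throughout the patch above all follow one rule: a one-LSB pixel error at bit depth bd is worth $2^{bd-8}$ 8-bit steps, so a squared-error budget must grow as

$$\bigl(2^{bd-8}\bigr)^2 = 4^{bd-8} = 2^{2(bd-8)},$$

which in the code is written 1 << 2 * (bit_depth_ - 8): 1 at 8 bits, 16 at 10 bits, 256 at 12 bits, with the per-block budget scaled the same way as (count_test_block << 2 * (bit_depth_ - 8)) / 5. The sign-bias checks, by contrast, compare raw imbalance counts rather than squared errors, so the patch scales their bound only linearly, as max_diff << (bit_depth_ - 8).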
CWE-119
| null |
void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
double output[kNumCoeffs]) {
// First transform columns
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
for (int j = 0; j < 8; ++j)
temp_in[j] = input[j*8 + i];
reference_8x8_dct_1d(temp_in, temp_out, 1);
for (int j = 0; j < 8; ++j)
output[j * 8 + i] = temp_out[j];
}
// Then transform rows
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
for (int j = 0; j < 8; ++j)
temp_in[j] = output[j + i*8];
reference_8x8_dct_1d(temp_in, temp_out, 1);
// Scale by some magic number
for (int j = 0; j < 8; ++j)
output[j + i * 8] = temp_out[j] * 2;
}
}
void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vpx_fdct8x8_c(in, out, stride);
}
void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
|
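One pattern in this record worth spelling out: the parameterized test tables need every inverse transform to fit the three-argument IdctFunc signature, while the vpx_highbd_* kernels take a fourth bit-depth argument, so the patch wraps each one in a thin shim that hard-codes the depth. The idct8x8_10 helper from the patch is representative:

void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
  // Bind bd = 10 so the wrapper matches the IdctFunc typedef used in the
  // test tuples; idct8x8_12 does the same with bd = 12.
  vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
}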
150,870 |
virtual void Predict(MB_PREDICTION_MODE mode) {
mbptr_->mode_info_context->mbmi.mode = mode;
REGISTER_STATE_CHECK(pred_fn_(mbptr_,
data_ptr_[0] - kStride,
data_ptr_[0] - 1, kStride,
data_ptr_[0], kStride));
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void Predict(MB_PREDICTION_MODE mode) {
mbptr_->mode_info_context->mbmi.mode = mode;
ASM_REGISTER_STATE_CHECK(pred_fn_(mbptr_,
data_ptr_[0] - kStride,
data_ptr_[0] - 1, kStride,
data_ptr_[0], kStride));
}
|
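Reading Predict() above against the IntraPredYFunc typedef in this record's patch, the pointer arithmetic decodes as follows: the macroblock to be predicted starts at data_ptr_[0], the row of above-neighbour pixels sits one full stride earlier, and the left-neighbour column sits one byte earlier and is walked with the same stride:

uint8_t *const yabove_row = data_ptr_[0] - kStride;  // row above the block
uint8_t *const yleft      = data_ptr_[0] - 1;        // left column, left_stride = kStride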
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <string.h>
-#include "test/acm_random.h"
-#include "test/clear_system_state.h"
-#include "test/register_state_check.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vp8_rtcd.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
@@ -216,16 +216,16 @@
int num_planes_;
};
-typedef void (*intra_pred_y_fn_t)(MACROBLOCKD *x,
- uint8_t *yabove_row,
- uint8_t *yleft,
- int left_stride,
- uint8_t *ypred_ptr,
- int y_stride);
+typedef void (*IntraPredYFunc)(MACROBLOCKD *x,
+ uint8_t *yabove_row,
+ uint8_t *yleft,
+ int left_stride,
+ uint8_t *ypred_ptr,
+ int y_stride);
class IntraPredYTest
: public IntraPredBase,
- public ::testing::TestWithParam<intra_pred_y_fn_t> {
+ public ::testing::TestWithParam<IntraPredYFunc> {
public:
static void SetUpTestCase() {
mb_ = reinterpret_cast<MACROBLOCKD*>(
@@ -261,13 +261,13 @@
virtual void Predict(MB_PREDICTION_MODE mode) {
mbptr_->mode_info_context->mbmi.mode = mode;
- REGISTER_STATE_CHECK(pred_fn_(mbptr_,
- data_ptr_[0] - kStride,
- data_ptr_[0] - 1, kStride,
- data_ptr_[0], kStride));
+ ASM_REGISTER_STATE_CHECK(pred_fn_(mbptr_,
+ data_ptr_[0] - kStride,
+ data_ptr_[0] - 1, kStride,
+ data_ptr_[0], kStride));
}
- intra_pred_y_fn_t pred_fn_;
+ IntraPredYFunc pred_fn_;
static uint8_t* data_array_;
static MACROBLOCKD * mb_;
static MODE_INFO *mi_;
@@ -294,20 +294,30 @@
::testing::Values(
vp8_build_intra_predictors_mby_s_ssse3));
#endif
+#if HAVE_NEON
+INSTANTIATE_TEST_CASE_P(NEON, IntraPredYTest,
+ ::testing::Values(
+ vp8_build_intra_predictors_mby_s_neon));
+#endif
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, IntraPredYTest,
+ ::testing::Values(
+ vp8_build_intra_predictors_mby_s_msa));
+#endif
-typedef void (*intra_pred_uv_fn_t)(MACROBLOCKD *x,
- uint8_t *uabove_row,
- uint8_t *vabove_row,
- uint8_t *uleft,
- uint8_t *vleft,
- int left_stride,
- uint8_t *upred_ptr,
- uint8_t *vpred_ptr,
- int pred_stride);
+typedef void (*IntraPredUvFunc)(MACROBLOCKD *x,
+ uint8_t *uabove_row,
+ uint8_t *vabove_row,
+ uint8_t *uleft,
+ uint8_t *vleft,
+ int left_stride,
+ uint8_t *upred_ptr,
+ uint8_t *vpred_ptr,
+ int pred_stride);
class IntraPredUVTest
: public IntraPredBase,
- public ::testing::TestWithParam<intra_pred_uv_fn_t> {
+ public ::testing::TestWithParam<IntraPredUvFunc> {
public:
static void SetUpTestCase() {
mb_ = reinterpret_cast<MACROBLOCKD*>(
@@ -349,7 +359,7 @@
data_ptr_[0], data_ptr_[1], kStride);
}
- intra_pred_uv_fn_t pred_fn_;
+ IntraPredUvFunc pred_fn_;
// We use 24 so that the data pointer of the first pixel in each row of
// each macroblock is 8-byte aligned, and this gives us access to the
// top-left and top-right corner pixels belonging to the top-left/right
@@ -382,5 +392,15 @@
::testing::Values(
vp8_build_intra_predictors_mbuv_s_ssse3));
#endif
+#if HAVE_NEON
+INSTANTIATE_TEST_CASE_P(NEON, IntraPredUVTest,
+ ::testing::Values(
+ vp8_build_intra_predictors_mbuv_s_neon));
+#endif
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, IntraPredUVTest,
+ ::testing::Values(
+ vp8_build_intra_predictors_mbuv_s_msa));
+#endif
} // namespace
|
CWE-119
|
REGISTER_STATE_CHECK(pred_fn_(mbptr_,
data_ptr_[0] - kStride,
data_ptr_[0] - 1, kStride,
data_ptr_[0], kStride));
|
ASM_REGISTER_STATE_CHECK(pred_fn_(mbptr_,
data_ptr_[0] - kStride,
data_ptr_[0] - 1, kStride,
data_ptr_[0], kStride));
|
150,871 |
virtual void SetUp() {
full_itxfm_ = GET_PARAM(0);
partial_itxfm_ = GET_PARAM(1);
tx_size_ = GET_PARAM(2);
last_nonzero_ = GET_PARAM(3);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
ftxfm_ = GET_PARAM(0);
full_itxfm_ = GET_PARAM(1);
partial_itxfm_ = GET_PARAM(2);
tx_size_ = GET_PARAM(3);
last_nonzero_ = GET_PARAM(4);
}
|
@@ -13,12 +13,13 @@
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_scan.h"
#include "vpx/vpx_integer.h"
@@ -26,20 +27,22 @@
using libvpx_test::ACMRandom;
namespace {
-typedef void (*fwd_txfm_t)(const int16_t *in, int16_t *out, int stride);
-typedef void (*inv_txfm_t)(const int16_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<inv_txfm_t,
- inv_txfm_t,
- TX_SIZE, int> partial_itxfm_param_t;
+typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef std::tr1::tuple<FwdTxfmFunc,
+ InvTxfmFunc,
+ InvTxfmFunc,
+ TX_SIZE, int> PartialInvTxfmParam;
const int kMaxNumCoeffs = 1024;
-class PartialIDctTest : public ::testing::TestWithParam<partial_itxfm_param_t> {
+class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
public:
virtual ~PartialIDctTest() {}
virtual void SetUp() {
- full_itxfm_ = GET_PARAM(0);
- partial_itxfm_ = GET_PARAM(1);
- tx_size_ = GET_PARAM(2);
- last_nonzero_ = GET_PARAM(3);
+ ftxfm_ = GET_PARAM(0);
+ full_itxfm_ = GET_PARAM(1);
+ partial_itxfm_ = GET_PARAM(2);
+ tx_size_ = GET_PARAM(3);
+ last_nonzero_ = GET_PARAM(4);
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
@@ -47,10 +50,90 @@
protected:
int last_nonzero_;
TX_SIZE tx_size_;
- inv_txfm_t full_itxfm_;
- inv_txfm_t partial_itxfm_;
+ FwdTxfmFunc ftxfm_;
+ InvTxfmFunc full_itxfm_;
+ InvTxfmFunc partial_itxfm_;
};
+TEST_P(PartialIDctTest, RunQuantCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int size;
+ switch (tx_size_) {
+ case TX_4X4:
+ size = 4;
+ break;
+ case TX_8X8:
+ size = 8;
+ break;
+ case TX_16X16:
+ size = 16;
+ break;
+ case TX_32X32:
+ size = 32;
+ break;
+ default:
+ FAIL() << "Wrong Size!";
+ break;
+ }
+ DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst1[kMaxNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst2[kMaxNumCoeffs]);
+
+ const int count_test_block = 1000;
+ const int block_size = size * size;
+
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kMaxNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kMaxNumCoeffs]);
+
+ int max_error = 0;
+ for (int i = 0; i < count_test_block; ++i) {
+ // clear out destination buffer
+ memset(dst1, 0, sizeof(*dst1) * block_size);
+ memset(dst2, 0, sizeof(*dst2) * block_size);
+ memset(test_coef_block1, 0, sizeof(*test_coef_block1) * block_size);
+ memset(test_coef_block2, 0, sizeof(*test_coef_block2) * block_size);
+
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ if (i == 0) {
+ for (int j = 0; j < block_size; ++j)
+ input_extreme_block[j] = 255;
+ } else if (i == 1) {
+ for (int j = 0; j < block_size; ++j)
+ input_extreme_block[j] = -255;
+ } else {
+ for (int j = 0; j < block_size; ++j) {
+ input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ }
+ }
+
+ ftxfm_(input_extreme_block, output_ref_block, size);
+
+ // quantization with maximum allowed step sizes
+ test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
+ for (int j = 1; j < last_nonzero_; ++j)
+ test_coef_block1[vp9_default_scan_orders[tx_size_].scan[j]]
+ = (output_ref_block[j] / 1828) * 1828;
+ }
+
+ ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
+ ASM_REGISTER_STATE_CHECK(partial_itxfm_(test_coef_block1, dst2, size));
+
+ for (int j = 0; j < block_size; ++j) {
+ const int diff = dst1[j] - dst2[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ }
+ }
+
+ EXPECT_EQ(0, max_error)
+ << "Error: partial inverse transform produces different results";
+}
+
TEST_P(PartialIDctTest, ResultsMatch) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int size;
@@ -71,10 +154,10 @@
FAIL() << "Wrong Size!";
break;
}
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_coef_block1, kMaxNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_coef_block2, kMaxNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst1, kMaxNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst2, kMaxNumCoeffs);
+ DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst1[kMaxNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst2[kMaxNumCoeffs]);
const int count_test_block = 1000;
const int max_coeff = 32766 / 4;
const int block_size = size * size;
@@ -100,8 +183,8 @@
memcpy(test_coef_block2, test_coef_block1,
sizeof(*test_coef_block2) * block_size);
- REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
- REGISTER_STATE_CHECK(partial_itxfm_(test_coef_block2, dst2, size));
+ ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
+ ASM_REGISTER_STATE_CHECK(partial_itxfm_(test_coef_block2, dst2, size));
for (int j = 0; j < block_size; ++j) {
const int diff = dst1[j] - dst2[j];
@@ -119,75 +202,142 @@
INSTANTIATE_TEST_CASE_P(
C, PartialIDctTest,
::testing::Values(
- make_tuple(&vp9_idct32x32_1024_add_c,
- &vp9_idct32x32_34_add_c,
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_34_add_c,
TX_32X32, 34),
- make_tuple(&vp9_idct32x32_1024_add_c,
- &vp9_idct32x32_1_add_c,
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_c,
TX_32X32, 1),
- make_tuple(&vp9_idct16x16_256_add_c,
- &vp9_idct16x16_10_add_c,
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_c,
TX_16X16, 10),
- make_tuple(&vp9_idct16x16_256_add_c,
- &vp9_idct16x16_1_add_c,
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_c,
TX_16X16, 1),
- make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_10_add_c,
- TX_8X8, 10),
- make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_1_add_c,
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_c,
+ TX_8X8, 12),
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_c,
TX_8X8, 1),
- make_tuple(&vp9_idct4x4_16_add_c,
- &vp9_idct4x4_1_add_c,
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_c,
TX_4X4, 1)));
-#if HAVE_NEON
+
+#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, PartialIDctTest,
::testing::Values(
- make_tuple(&vp9_idct32x32_1024_add_c,
- &vp9_idct32x32_1_add_neon,
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_neon,
TX_32X32, 1),
- make_tuple(&vp9_idct16x16_256_add_c,
- &vp9_idct16x16_10_add_neon,
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_neon,
TX_16X16, 10),
- make_tuple(&vp9_idct16x16_256_add_c,
- &vp9_idct16x16_1_add_neon,
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_neon,
TX_16X16, 1),
- make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_10_add_neon,
- TX_8X8, 10),
- make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_1_add_neon,
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_neon,
+ TX_8X8, 12),
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_neon,
TX_8X8, 1),
- make_tuple(&vp9_idct4x4_16_add_c,
- &vp9_idct4x4_1_add_neon,
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_neon,
TX_4X4, 1)));
-#endif
+#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, PartialIDctTest,
::testing::Values(
- make_tuple(&vp9_idct32x32_1024_add_c,
- &vp9_idct32x32_34_add_sse2,
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_34_add_sse2,
TX_32X32, 34),
- make_tuple(&vp9_idct32x32_1024_add_c,
- &vp9_idct32x32_1_add_sse2,
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_sse2,
TX_32X32, 1),
- make_tuple(&vp9_idct16x16_256_add_c,
- &vp9_idct16x16_10_add_sse2,
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_sse2,
TX_16X16, 10),
- make_tuple(&vp9_idct16x16_256_add_c,
- &vp9_idct16x16_1_add_sse2,
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_sse2,
TX_16X16, 1),
- make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_10_add_sse2,
- TX_8X8, 10),
- make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_1_add_sse2,
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_sse2,
+ TX_8X8, 12),
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_sse2,
TX_8X8, 1),
- make_tuple(&vp9_idct4x4_16_add_c,
- &vp9_idct4x4_1_add_sse2,
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_sse2,
TX_4X4, 1)));
#endif
+
+#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
+ !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ SSSE3_64, PartialIDctTest,
+ ::testing::Values(
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_ssse3,
+ TX_8X8, 12)));
+#endif
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, PartialIDctTest,
+ ::testing::Values(
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_34_add_msa,
+ TX_32X32, 34),
+ make_tuple(&vpx_fdct32x32_c,
+ &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_msa,
+ TX_32X32, 1),
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_msa,
+ TX_16X16, 10),
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_msa,
+ TX_16X16, 1),
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_msa,
+ TX_8X8, 10),
+ make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_msa,
+ TX_8X8, 1),
+ make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_msa,
+ TX_4X4, 1)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
} // namespace
|
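The quantization step in RunQuantCheck above is a quantize/dequantize round trip: C integer division truncates, so (c / q) * q snaps a reference coefficient c onto the coarsest grid an encoder could emit at step size q. A minimal standalone sketch of that step, using the step sizes the test treats as the maximum allowed:

int quantize_roundtrip(int coeff, int step) {
  // Truncating division followed by multiplication yields the multiple of
  // `step` between zero and `coeff` that is nearest to `coeff`.
  return (coeff / step) * step;
}
// The DC coefficient uses step 1336, all later coefficients step 1828.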
CWE-119
|
full_itxfm_ = GET_PARAM(0);
partial_itxfm_ = GET_PARAM(1);
tx_size_ = GET_PARAM(2);
last_nonzero_ = GET_PARAM(3);
|
ftxfm_ = GET_PARAM(0);
full_itxfm_ = GET_PARAM(1);
partial_itxfm_ = GET_PARAM(2);
tx_size_ = GET_PARAM(3);
last_nonzero_ = GET_PARAM(4);
|
150,872 |
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
#if WRITE_COMPRESSED_STREAM
++out_frames_;
if (pkt->data.frame.pts == 0)
write_ivf_file_header(&cfg_, 0, outfile_);
write_ivf_frame_header(pkt, outfile_);
(void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_);
#endif
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
#if WRITE_COMPRESSED_STREAM
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
++out_frames_;
if (pkt->data.frame.pts == 0)
write_ivf_file_header(&cfg_, 0, outfile_);
write_ivf_frame_header(pkt, outfile_);
(void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_);
}
|
@@ -144,6 +144,7 @@
TEST_P(ResizeTest, TestExternalResizeWorks) {
ResizingVideoSource video;
+ cfg_.g_lag_in_frames = 0;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (std::vector<FrameInfo>::const_iterator info = frame_info_list_.begin();
@@ -153,9 +154,9 @@
const unsigned int expected_h = ScaleForFrameNumber(frame, kInitialHeight);
EXPECT_EQ(expected_w, info->w)
- << "Frame " << frame << "had unexpected width";
+ << "Frame " << frame << " had unexpected width";
EXPECT_EQ(expected_h, info->h)
- << "Frame " << frame << "had unexpected height";
+ << "Frame " << frame << " had unexpected height";
}
}
@@ -211,8 +212,8 @@
EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
}
- virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
#if WRITE_COMPRESSED_STREAM
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
++out_frames_;
// Write initial file header if first frame.
@@ -222,8 +223,8 @@
// Write frame header and data.
write_ivf_frame_header(pkt, outfile_);
(void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_);
-#endif
}
+#endif
double frame0_psnr_;
#if WRITE_COMPRESSED_STREAM
@@ -260,7 +261,116 @@
}
}
+vpx_img_fmt_t CspForFrameNumber(int frame) {
+ if (frame < 10)
+ return VPX_IMG_FMT_I420;
+ if (frame < 20)
+ return VPX_IMG_FMT_I444;
+ return VPX_IMG_FMT_I420;
+}
+
+class ResizeCspTest : public ResizeTest {
+ protected:
+#if WRITE_COMPRESSED_STREAM
+ ResizeCspTest()
+ : ResizeTest(),
+ frame0_psnr_(0.0),
+ outfile_(NULL),
+ out_frames_(0) {}
+#else
+ ResizeCspTest() : ResizeTest(), frame0_psnr_(0.0) {}
+#endif
+
+ virtual ~ResizeCspTest() {}
+
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+#if WRITE_COMPRESSED_STREAM
+ outfile_ = fopen("vp91-2-05-cspchape.ivf", "wb");
+#endif
+ }
+
+ virtual void EndPassHook() {
+#if WRITE_COMPRESSED_STREAM
+ if (outfile_) {
+ if (!fseek(outfile_, 0, SEEK_SET))
+ write_ivf_file_header(&cfg_, out_frames_, outfile_);
+ fclose(outfile_);
+ outfile_ = NULL;
+ }
+#endif
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (CspForFrameNumber(video->frame()) != VPX_IMG_FMT_I420 &&
+ cfg_.g_profile != 1) {
+ cfg_.g_profile = 1;
+ encoder->Config(&cfg_);
+ }
+ if (CspForFrameNumber(video->frame()) == VPX_IMG_FMT_I420 &&
+ cfg_.g_profile != 0) {
+ cfg_.g_profile = 0;
+ encoder->Config(&cfg_);
+ }
+ }
+
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (!frame0_psnr_)
+ frame0_psnr_ = pkt->data.psnr.psnr[0];
+ EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
+ }
+
+#if WRITE_COMPRESSED_STREAM
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ ++out_frames_;
+
+ // Write initial file header if first frame.
+ if (pkt->data.frame.pts == 0)
+ write_ivf_file_header(&cfg_, 0, outfile_);
+
+ // Write frame header and data.
+ write_ivf_frame_header(pkt, outfile_);
+ (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_);
+ }
+#endif
+
+ double frame0_psnr_;
+#if WRITE_COMPRESSED_STREAM
+ FILE *outfile_;
+ unsigned int out_frames_;
+#endif
+};
+
+class ResizingCspVideoSource : public ::libvpx_test::DummyVideoSource {
+ public:
+ ResizingCspVideoSource() {
+ SetSize(kInitialWidth, kInitialHeight);
+ limit_ = 30;
+ }
+
+ virtual ~ResizingCspVideoSource() {}
+
+ protected:
+ virtual void Next() {
+ ++frame_;
+ SetImageFormat(CspForFrameNumber(frame_));
+ FillFrame();
+ }
+};
+
+TEST_P(ResizeCspTest, TestResizeCspWorks) {
+ ResizingCspVideoSource video;
+ init_flags_ = VPX_CODEC_USE_PSNR;
+ cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = 48;
+ cfg_.g_lag_in_frames = 0;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+
VP8_INSTANTIATE_TEST_CASE(ResizeTest, ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ResizeTest,
+ ::testing::Values(::libvpx_test::kRealTime));
VP9_INSTANTIATE_TEST_CASE(ResizeInternalTest,
::testing::Values(::libvpx_test::kOnePassBest));
+VP9_INSTANTIATE_TEST_CASE(ResizeCspTest,
+ ::testing::Values(::libvpx_test::kRealTime));
} // namespace
|
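A detail of the stream-dump hooks in this record: write_ivf_file_header is called twice per output file. FramePktHook writes a placeholder header with a frame count of 0 on the first packet (pts == 0), and EndPassHook later seeks back to the start and rewrites it with the real count:

if (!fseek(outfile_, 0, SEEK_SET))
  write_ivf_file_header(&cfg_, out_frames_, outfile_);

so the frame-count field in the IVF header is only valid once the pass has finished.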
CWE-119
|
#endif
|
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
|
150,873 |
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
for (int block = 0; block < 4; block++) {
reference_sad = ReferenceSAD(UINT_MAX, block);
EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
for (int block = 0; block < 4; ++block) {
reference_sad = ReferenceSAD(block);
EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
}
}
|
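In formula form, the scalar references that CheckSADs validates against are

$$\mathrm{SAD} = \sum_{h=0}^{H-1} \sum_{w=0}^{W-1} \bigl|\, s(h,w) - r(h,w) \,\bigr|$$

and, for ReferenceSADavg, the same sum with r(h,w) replaced by the rounded average ROUND_POWER_OF_TWO(p + r, 1), i.e. (p + r + 1) >> 1, of the reference pixel and the second predictor. The x4 functions under test compute four such SADs, one source block against four reference blocks, in a single call.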
@@ -13,56 +13,74 @@
#include <limits.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#if CONFIG_VP8_ENCODER
-#include "./vp8_rtcd.h"
-#endif
-#if CONFIG_VP9_ENCODER
-#include "./vp9_rtcd.h"
-#endif
-#include "vpx_mem/vpx_mem.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
-typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
- int source_stride,
- const unsigned char *reference_ptr,
- int reference_stride,
- unsigned int max_sad);
-typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ const uint8_t *second_pred);
+typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
-typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
- int src_stride,
- const unsigned char * const ref_ptr[],
- int ref_stride,
- unsigned int *sad_array);
-typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
- sad_n_by_n_by_4_test_param_t;
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[],
+ int ref_stride,
+ uint32_t *sad_array);
+typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
using libvpx_test::ACMRandom;
namespace {
class SADTestBase : public ::testing::Test {
public:
- SADTestBase(int width, int height) : width_(width), height_(height) {}
+ SADTestBase(int width, int height, int bit_depth) :
+ width_(width), height_(height), bd_(bit_depth) {}
static void SetUpTestCase() {
- source_data_ = reinterpret_cast<uint8_t*>(
+ source_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
- reference_data_ = reinterpret_cast<uint8_t*>(
+ reference_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
+ second_pred8_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, 64*64));
+ source_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
+ reference_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
+ second_pred16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
}
static void TearDownTestCase() {
- vpx_free(source_data_);
- source_data_ = NULL;
- vpx_free(reference_data_);
- reference_data_ = NULL;
+ vpx_free(source_data8_);
+ source_data8_ = NULL;
+ vpx_free(reference_data8_);
+ reference_data8_ = NULL;
+ vpx_free(second_pred8_);
+ second_pred8_ = NULL;
+ vpx_free(source_data16_);
+ source_data16_ = NULL;
+ vpx_free(reference_data16_);
+ reference_data16_ = NULL;
+ vpx_free(second_pred16_);
+ second_pred16_ = NULL;
}
virtual void TearDown() {
@@ -76,142 +94,335 @@
static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
+ if (bd_ == -1) {
+ use_high_bit_depth_ = false;
+ bit_depth_ = VPX_BITS_8;
+ source_data_ = source_data8_;
+ reference_data_ = reference_data8_;
+ second_pred_ = second_pred8_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ use_high_bit_depth_ = true;
+ bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
+ source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
+ reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
+ second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
- virtual uint8_t* GetReference(int block_idx) {
+ virtual uint8_t *GetReference(int block_idx) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_high_bit_depth_)
+ return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
+ block_idx * kDataBlockSize);
+#endif // CONFIG_VP9_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
// Sum of Absolute Differences. Given two blocks, calculate the absolute
// difference between two pixels in the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ReferenceSAD(int block_idx) {
unsigned int sad = 0;
- const uint8_t* const reference = GetReference(block_idx);
-
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- sad += abs(source_data_[h * source_stride_ + w]
- - reference[h * reference_stride_ + w]);
- }
- if (sad > max_sad) {
- break;
+ if (!use_high_bit_depth_) {
+ sad += abs(source8[h * source_stride_ + w] -
+ reference8[h * reference_stride_ + w]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ sad += abs(source16[h * source_stride_ + w] -
+ reference16[h * reference_stride_ + w]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
return sad;
}
- void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) {
+ // Sum of Absolute Differences Average. Given two blocks and a prediction,
+ // calculate the absolute difference between each source pixel and the rounded
+ // average of the corresponding reference and predicted pixels; accumulate.
+ unsigned int ReferenceSADavg(int block_idx) {
+ unsigned int sad = 0;
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+ const uint8_t *const second_pred8 = second_pred_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+ const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = fill_constant;
+ if (!use_high_bit_depth_) {
+ const int tmp = second_pred8[h * width_ + w] +
+ reference8[h * reference_stride_ + w];
+ const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source8[h * source_stride_ + w] - comp_pred);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ const int tmp = second_pred16[h * width_ + w] +
+ reference16[h * reference_stride_ + w];
+ const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source16[h * source_stride_ + w] - comp_pred);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ return sad;
+ }
+
+ void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ for (int h = 0; h < height_; ++h) {
+ for (int w = 0; w < width_; ++w) {
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = fill_constant;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
void FillRandom(uint8_t *data, int stride) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = rnd_.Rand8();
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
- int width_, height_;
- static uint8_t* source_data_;
+ int width_, height_, mask_, bd_;
+ vpx_bit_depth_t bit_depth_;
+ static uint8_t *source_data_;
+ static uint8_t *reference_data_;
+ static uint8_t *second_pred_;
int source_stride_;
- static uint8_t* reference_data_;
+ bool use_high_bit_depth_;
+ static uint8_t *source_data8_;
+ static uint8_t *reference_data8_;
+ static uint8_t *second_pred8_;
+ static uint16_t *source_data16_;
+ static uint16_t *reference_data16_;
+ static uint16_t *second_pred16_;
int reference_stride_;
ACMRandom rnd_;
};
-class SADTest : public SADTestBase,
- public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+class SADx4Test
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNx4Param> {
public:
- SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
-
- protected:
- unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
- unsigned int ret;
- const uint8_t* const reference = GetReference(block_idx);
-
- REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
- reference, reference_stride_,
- max_sad));
- return ret;
- }
-
- void CheckSad(unsigned int max_sad) {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(max_sad);
- exp_sad = SAD(max_sad);
-
- if (reference_sad <= max_sad) {
- ASSERT_EQ(exp_sad, reference_sad);
- } else {
- // Alternative implementations are not required to check max_sad
- ASSERT_GE(exp_sad, reference_sad);
- }
- }
-};
-
-class SADx4Test : public SADTestBase,
- public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
- public:
- SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
protected:
void SADs(unsigned int *results) {
- const uint8_t* refs[] = {GetReference(0), GetReference(1),
- GetReference(2), GetReference(3)};
+ const uint8_t *references[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
- REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
- refs, reference_stride_,
- results));
+ ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ references, reference_stride_,
+ results));
}
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
- for (int block = 0; block < 4; block++) {
- reference_sad = ReferenceSAD(UINT_MAX, block);
+ for (int block = 0; block < 4; ++block) {
+ reference_sad = ReferenceSAD(block);
- EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
}
}
};
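// Background assumed here, not stated in the patch: the x4d ("4 distances")
// kernels compute the SAD of one source block against four candidate
// reference blocks in a single call so motion search can amortize loads of
// the source rows, hence the test validates all four outputs per call.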
-uint8_t* SADTestBase::source_data_ = NULL;
-uint8_t* SADTestBase::reference_data_ = NULL;
+class SADTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNParam> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSAD(0);
+ const unsigned int exp_sad = SAD(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+class SADavgTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNAvgParam> {
+ public:
+ SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD_avg(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ second_pred_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSADavg(0);
+ const unsigned int exp_sad = SAD_avg(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+uint8_t *SADTestBase::source_data_ = NULL;
+uint8_t *SADTestBase::reference_data_ = NULL;
+uint8_t *SADTestBase::second_pred_ = NULL;
+uint8_t *SADTestBase::source_data8_ = NULL;
+uint8_t *SADTestBase::reference_data8_ = NULL;
+uint8_t *SADTestBase::second_pred8_ = NULL;
+uint16_t *SADTestBase::source_data16_ = NULL;
+uint16_t *SADTestBase::reference_data16_ = NULL;
+uint16_t *SADTestBase::second_pred16_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(reference_data_, reference_stride_, 255);
- CheckSad(UINT_MAX);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ CheckSAD();
+}
+
+TEST_P(SADTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
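// Shrinking the stride by one makes each row after the first start at an
// odd offset from the aligned base allocation, exercising SIMD kernels
// that must tolerate unaligned reference loads.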
+
+TEST_P(SADTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+TEST_P(SADavgTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADavgTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
}
TEST_P(SADx4Test, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(GetReference(0), reference_stride_, 255);
- FillConstant(GetReference(1), reference_stride_, 255);
- FillConstant(GetReference(2), reference_stride_, 255);
- FillConstant(GetReference(3), reference_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, mask_);
+ FillConstant(GetReference(1), reference_stride_, mask_);
+ FillConstant(GetReference(2), reference_stride_, mask_);
+ FillConstant(GetReference(3), reference_stride_, mask_);
CheckSADs();
}
-TEST_P(SADTest, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(UINT_MAX);
-}
-
TEST_P(SADx4Test, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
+ FillConstant(source_data_, source_stride_, mask_);
FillConstant(GetReference(0), reference_stride_, 0);
FillConstant(GetReference(1), reference_stride_, 0);
FillConstant(GetReference(2), reference_stride_, 0);
@@ -219,15 +430,6 @@
CheckSADs();
}
-TEST_P(SADTest, ShortRef) {
- int tmp_stride = reference_stride_;
- reference_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -240,17 +442,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, UnalignedRef) {
- // The reference frame, but not the source frame, may be unaligned for
- // certain types of searches.
- int tmp_stride = reference_stride_;
- reference_stride_ -= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -265,15 +456,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, ShortSrc) {
- int tmp_stride = source_stride_;
- source_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- source_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -286,271 +468,743 @@
source_stride_ = tmp_stride;
}
-TEST_P(SADTest, MaxSAD) {
- // Verify that, when max_sad is set, the implementation does not return a
- // value lower than the reference.
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(128);
+TEST_P(SADx4Test, SrcAlignedByWidth) {
+  uint8_t *tmp_source_data = source_data_;
+ source_data_ += width_;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_data_ = tmp_source_data;
}
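// Advancing the source pointer by width_ leaves the source aligned only to
// the block width rather than to the allocation's 32-byte boundary, which
// appears to be the case this test targets.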
using std::tr1::make_tuple;
//------------------------------------------------------------------------------
// C functions
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
-const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
-const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
-const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
-const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
-const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
-const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
-#endif
-const sad_m_by_n_test_param_t c_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(64, 64, sad_64x64_c_vp9),
- make_tuple(32, 32, sad_32x32_c_vp9),
- make_tuple(16, 16, sad_16x16_c_vp9),
- make_tuple(8, 16, sad_8x16_c_vp9),
- make_tuple(16, 8, sad_16x8_c_vp9),
- make_tuple(8, 8, sad_8x8_c_vp9),
- make_tuple(8, 4, sad_8x4_c_vp9),
- make_tuple(4, 8, sad_4x8_c_vp9),
- make_tuple(4, 4, sad_4x4_c_vp9),
-#endif
+const SadMxNFunc sad64x64_c = vpx_sad64x64_c;
+const SadMxNFunc sad64x32_c = vpx_sad64x32_c;
+const SadMxNFunc sad32x64_c = vpx_sad32x64_c;
+const SadMxNFunc sad32x32_c = vpx_sad32x32_c;
+const SadMxNFunc sad32x16_c = vpx_sad32x16_c;
+const SadMxNFunc sad16x32_c = vpx_sad16x32_c;
+const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
+const SadMxNFunc sad16x8_c = vpx_sad16x8_c;
+const SadMxNFunc sad8x16_c = vpx_sad8x16_c;
+const SadMxNFunc sad8x8_c = vpx_sad8x8_c;
+const SadMxNFunc sad8x4_c = vpx_sad8x4_c;
+const SadMxNFunc sad4x8_c = vpx_sad4x8_c;
+const SadMxNFunc sad4x4_c = vpx_sad4x4_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_c = vpx_highbd_sad64x64_c;
+const SadMxNFunc highbd_sad64x32_c = vpx_highbd_sad64x32_c;
+const SadMxNFunc highbd_sad32x64_c = vpx_highbd_sad32x64_c;
+const SadMxNFunc highbd_sad32x32_c = vpx_highbd_sad32x32_c;
+const SadMxNFunc highbd_sad32x16_c = vpx_highbd_sad32x16_c;
+const SadMxNFunc highbd_sad16x32_c = vpx_highbd_sad16x32_c;
+const SadMxNFunc highbd_sad16x16_c = vpx_highbd_sad16x16_c;
+const SadMxNFunc highbd_sad16x8_c = vpx_highbd_sad16x8_c;
+const SadMxNFunc highbd_sad8x16_c = vpx_highbd_sad8x16_c;
+const SadMxNFunc highbd_sad8x8_c = vpx_highbd_sad8x8_c;
+const SadMxNFunc highbd_sad8x4_c = vpx_highbd_sad8x4_c;
+const SadMxNFunc highbd_sad4x8_c = vpx_highbd_sad4x8_c;
+const SadMxNFunc highbd_sad4x4_c = vpx_highbd_sad4x4_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam c_tests[] = {
+ make_tuple(64, 64, sad64x64_c, -1),
+ make_tuple(64, 32, sad64x32_c, -1),
+ make_tuple(32, 64, sad32x64_c, -1),
+ make_tuple(32, 32, sad32x32_c, -1),
+ make_tuple(32, 16, sad32x16_c, -1),
+ make_tuple(16, 32, sad16x32_c, -1),
+ make_tuple(16, 16, sad16x16_c, -1),
+ make_tuple(16, 8, sad16x8_c, -1),
+ make_tuple(8, 16, sad8x16_c, -1),
+ make_tuple(8, 8, sad8x8_c, -1),
+ make_tuple(8, 4, sad8x4_c, -1),
+ make_tuple(4, 8, sad4x8_c, -1),
+ make_tuple(4, 4, sad4x4_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
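// The fourth make_tuple argument is the bit depth forwarded to SADTestBase:
// -1 selects the plain 8-bit path, while 8, 10 and 12 select the
// vpx_highbd_* kernels at that depth, e.g.
//   make_tuple(16, 16, highbd_sad16x16_c, 10)  // 10-bit, mask_ = 0x3ff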
-#if CONFIG_VP9_ENCODER
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
-INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_c),
- make_tuple(64, 32, sad_64x32x4d_c),
- make_tuple(32, 64, sad_32x64x4d_c),
- make_tuple(32, 32, sad_32x32x4d_c),
- make_tuple(32, 16, sad_32x16x4d_c),
- make_tuple(16, 32, sad_16x32x4d_c),
- make_tuple(16, 16, sad_16x16x4d_c),
- make_tuple(16, 8, sad_16x8x4d_c),
- make_tuple(8, 16, sad_8x16x4d_c),
- make_tuple(8, 8, sad_8x8x4d_c),
- make_tuple(8, 4, sad_8x4x4d_c),
- make_tuple(4, 8, sad_4x8x4d_c),
- make_tuple(4, 4, sad_4x4x4d_c)));
-#endif // CONFIG_VP9_ENCODER
+const SadMxNAvgFunc sad64x64_avg_c = vpx_sad64x64_avg_c;
+const SadMxNAvgFunc sad64x32_avg_c = vpx_sad64x32_avg_c;
+const SadMxNAvgFunc sad32x64_avg_c = vpx_sad32x64_avg_c;
+const SadMxNAvgFunc sad32x32_avg_c = vpx_sad32x32_avg_c;
+const SadMxNAvgFunc sad32x16_avg_c = vpx_sad32x16_avg_c;
+const SadMxNAvgFunc sad16x32_avg_c = vpx_sad16x32_avg_c;
+const SadMxNAvgFunc sad16x16_avg_c = vpx_sad16x16_avg_c;
+const SadMxNAvgFunc sad16x8_avg_c = vpx_sad16x8_avg_c;
+const SadMxNAvgFunc sad8x16_avg_c = vpx_sad8x16_avg_c;
+const SadMxNAvgFunc sad8x8_avg_c = vpx_sad8x8_avg_c;
+const SadMxNAvgFunc sad8x4_avg_c = vpx_sad8x4_avg_c;
+const SadMxNAvgFunc sad4x8_avg_c = vpx_sad4x8_avg_c;
+const SadMxNAvgFunc sad4x4_avg_c = vpx_sad4x4_avg_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_c = vpx_highbd_sad64x64_avg_c;
+const SadMxNAvgFunc highbd_sad64x32_avg_c = vpx_highbd_sad64x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x64_avg_c = vpx_highbd_sad32x64_avg_c;
+const SadMxNAvgFunc highbd_sad32x32_avg_c = vpx_highbd_sad32x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x16_avg_c = vpx_highbd_sad32x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x32_avg_c = vpx_highbd_sad16x32_avg_c;
+const SadMxNAvgFunc highbd_sad16x16_avg_c = vpx_highbd_sad16x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x8_avg_c = vpx_highbd_sad16x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x16_avg_c = vpx_highbd_sad8x16_avg_c;
+const SadMxNAvgFunc highbd_sad8x8_avg_c = vpx_highbd_sad8x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x4_avg_c = vpx_highbd_sad8x4_avg_c;
+const SadMxNAvgFunc highbd_sad4x8_avg_c = vpx_highbd_sad4x8_avg_c;
+const SadMxNAvgFunc highbd_sad4x4_avg_c = vpx_highbd_sad4x4_avg_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_c_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_c, -1),
+ make_tuple(64, 32, sad64x32_avg_c, -1),
+ make_tuple(32, 64, sad32x64_avg_c, -1),
+ make_tuple(32, 32, sad32x32_avg_c, -1),
+ make_tuple(32, 16, sad32x16_avg_c, -1),
+ make_tuple(16, 32, sad16x32_avg_c, -1),
+ make_tuple(16, 16, sad16x16_avg_c, -1),
+ make_tuple(16, 8, sad16x8_avg_c, -1),
+ make_tuple(8, 16, sad8x16_avg_c, -1),
+ make_tuple(8, 8, sad8x8_avg_c, -1),
+ make_tuple(8, 4, sad8x4_avg_c, -1),
+ make_tuple(4, 8, sad4x8_avg_c, -1),
+ make_tuple(4, 4, sad4x4_avg_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
+
+const SadMxNx4Func sad64x64x4d_c = vpx_sad64x64x4d_c;
+const SadMxNx4Func sad64x32x4d_c = vpx_sad64x32x4d_c;
+const SadMxNx4Func sad32x64x4d_c = vpx_sad32x64x4d_c;
+const SadMxNx4Func sad32x32x4d_c = vpx_sad32x32x4d_c;
+const SadMxNx4Func sad32x16x4d_c = vpx_sad32x16x4d_c;
+const SadMxNx4Func sad16x32x4d_c = vpx_sad16x32x4d_c;
+const SadMxNx4Func sad16x16x4d_c = vpx_sad16x16x4d_c;
+const SadMxNx4Func sad16x8x4d_c = vpx_sad16x8x4d_c;
+const SadMxNx4Func sad8x16x4d_c = vpx_sad8x16x4d_c;
+const SadMxNx4Func sad8x8x4d_c = vpx_sad8x8x4d_c;
+const SadMxNx4Func sad8x4x4d_c = vpx_sad8x4x4d_c;
+const SadMxNx4Func sad4x8x4d_c = vpx_sad4x8x4d_c;
+const SadMxNx4Func sad4x4x4d_c = vpx_sad4x4x4d_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_c = vpx_highbd_sad64x64x4d_c;
+const SadMxNx4Func highbd_sad64x32x4d_c = vpx_highbd_sad64x32x4d_c;
+const SadMxNx4Func highbd_sad32x64x4d_c = vpx_highbd_sad32x64x4d_c;
+const SadMxNx4Func highbd_sad32x32x4d_c = vpx_highbd_sad32x32x4d_c;
+const SadMxNx4Func highbd_sad32x16x4d_c = vpx_highbd_sad32x16x4d_c;
+const SadMxNx4Func highbd_sad16x32x4d_c = vpx_highbd_sad16x32x4d_c;
+const SadMxNx4Func highbd_sad16x16x4d_c = vpx_highbd_sad16x16x4d_c;
+const SadMxNx4Func highbd_sad16x8x4d_c = vpx_highbd_sad16x8x4d_c;
+const SadMxNx4Func highbd_sad8x16x4d_c = vpx_highbd_sad8x16x4d_c;
+const SadMxNx4Func highbd_sad8x8x4d_c = vpx_highbd_sad8x8x4d_c;
+const SadMxNx4Func highbd_sad8x4x4d_c = vpx_highbd_sad8x4x4d_c;
+const SadMxNx4Func highbd_sad4x8x4d_c = vpx_highbd_sad4x8x4d_c;
+const SadMxNx4Func highbd_sad4x4x4d_c = vpx_highbd_sad4x4x4d_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_c_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_c, -1),
+ make_tuple(64, 32, sad64x32x4d_c, -1),
+ make_tuple(32, 64, sad32x64x4d_c, -1),
+ make_tuple(32, 32, sad32x32x4d_c, -1),
+ make_tuple(32, 16, sad32x16x4d_c, -1),
+ make_tuple(16, 32, sad16x32x4d_c, -1),
+ make_tuple(16, 16, sad16x16x4d_c, -1),
+ make_tuple(16, 8, sad16x8x4d_c, -1),
+ make_tuple(8, 16, sad8x16x4d_c, -1),
+ make_tuple(8, 8, sad8x8x4d_c, -1),
+ make_tuple(8, 4, sad8x4x4d_c, -1),
+ make_tuple(4, 8, sad4x8x4d_c, -1),
+ make_tuple(4, 4, sad4x4x4d_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
//------------------------------------------------------------------------------
// ARM functions
#if HAVE_MEDIA
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_armv6 = vp8_sad16x16_armv6;
-INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_armv6)));
-#endif
-#endif
+const SadMxNFunc sad16x16_media = vpx_sad16x16_media;
+const SadMxNParam media_tests[] = {
+ make_tuple(16, 16, sad16x16_media, -1),
+};
+INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
+#endif // HAVE_MEDIA
#if HAVE_NEON
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_neon = vp8_sad16x16_neon;
-const sad_m_by_n_fn_t sad_8x16_neon = vp8_sad8x16_neon;
-const sad_m_by_n_fn_t sad_16x8_neon = vp8_sad16x8_neon;
-const sad_m_by_n_fn_t sad_8x8_neon = vp8_sad8x8_neon;
-const sad_m_by_n_fn_t sad_4x4_neon = vp8_sad4x4_neon;
-INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_neon),
- make_tuple(8, 16, sad_8x16_neon),
- make_tuple(16, 8, sad_16x8_neon),
- make_tuple(8, 8, sad_8x8_neon),
- make_tuple(4, 4, sad_4x4_neon)));
-#endif
-#endif
+const SadMxNFunc sad64x64_neon = vpx_sad64x64_neon;
+const SadMxNFunc sad32x32_neon = vpx_sad32x32_neon;
+const SadMxNFunc sad16x16_neon = vpx_sad16x16_neon;
+const SadMxNFunc sad16x8_neon = vpx_sad16x8_neon;
+const SadMxNFunc sad8x16_neon = vpx_sad8x16_neon;
+const SadMxNFunc sad8x8_neon = vpx_sad8x8_neon;
+const SadMxNFunc sad4x4_neon = vpx_sad4x4_neon;
+
+const SadMxNParam neon_tests[] = {
+ make_tuple(64, 64, sad64x64_neon, -1),
+ make_tuple(32, 32, sad32x32_neon, -1),
+ make_tuple(16, 16, sad16x16_neon, -1),
+ make_tuple(16, 8, sad16x8_neon, -1),
+ make_tuple(8, 16, sad8x16_neon, -1),
+ make_tuple(8, 8, sad8x8_neon, -1),
+ make_tuple(4, 4, sad4x4_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
+
+const SadMxNx4Func sad64x64x4d_neon = vpx_sad64x64x4d_neon;
+const SadMxNx4Func sad32x32x4d_neon = vpx_sad32x32x4d_neon;
+const SadMxNx4Func sad16x16x4d_neon = vpx_sad16x16x4d_neon;
+const SadMxNx4Param x4d_neon_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_neon, -1),
+ make_tuple(32, 32, sad32x32x4d_neon, -1),
+ make_tuple(16, 16, sad16x16x4d_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
+#endif // HAVE_NEON
//------------------------------------------------------------------------------
// x86 functions
#if HAVE_MMX
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
-#endif
-
-const sad_m_by_n_test_param_t mmx_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(16, 16, sad_16x16_mmx_vp9),
- make_tuple(8, 16, sad_8x16_mmx_vp9),
- make_tuple(16, 8, sad_16x8_mmx_vp9),
- make_tuple(8, 8, sad_8x8_mmx_vp9),
- make_tuple(4, 4, sad_4x4_mmx_vp9),
-#endif
+const SadMxNFunc sad16x16_mmx = vpx_sad16x16_mmx;
+const SadMxNFunc sad16x8_mmx = vpx_sad16x8_mmx;
+const SadMxNFunc sad8x16_mmx = vpx_sad8x16_mmx;
+const SadMxNFunc sad8x8_mmx = vpx_sad8x8_mmx;
+const SadMxNFunc sad4x4_mmx = vpx_sad4x4_mmx;
+const SadMxNParam mmx_tests[] = {
+ make_tuple(16, 16, sad16x16_mmx, -1),
+ make_tuple(16, 8, sad16x8_mmx, -1),
+ make_tuple(8, 16, sad8x16_mmx, -1),
+ make_tuple(8, 8, sad8x8_mmx, -1),
+ make_tuple(4, 4, sad4x4_mmx, -1),
};
INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
-#endif
+#endif // HAVE_MMX
#if HAVE_SSE
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
-const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
- make_tuple(4, 4, sad_4x4_sse_vp9),
- make_tuple(4, 8, sad_4x8_sse_vp9)));
+const SadMxNFunc sad4x8_sse = vpx_sad4x8_sse;
+const SadMxNFunc sad4x4_sse = vpx_sad4x4_sse;
+const SadMxNParam sse_tests[] = {
+ make_tuple(4, 8, sad4x8_sse, -1),
+ make_tuple(4, 4, sad4x4_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::ValuesIn(sse_tests));
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
- make_tuple(4, 8, sad_4x8x4d_sse),
- make_tuple(4, 4, sad_4x4x4d_sse)));
+const SadMxNAvgFunc sad4x8_avg_sse = vpx_sad4x8_avg_sse;
+const SadMxNAvgFunc sad4x4_avg_sse = vpx_sad4x4_avg_sse;
+const SadMxNAvgParam avg_sse_tests[] = {
+ make_tuple(4, 8, sad4x8_avg_sse, -1),
+ make_tuple(4, 4, sad4x4_avg_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADavgTest, ::testing::ValuesIn(avg_sse_tests));
+
+const SadMxNx4Func sad4x8x4d_sse = vpx_sad4x8x4d_sse;
+const SadMxNx4Func sad4x4x4d_sse = vpx_sad4x4x4d_sse;
+const SadMxNx4Param x4d_sse_tests[] = {
+ make_tuple(4, 8, sad4x8x4d_sse, -1),
+ make_tuple(4, 4, sad4x4x4d_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::ValuesIn(x4d_sse_tests));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_SSE
#if HAVE_SSE2
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
-const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
-const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
-const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
-const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-#endif
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
-const sad_m_by_n_fn_t sad_64x32_sse2_vp9 = vp9_sad64x32_sse2;
-const sad_m_by_n_fn_t sad_32x64_sse2_vp9 = vp9_sad32x64_sse2;
-const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
-const sad_m_by_n_fn_t sad_32x16_sse2_vp9 = vp9_sad32x16_sse2;
-const sad_m_by_n_fn_t sad_16x32_sse2_vp9 = vp9_sad16x32_sse2;
-const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
-const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
-const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
-const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
-const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
-#endif
-#endif
-const sad_m_by_n_test_param_t sse2_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt),
-#endif
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
- make_tuple(64, 64, sad_64x64_sse2_vp9),
- make_tuple(64, 32, sad_64x32_sse2_vp9),
- make_tuple(32, 64, sad_32x64_sse2_vp9),
- make_tuple(32, 32, sad_32x32_sse2_vp9),
- make_tuple(32, 16, sad_32x16_sse2_vp9),
- make_tuple(16, 32, sad_16x32_sse2_vp9),
- make_tuple(16, 16, sad_16x16_sse2_vp9),
- make_tuple(16, 8, sad_16x8_sse2_vp9),
- make_tuple(8, 16, sad_8x16_sse2_vp9),
- make_tuple(8, 8, sad_8x8_sse2_vp9),
- make_tuple(8, 4, sad_8x4_sse2_vp9),
-#endif
-#endif
+const SadMxNFunc sad64x64_sse2 = vpx_sad64x64_sse2;
+const SadMxNFunc sad64x32_sse2 = vpx_sad64x32_sse2;
+const SadMxNFunc sad32x64_sse2 = vpx_sad32x64_sse2;
+const SadMxNFunc sad32x32_sse2 = vpx_sad32x32_sse2;
+const SadMxNFunc sad32x16_sse2 = vpx_sad32x16_sse2;
+const SadMxNFunc sad16x32_sse2 = vpx_sad16x32_sse2;
+const SadMxNFunc sad16x16_sse2 = vpx_sad16x16_sse2;
+const SadMxNFunc sad16x8_sse2 = vpx_sad16x8_sse2;
+const SadMxNFunc sad8x16_sse2 = vpx_sad8x16_sse2;
+const SadMxNFunc sad8x8_sse2 = vpx_sad8x8_sse2;
+const SadMxNFunc sad8x4_sse2 = vpx_sad8x4_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_sse2 = vpx_highbd_sad64x64_sse2;
+const SadMxNFunc highbd_sad64x32_sse2 = vpx_highbd_sad64x32_sse2;
+const SadMxNFunc highbd_sad32x64_sse2 = vpx_highbd_sad32x64_sse2;
+const SadMxNFunc highbd_sad32x32_sse2 = vpx_highbd_sad32x32_sse2;
+const SadMxNFunc highbd_sad32x16_sse2 = vpx_highbd_sad32x16_sse2;
+const SadMxNFunc highbd_sad16x32_sse2 = vpx_highbd_sad16x32_sse2;
+const SadMxNFunc highbd_sad16x16_sse2 = vpx_highbd_sad16x16_sse2;
+const SadMxNFunc highbd_sad16x8_sse2 = vpx_highbd_sad16x8_sse2;
+const SadMxNFunc highbd_sad8x16_sse2 = vpx_highbd_sad8x16_sse2;
+const SadMxNFunc highbd_sad8x8_sse2 = vpx_highbd_sad8x8_sse2;
+const SadMxNFunc highbd_sad8x4_sse2 = vpx_highbd_sad8x4_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_sse2, -1),
+ make_tuple(64, 32, sad64x32_sse2, -1),
+ make_tuple(32, 64, sad32x64_sse2, -1),
+ make_tuple(32, 32, sad32x32_sse2, -1),
+ make_tuple(32, 16, sad32x16_sse2, -1),
+ make_tuple(16, 32, sad16x32_sse2, -1),
+ make_tuple(16, 16, sad16x16_sse2, -1),
+ make_tuple(16, 8, sad16x8_sse2, -1),
+ make_tuple(8, 16, sad8x16_sse2, -1),
+ make_tuple(8, 8, sad8x8_sse2, -1),
+ make_tuple(8, 4, sad8x4_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
-INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_sse2),
- make_tuple(64, 32, sad_64x32x4d_sse2),
- make_tuple(32, 64, sad_32x64x4d_sse2),
- make_tuple(32, 32, sad_32x32x4d_sse2),
- make_tuple(32, 16, sad_32x16x4d_sse2),
- make_tuple(16, 32, sad_16x32x4d_sse2),
- make_tuple(16, 16, sad_16x16x4d_sse2),
- make_tuple(16, 8, sad_16x8x4d_sse2),
- make_tuple(8, 16, sad_8x16x4d_sse2),
- make_tuple(8, 8, sad_8x8x4d_sse2),
- make_tuple(8, 4, sad_8x4x4d_sse2)));
-#endif
-#endif
-#endif
+const SadMxNAvgFunc sad64x64_avg_sse2 = vpx_sad64x64_avg_sse2;
+const SadMxNAvgFunc sad64x32_avg_sse2 = vpx_sad64x32_avg_sse2;
+const SadMxNAvgFunc sad32x64_avg_sse2 = vpx_sad32x64_avg_sse2;
+const SadMxNAvgFunc sad32x32_avg_sse2 = vpx_sad32x32_avg_sse2;
+const SadMxNAvgFunc sad32x16_avg_sse2 = vpx_sad32x16_avg_sse2;
+const SadMxNAvgFunc sad16x32_avg_sse2 = vpx_sad16x32_avg_sse2;
+const SadMxNAvgFunc sad16x16_avg_sse2 = vpx_sad16x16_avg_sse2;
+const SadMxNAvgFunc sad16x8_avg_sse2 = vpx_sad16x8_avg_sse2;
+const SadMxNAvgFunc sad8x16_avg_sse2 = vpx_sad8x16_avg_sse2;
+const SadMxNAvgFunc sad8x8_avg_sse2 = vpx_sad8x8_avg_sse2;
+const SadMxNAvgFunc sad8x4_avg_sse2 = vpx_sad8x4_avg_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_sse2 = vpx_highbd_sad64x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad64x32_avg_sse2 = vpx_highbd_sad64x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x64_avg_sse2 = vpx_highbd_sad32x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x32_avg_sse2 = vpx_highbd_sad32x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x16_avg_sse2 = vpx_highbd_sad32x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x32_avg_sse2 = vpx_highbd_sad16x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x16_avg_sse2 = vpx_highbd_sad16x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x8_avg_sse2 = vpx_highbd_sad16x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x16_avg_sse2 = vpx_highbd_sad8x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x8_avg_sse2 = vpx_highbd_sad8x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x4_avg_sse2 = vpx_highbd_sad8x4_avg_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_sse2, -1),
+ make_tuple(64, 32, sad64x32_avg_sse2, -1),
+ make_tuple(32, 64, sad32x64_avg_sse2, -1),
+ make_tuple(32, 32, sad32x32_avg_sse2, -1),
+ make_tuple(32, 16, sad32x16_avg_sse2, -1),
+ make_tuple(16, 32, sad16x32_avg_sse2, -1),
+ make_tuple(16, 16, sad16x16_avg_sse2, -1),
+ make_tuple(16, 8, sad16x8_avg_sse2, -1),
+ make_tuple(8, 16, sad8x16_avg_sse2, -1),
+ make_tuple(8, 8, sad8x8_avg_sse2, -1),
+ make_tuple(8, 4, sad8x4_avg_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
+
+const SadMxNx4Func sad64x64x4d_sse2 = vpx_sad64x64x4d_sse2;
+const SadMxNx4Func sad64x32x4d_sse2 = vpx_sad64x32x4d_sse2;
+const SadMxNx4Func sad32x64x4d_sse2 = vpx_sad32x64x4d_sse2;
+const SadMxNx4Func sad32x32x4d_sse2 = vpx_sad32x32x4d_sse2;
+const SadMxNx4Func sad32x16x4d_sse2 = vpx_sad32x16x4d_sse2;
+const SadMxNx4Func sad16x32x4d_sse2 = vpx_sad16x32x4d_sse2;
+const SadMxNx4Func sad16x16x4d_sse2 = vpx_sad16x16x4d_sse2;
+const SadMxNx4Func sad16x8x4d_sse2 = vpx_sad16x8x4d_sse2;
+const SadMxNx4Func sad8x16x4d_sse2 = vpx_sad8x16x4d_sse2;
+const SadMxNx4Func sad8x8x4d_sse2 = vpx_sad8x8x4d_sse2;
+const SadMxNx4Func sad8x4x4d_sse2 = vpx_sad8x4x4d_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_sse2 = vpx_highbd_sad64x64x4d_sse2;
+const SadMxNx4Func highbd_sad64x32x4d_sse2 = vpx_highbd_sad64x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x64x4d_sse2 = vpx_highbd_sad32x64x4d_sse2;
+const SadMxNx4Func highbd_sad32x32x4d_sse2 = vpx_highbd_sad32x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x16x4d_sse2 = vpx_highbd_sad32x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x32x4d_sse2 = vpx_highbd_sad16x32x4d_sse2;
+const SadMxNx4Func highbd_sad16x16x4d_sse2 = vpx_highbd_sad16x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x8x4d_sse2 = vpx_highbd_sad16x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x16x4d_sse2 = vpx_highbd_sad8x16x4d_sse2;
+const SadMxNx4Func highbd_sad8x8x4d_sse2 = vpx_highbd_sad8x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x4x4d_sse2 = vpx_highbd_sad8x4x4d_sse2;
+const SadMxNx4Func highbd_sad4x8x4d_sse2 = vpx_highbd_sad4x8x4d_sse2;
+const SadMxNx4Func highbd_sad4x4x4d_sse2 = vpx_highbd_sad4x4x4d_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_sse2, -1),
+ make_tuple(64, 32, sad64x32x4d_sse2, -1),
+ make_tuple(32, 64, sad32x64x4d_sse2, -1),
+ make_tuple(32, 32, sad32x32x4d_sse2, -1),
+ make_tuple(32, 16, sad32x16x4d_sse2, -1),
+ make_tuple(16, 32, sad16x32x4d_sse2, -1),
+ make_tuple(16, 16, sad16x16x4d_sse2, -1),
+ make_tuple(16, 8, sad16x8x4d_sse2, -1),
+ make_tuple(8, 16, sad8x16x4d_sse2, -1),
+ make_tuple(8, 8, sad8x8x4d_sse2, -1),
+ make_tuple(8, 4, sad8x4x4d_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSE2
#if HAVE_SSE3
-#if CONFIG_VP8_ENCODER
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
- make_tuple(16, 16, sad_16x16x4d_sse3),
- make_tuple(16, 8, sad_16x8x4d_sse3),
- make_tuple(8, 16, sad_8x16x4d_sse3),
- make_tuple(8, 8, sad_8x8x4d_sse3),
- make_tuple(4, 4, sad_4x4x4d_sse3)));
-#endif
-#endif
+// The only functions here are x3 variants, which do not have tests.
+#endif // HAVE_SSE3
#if HAVE_SSSE3
-#if CONFIG_USE_X86INC
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_sse3)));
-#endif
-#endif
-#endif
+// The only functions here are x3 variants, which do not have tests.
+#endif // HAVE_SSSE3
+
+#if HAVE_SSE4_1
+// The only functions here are x8 variants, which do not have tests.
+#endif // HAVE_SSE4_1
+
+#if HAVE_AVX2
+const SadMxNFunc sad64x64_avx2 = vpx_sad64x64_avx2;
+const SadMxNFunc sad64x32_avx2 = vpx_sad64x32_avx2;
+const SadMxNFunc sad32x64_avx2 = vpx_sad32x64_avx2;
+const SadMxNFunc sad32x32_avx2 = vpx_sad32x32_avx2;
+const SadMxNFunc sad32x16_avx2 = vpx_sad32x16_avx2;
+const SadMxNParam avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avx2, -1),
+ make_tuple(64, 32, sad64x32_avx2, -1),
+ make_tuple(32, 64, sad32x64_avx2, -1),
+ make_tuple(32, 32, sad32x32_avx2, -1),
+ make_tuple(32, 16, sad32x16_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
+
+const SadMxNAvgFunc sad64x64_avg_avx2 = vpx_sad64x64_avg_avx2;
+const SadMxNAvgFunc sad64x32_avg_avx2 = vpx_sad64x32_avg_avx2;
+const SadMxNAvgFunc sad32x64_avg_avx2 = vpx_sad32x64_avg_avx2;
+const SadMxNAvgFunc sad32x32_avg_avx2 = vpx_sad32x32_avg_avx2;
+const SadMxNAvgFunc sad32x16_avg_avx2 = vpx_sad32x16_avg_avx2;
+const SadMxNAvgParam avg_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_avx2, -1),
+ make_tuple(64, 32, sad64x32_avg_avx2, -1),
+ make_tuple(32, 64, sad32x64_avg_avx2, -1),
+ make_tuple(32, 32, sad32x32_avg_avx2, -1),
+ make_tuple(32, 16, sad32x16_avg_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
+
+const SadMxNx4Func sad64x64x4d_avx2 = vpx_sad64x64x4d_avx2;
+const SadMxNx4Func sad32x32x4d_avx2 = vpx_sad32x32x4d_avx2;
+const SadMxNx4Param x4d_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_avx2, -1),
+ make_tuple(32, 32, sad32x32x4d_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
+#endif // HAVE_AVX2
+
+//------------------------------------------------------------------------------
+// MIPS functions
+#if HAVE_MSA
+const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
+const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
+const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
+const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
+const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
+const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
+const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
+const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
+const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
+const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
+const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
+const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
+const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
+const SadMxNParam msa_tests[] = {
+ make_tuple(64, 64, sad64x64_msa, -1),
+ make_tuple(64, 32, sad64x32_msa, -1),
+ make_tuple(32, 64, sad32x64_msa, -1),
+ make_tuple(32, 32, sad32x32_msa, -1),
+ make_tuple(32, 16, sad32x16_msa, -1),
+ make_tuple(16, 32, sad16x32_msa, -1),
+ make_tuple(16, 16, sad16x16_msa, -1),
+ make_tuple(16, 8, sad16x8_msa, -1),
+ make_tuple(8, 16, sad8x16_msa, -1),
+ make_tuple(8, 8, sad8x8_msa, -1),
+ make_tuple(8, 4, sad8x4_msa, -1),
+ make_tuple(4, 8, sad4x8_msa, -1),
+ make_tuple(4, 4, sad4x4_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
+
+const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
+const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
+const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
+const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
+const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
+const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
+const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
+const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
+const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
+const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
+const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
+const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
+const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
+const SadMxNAvgParam avg_msa_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_msa, -1),
+ make_tuple(64, 32, sad64x32_avg_msa, -1),
+ make_tuple(32, 64, sad32x64_avg_msa, -1),
+ make_tuple(32, 32, sad32x32_avg_msa, -1),
+ make_tuple(32, 16, sad32x16_avg_msa, -1),
+ make_tuple(16, 32, sad16x32_avg_msa, -1),
+ make_tuple(16, 16, sad16x16_avg_msa, -1),
+ make_tuple(16, 8, sad16x8_avg_msa, -1),
+ make_tuple(8, 16, sad8x16_avg_msa, -1),
+ make_tuple(8, 8, sad8x8_avg_msa, -1),
+ make_tuple(8, 4, sad8x4_avg_msa, -1),
+ make_tuple(4, 8, sad4x8_avg_msa, -1),
+ make_tuple(4, 4, sad4x4_avg_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
+
+const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
+const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
+const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
+const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
+const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
+const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
+const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
+const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
+const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
+const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
+const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
+const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
+const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
+const SadMxNx4Param x4d_msa_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_msa, -1),
+ make_tuple(64, 32, sad64x32x4d_msa, -1),
+ make_tuple(32, 64, sad32x64x4d_msa, -1),
+ make_tuple(32, 32, sad32x32x4d_msa, -1),
+ make_tuple(32, 16, sad32x16x4d_msa, -1),
+ make_tuple(16, 32, sad16x32x4d_msa, -1),
+ make_tuple(16, 16, sad16x16x4d_msa, -1),
+ make_tuple(16, 8, sad16x8x4d_msa, -1),
+ make_tuple(8, 16, sad8x16x4d_msa, -1),
+ make_tuple(8, 8, sad8x8x4d_msa, -1),
+ make_tuple(8, 4, sad8x4x4d_msa, -1),
+ make_tuple(4, 8, sad4x8x4d_msa, -1),
+ make_tuple(4, 4, sad4x4x4d_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
for (int block = 0; block < 4; block++) {
reference_sad = ReferenceSAD(UINT_MAX, block);
EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
|
for (int block = 0; block < 4; ++block) {
reference_sad = ReferenceSAD(block);
EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
|
150,874 |
void CheckSad(unsigned int max_sad) {
unsigned int reference_sad, exp_sad;
reference_sad = ReferenceSAD(max_sad);
exp_sad = SAD(max_sad);
if (reference_sad <= max_sad) {
ASSERT_EQ(exp_sad, reference_sad);
} else {
ASSERT_GE(exp_sad, reference_sad);
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void CheckSad(unsigned int max_sad) {
|
@@ -13,56 +13,74 @@
#include <limits.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#if CONFIG_VP8_ENCODER
-#include "./vp8_rtcd.h"
-#endif
-#if CONFIG_VP9_ENCODER
-#include "./vp9_rtcd.h"
-#endif
-#include "vpx_mem/vpx_mem.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
-typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
- int source_stride,
- const unsigned char *reference_ptr,
- int reference_stride,
- unsigned int max_sad);
-typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ const uint8_t *second_pred);
+typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
-typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
- int src_stride,
- const unsigned char * const ref_ptr[],
- int ref_stride,
- unsigned int *sad_array);
-typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
- sad_n_by_n_by_4_test_param_t;
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[],
+ int ref_stride,
+ uint32_t *sad_array);
+typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
using libvpx_test::ACMRandom;
namespace {
class SADTestBase : public ::testing::Test {
public:
- SADTestBase(int width, int height) : width_(width), height_(height) {}
+ SADTestBase(int width, int height, int bit_depth) :
+ width_(width), height_(height), bd_(bit_depth) {}
static void SetUpTestCase() {
- source_data_ = reinterpret_cast<uint8_t*>(
+ source_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
- reference_data_ = reinterpret_cast<uint8_t*>(
+ reference_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
+ second_pred8_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, 64*64));
+ source_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
+ reference_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
+ second_pred16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
}
static void TearDownTestCase() {
- vpx_free(source_data_);
- source_data_ = NULL;
- vpx_free(reference_data_);
- reference_data_ = NULL;
+ vpx_free(source_data8_);
+ source_data8_ = NULL;
+ vpx_free(reference_data8_);
+ reference_data8_ = NULL;
+ vpx_free(second_pred8_);
+ second_pred8_ = NULL;
+ vpx_free(source_data16_);
+ source_data16_ = NULL;
+ vpx_free(reference_data16_);
+ reference_data16_ = NULL;
+ vpx_free(second_pred16_);
+ second_pred16_ = NULL;
}
virtual void TearDown() {
@@ -76,142 +94,335 @@
static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
+ if (bd_ == -1) {
+ use_high_bit_depth_ = false;
+ bit_depth_ = VPX_BITS_8;
+ source_data_ = source_data8_;
+ reference_data_ = reference_data8_;
+ second_pred_ = second_pred8_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ use_high_bit_depth_ = true;
+ bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
+ source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
+ reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
+ second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
- virtual uint8_t* GetReference(int block_idx) {
+ virtual uint8_t *GetReference(int block_idx) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_high_bit_depth_)
+ return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
+ block_idx * kDataBlockSize);
+#endif // CONFIG_VP9_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
// Sum of Absolute Differences. Given two blocks, calculate the absolute
// difference between two pixels in the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ReferenceSAD(int block_idx) {
unsigned int sad = 0;
- const uint8_t* const reference = GetReference(block_idx);
-
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- sad += abs(source_data_[h * source_stride_ + w]
- - reference[h * reference_stride_ + w]);
- }
- if (sad > max_sad) {
- break;
+ if (!use_high_bit_depth_) {
+ sad += abs(source8[h * source_stride_ + w] -
+ reference8[h * reference_stride_ + w]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ sad += abs(source16[h * source_stride_ + w] -
+ reference16[h * reference_stride_ + w]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
return sad;
}
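// Worked example of the SAD above on an assumed 2x2 block (values are
// illustrative only):
//   src = [1 4; 2 8], ref = [3 3; 5 5]
//   SAD = |1-3| + |4-3| + |2-5| + |8-5| = 2 + 1 + 3 + 3 = 9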
- void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) {
+ // Sum of Absolute Differences Average. Given two blocks and a prediction,
+ // calculate the absolute difference between one pixel and the average of
+ // the corresponding and predicted pixels; accumulate.
+ unsigned int ReferenceSADavg(int block_idx) {
+ unsigned int sad = 0;
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+ const uint8_t *const second_pred8 = second_pred_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+ const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = fill_constant;
+ if (!use_high_bit_depth_) {
+ const int tmp = second_pred8[h * width_ + w] +
+ reference8[h * reference_stride_ + w];
+ const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source8[h * source_stride_ + w] - comp_pred);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ const int tmp = second_pred16[h * width_ + w] +
+ reference16[h * reference_stride_ + w];
+ const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source16[h * source_stride_ + w] - comp_pred);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ return sad;
+ }
+
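+ // Worked example of the averaging step above (values assumed for
+ // illustration): ref = 6, second_pred = 9 gives tmp = 15 and
+ // comp_pred = ROUND_POWER_OF_TWO(15, 1) = (15 + 1) >> 1 = 8, i.e. the
+ // average is rounded to nearest rather than truncated to 7.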
+ void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ for (int h = 0; h < height_; ++h) {
+ for (int w = 0; w < width_; ++w) {
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = fill_constant;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
void FillRandom(uint8_t *data, int stride) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = rnd_.Rand8();
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
- int width_, height_;
- static uint8_t* source_data_;
+ int width_, height_, mask_, bd_;
+ vpx_bit_depth_t bit_depth_;
+ static uint8_t *source_data_;
+ static uint8_t *reference_data_;
+ static uint8_t *second_pred_;
int source_stride_;
- static uint8_t* reference_data_;
+ bool use_high_bit_depth_;
+ static uint8_t *source_data8_;
+ static uint8_t *reference_data8_;
+ static uint8_t *second_pred8_;
+ static uint16_t *source_data16_;
+ static uint16_t *reference_data16_;
+ static uint16_t *second_pred16_;
int reference_stride_;
ACMRandom rnd_;
};
-class SADTest : public SADTestBase,
- public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+class SADx4Test
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNx4Param> {
public:
- SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
-
- protected:
- unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
- unsigned int ret;
- const uint8_t* const reference = GetReference(block_idx);
-
- REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
- reference, reference_stride_,
- max_sad));
- return ret;
- }
-
- void CheckSad(unsigned int max_sad) {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(max_sad);
- exp_sad = SAD(max_sad);
-
- if (reference_sad <= max_sad) {
- ASSERT_EQ(exp_sad, reference_sad);
- } else {
- // Alternative implementations are not required to check max_sad
- ASSERT_GE(exp_sad, reference_sad);
- }
- }
-};
-
-class SADx4Test : public SADTestBase,
- public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
- public:
- SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
protected:
void SADs(unsigned int *results) {
- const uint8_t* refs[] = {GetReference(0), GetReference(1),
- GetReference(2), GetReference(3)};
+ const uint8_t *references[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
- REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
- refs, reference_stride_,
- results));
+ ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ references, reference_stride_,
+ results));
}
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
- for (int block = 0; block < 4; block++) {
- reference_sad = ReferenceSAD(UINT_MAX, block);
+ for (int block = 0; block < 4; ++block) {
+ reference_sad = ReferenceSAD(block);
- EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
}
}
};
-uint8_t* SADTestBase::source_data_ = NULL;
-uint8_t* SADTestBase::reference_data_ = NULL;
+class SADTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNParam> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSAD(0);
+ const unsigned int exp_sad = SAD(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+class SADavgTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNAvgParam> {
+ public:
+ SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD_avg(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ second_pred_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSADavg(0);
+ const unsigned int exp_sad = SAD_avg(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+uint8_t *SADTestBase::source_data_ = NULL;
+uint8_t *SADTestBase::reference_data_ = NULL;
+uint8_t *SADTestBase::second_pred_ = NULL;
+uint8_t *SADTestBase::source_data8_ = NULL;
+uint8_t *SADTestBase::reference_data8_ = NULL;
+uint8_t *SADTestBase::second_pred8_ = NULL;
+uint16_t *SADTestBase::source_data16_ = NULL;
+uint16_t *SADTestBase::reference_data16_ = NULL;
+uint16_t *SADTestBase::second_pred16_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(reference_data_, reference_stride_, 255);
- CheckSad(UINT_MAX);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ CheckSAD();
+}
+
+TEST_P(SADTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+TEST_P(SADavgTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADavgTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
}
TEST_P(SADx4Test, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(GetReference(0), reference_stride_, 255);
- FillConstant(GetReference(1), reference_stride_, 255);
- FillConstant(GetReference(2), reference_stride_, 255);
- FillConstant(GetReference(3), reference_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, mask_);
+ FillConstant(GetReference(1), reference_stride_, mask_);
+ FillConstant(GetReference(2), reference_stride_, mask_);
+ FillConstant(GetReference(3), reference_stride_, mask_);
CheckSADs();
}
-TEST_P(SADTest, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(UINT_MAX);
-}
-
TEST_P(SADx4Test, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
+ FillConstant(source_data_, source_stride_, mask_);
FillConstant(GetReference(0), reference_stride_, 0);
FillConstant(GetReference(1), reference_stride_, 0);
FillConstant(GetReference(2), reference_stride_, 0);
@@ -219,15 +430,6 @@
CheckSADs();
}
-TEST_P(SADTest, ShortRef) {
- int tmp_stride = reference_stride_;
- reference_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -240,17 +442,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, UnalignedRef) {
- // The reference frame, but not the source frame, may be unaligned for
- // certain types of searches.
- int tmp_stride = reference_stride_;
- reference_stride_ -= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -265,15 +456,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, ShortSrc) {
- int tmp_stride = source_stride_;
- source_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- source_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -286,271 +468,743 @@
source_stride_ = tmp_stride;
}
-TEST_P(SADTest, MaxSAD) {
- // Verify that, when max_sad is set, the implementation does not return a
- // value lower than the reference.
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(128);
+TEST_P(SADx4Test, SrcAlignedByWidth) {
+ uint8_t *tmp_source_data = source_data_;
+ source_data_ += width_;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_data_ = tmp_source_data;
}
using std::tr1::make_tuple;
//------------------------------------------------------------------------------
// C functions
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
-const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
-const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
-const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
-const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
-const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
-const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
-#endif
-const sad_m_by_n_test_param_t c_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(64, 64, sad_64x64_c_vp9),
- make_tuple(32, 32, sad_32x32_c_vp9),
- make_tuple(16, 16, sad_16x16_c_vp9),
- make_tuple(8, 16, sad_8x16_c_vp9),
- make_tuple(16, 8, sad_16x8_c_vp9),
- make_tuple(8, 8, sad_8x8_c_vp9),
- make_tuple(8, 4, sad_8x4_c_vp9),
- make_tuple(4, 8, sad_4x8_c_vp9),
- make_tuple(4, 4, sad_4x4_c_vp9),
-#endif
+const SadMxNFunc sad64x64_c = vpx_sad64x64_c;
+const SadMxNFunc sad64x32_c = vpx_sad64x32_c;
+const SadMxNFunc sad32x64_c = vpx_sad32x64_c;
+const SadMxNFunc sad32x32_c = vpx_sad32x32_c;
+const SadMxNFunc sad32x16_c = vpx_sad32x16_c;
+const SadMxNFunc sad16x32_c = vpx_sad16x32_c;
+const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
+const SadMxNFunc sad16x8_c = vpx_sad16x8_c;
+const SadMxNFunc sad8x16_c = vpx_sad8x16_c;
+const SadMxNFunc sad8x8_c = vpx_sad8x8_c;
+const SadMxNFunc sad8x4_c = vpx_sad8x4_c;
+const SadMxNFunc sad4x8_c = vpx_sad4x8_c;
+const SadMxNFunc sad4x4_c = vpx_sad4x4_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_c = vpx_highbd_sad64x64_c;
+const SadMxNFunc highbd_sad64x32_c = vpx_highbd_sad64x32_c;
+const SadMxNFunc highbd_sad32x64_c = vpx_highbd_sad32x64_c;
+const SadMxNFunc highbd_sad32x32_c = vpx_highbd_sad32x32_c;
+const SadMxNFunc highbd_sad32x16_c = vpx_highbd_sad32x16_c;
+const SadMxNFunc highbd_sad16x32_c = vpx_highbd_sad16x32_c;
+const SadMxNFunc highbd_sad16x16_c = vpx_highbd_sad16x16_c;
+const SadMxNFunc highbd_sad16x8_c = vpx_highbd_sad16x8_c;
+const SadMxNFunc highbd_sad8x16_c = vpx_highbd_sad8x16_c;
+const SadMxNFunc highbd_sad8x8_c = vpx_highbd_sad8x8_c;
+const SadMxNFunc highbd_sad8x4_c = vpx_highbd_sad8x4_c;
+const SadMxNFunc highbd_sad4x8_c = vpx_highbd_sad4x8_c;
+const SadMxNFunc highbd_sad4x4_c = vpx_highbd_sad4x4_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam c_tests[] = {
+ make_tuple(64, 64, sad64x64_c, -1),
+ make_tuple(64, 32, sad64x32_c, -1),
+ make_tuple(32, 64, sad32x64_c, -1),
+ make_tuple(32, 32, sad32x32_c, -1),
+ make_tuple(32, 16, sad32x16_c, -1),
+ make_tuple(16, 32, sad16x32_c, -1),
+ make_tuple(16, 16, sad16x16_c, -1),
+ make_tuple(16, 8, sad16x8_c, -1),
+ make_tuple(8, 16, sad8x16_c, -1),
+ make_tuple(8, 8, sad8x8_c, -1),
+ make_tuple(8, 4, sad8x4_c, -1),
+ make_tuple(4, 8, sad4x8_c, -1),
+ make_tuple(4, 4, sad4x4_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
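// Note on the fourth tuple element: -1 selects the 8-bit path in SetUp
// (use_high_bit_depth_ = false), while 8, 10 or 12 selects the
// CONFIG_VP9_HIGHBITDEPTH path with that bit depth, and
// mask_ = (1 << bit_depth_) - 1 caps the pixel values accordingly.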
INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
-#if CONFIG_VP9_ENCODER
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
-INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_c),
- make_tuple(64, 32, sad_64x32x4d_c),
- make_tuple(32, 64, sad_32x64x4d_c),
- make_tuple(32, 32, sad_32x32x4d_c),
- make_tuple(32, 16, sad_32x16x4d_c),
- make_tuple(16, 32, sad_16x32x4d_c),
- make_tuple(16, 16, sad_16x16x4d_c),
- make_tuple(16, 8, sad_16x8x4d_c),
- make_tuple(8, 16, sad_8x16x4d_c),
- make_tuple(8, 8, sad_8x8x4d_c),
- make_tuple(8, 4, sad_8x4x4d_c),
- make_tuple(4, 8, sad_4x8x4d_c),
- make_tuple(4, 4, sad_4x4x4d_c)));
-#endif // CONFIG_VP9_ENCODER
+const SadMxNAvgFunc sad64x64_avg_c = vpx_sad64x64_avg_c;
+const SadMxNAvgFunc sad64x32_avg_c = vpx_sad64x32_avg_c;
+const SadMxNAvgFunc sad32x64_avg_c = vpx_sad32x64_avg_c;
+const SadMxNAvgFunc sad32x32_avg_c = vpx_sad32x32_avg_c;
+const SadMxNAvgFunc sad32x16_avg_c = vpx_sad32x16_avg_c;
+const SadMxNAvgFunc sad16x32_avg_c = vpx_sad16x32_avg_c;
+const SadMxNAvgFunc sad16x16_avg_c = vpx_sad16x16_avg_c;
+const SadMxNAvgFunc sad16x8_avg_c = vpx_sad16x8_avg_c;
+const SadMxNAvgFunc sad8x16_avg_c = vpx_sad8x16_avg_c;
+const SadMxNAvgFunc sad8x8_avg_c = vpx_sad8x8_avg_c;
+const SadMxNAvgFunc sad8x4_avg_c = vpx_sad8x4_avg_c;
+const SadMxNAvgFunc sad4x8_avg_c = vpx_sad4x8_avg_c;
+const SadMxNAvgFunc sad4x4_avg_c = vpx_sad4x4_avg_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_c = vpx_highbd_sad64x64_avg_c;
+const SadMxNAvgFunc highbd_sad64x32_avg_c = vpx_highbd_sad64x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x64_avg_c = vpx_highbd_sad32x64_avg_c;
+const SadMxNAvgFunc highbd_sad32x32_avg_c = vpx_highbd_sad32x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x16_avg_c = vpx_highbd_sad32x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x32_avg_c = vpx_highbd_sad16x32_avg_c;
+const SadMxNAvgFunc highbd_sad16x16_avg_c = vpx_highbd_sad16x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x8_avg_c = vpx_highbd_sad16x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x16_avg_c = vpx_highbd_sad8x16_avg_c;
+const SadMxNAvgFunc highbd_sad8x8_avg_c = vpx_highbd_sad8x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x4_avg_c = vpx_highbd_sad8x4_avg_c;
+const SadMxNAvgFunc highbd_sad4x8_avg_c = vpx_highbd_sad4x8_avg_c;
+const SadMxNAvgFunc highbd_sad4x4_avg_c = vpx_highbd_sad4x4_avg_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_c_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_c, -1),
+ make_tuple(64, 32, sad64x32_avg_c, -1),
+ make_tuple(32, 64, sad32x64_avg_c, -1),
+ make_tuple(32, 32, sad32x32_avg_c, -1),
+ make_tuple(32, 16, sad32x16_avg_c, -1),
+ make_tuple(16, 32, sad16x32_avg_c, -1),
+ make_tuple(16, 16, sad16x16_avg_c, -1),
+ make_tuple(16, 8, sad16x8_avg_c, -1),
+ make_tuple(8, 16, sad8x16_avg_c, -1),
+ make_tuple(8, 8, sad8x8_avg_c, -1),
+ make_tuple(8, 4, sad8x4_avg_c, -1),
+ make_tuple(4, 8, sad4x8_avg_c, -1),
+ make_tuple(4, 4, sad4x4_avg_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
+
+const SadMxNx4Func sad64x64x4d_c = vpx_sad64x64x4d_c;
+const SadMxNx4Func sad64x32x4d_c = vpx_sad64x32x4d_c;
+const SadMxNx4Func sad32x64x4d_c = vpx_sad32x64x4d_c;
+const SadMxNx4Func sad32x32x4d_c = vpx_sad32x32x4d_c;
+const SadMxNx4Func sad32x16x4d_c = vpx_sad32x16x4d_c;
+const SadMxNx4Func sad16x32x4d_c = vpx_sad16x32x4d_c;
+const SadMxNx4Func sad16x16x4d_c = vpx_sad16x16x4d_c;
+const SadMxNx4Func sad16x8x4d_c = vpx_sad16x8x4d_c;
+const SadMxNx4Func sad8x16x4d_c = vpx_sad8x16x4d_c;
+const SadMxNx4Func sad8x8x4d_c = vpx_sad8x8x4d_c;
+const SadMxNx4Func sad8x4x4d_c = vpx_sad8x4x4d_c;
+const SadMxNx4Func sad4x8x4d_c = vpx_sad4x8x4d_c;
+const SadMxNx4Func sad4x4x4d_c = vpx_sad4x4x4d_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_c = vpx_highbd_sad64x64x4d_c;
+const SadMxNx4Func highbd_sad64x32x4d_c = vpx_highbd_sad64x32x4d_c;
+const SadMxNx4Func highbd_sad32x64x4d_c = vpx_highbd_sad32x64x4d_c;
+const SadMxNx4Func highbd_sad32x32x4d_c = vpx_highbd_sad32x32x4d_c;
+const SadMxNx4Func highbd_sad32x16x4d_c = vpx_highbd_sad32x16x4d_c;
+const SadMxNx4Func highbd_sad16x32x4d_c = vpx_highbd_sad16x32x4d_c;
+const SadMxNx4Func highbd_sad16x16x4d_c = vpx_highbd_sad16x16x4d_c;
+const SadMxNx4Func highbd_sad16x8x4d_c = vpx_highbd_sad16x8x4d_c;
+const SadMxNx4Func highbd_sad8x16x4d_c = vpx_highbd_sad8x16x4d_c;
+const SadMxNx4Func highbd_sad8x8x4d_c = vpx_highbd_sad8x8x4d_c;
+const SadMxNx4Func highbd_sad8x4x4d_c = vpx_highbd_sad8x4x4d_c;
+const SadMxNx4Func highbd_sad4x8x4d_c = vpx_highbd_sad4x8x4d_c;
+const SadMxNx4Func highbd_sad4x4x4d_c = vpx_highbd_sad4x4x4d_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_c_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_c, -1),
+ make_tuple(64, 32, sad64x32x4d_c, -1),
+ make_tuple(32, 64, sad32x64x4d_c, -1),
+ make_tuple(32, 32, sad32x32x4d_c, -1),
+ make_tuple(32, 16, sad32x16x4d_c, -1),
+ make_tuple(16, 32, sad16x32x4d_c, -1),
+ make_tuple(16, 16, sad16x16x4d_c, -1),
+ make_tuple(16, 8, sad16x8x4d_c, -1),
+ make_tuple(8, 16, sad8x16x4d_c, -1),
+ make_tuple(8, 8, sad8x8x4d_c, -1),
+ make_tuple(8, 4, sad8x4x4d_c, -1),
+ make_tuple(4, 8, sad4x8x4d_c, -1),
+ make_tuple(4, 4, sad4x4x4d_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
//------------------------------------------------------------------------------
// ARM functions
#if HAVE_MEDIA
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_armv6 = vp8_sad16x16_armv6;
-INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_armv6)));
-#endif
-#endif
+const SadMxNFunc sad16x16_media = vpx_sad16x16_media;
+const SadMxNParam media_tests[] = {
+ make_tuple(16, 16, sad16x16_media, -1),
+};
+INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
+#endif // HAVE_MEDIA
#if HAVE_NEON
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_neon = vp8_sad16x16_neon;
-const sad_m_by_n_fn_t sad_8x16_neon = vp8_sad8x16_neon;
-const sad_m_by_n_fn_t sad_16x8_neon = vp8_sad16x8_neon;
-const sad_m_by_n_fn_t sad_8x8_neon = vp8_sad8x8_neon;
-const sad_m_by_n_fn_t sad_4x4_neon = vp8_sad4x4_neon;
-INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_neon),
- make_tuple(8, 16, sad_8x16_neon),
- make_tuple(16, 8, sad_16x8_neon),
- make_tuple(8, 8, sad_8x8_neon),
- make_tuple(4, 4, sad_4x4_neon)));
-#endif
-#endif
+const SadMxNFunc sad64x64_neon = vpx_sad64x64_neon;
+const SadMxNFunc sad32x32_neon = vpx_sad32x32_neon;
+const SadMxNFunc sad16x16_neon = vpx_sad16x16_neon;
+const SadMxNFunc sad16x8_neon = vpx_sad16x8_neon;
+const SadMxNFunc sad8x16_neon = vpx_sad8x16_neon;
+const SadMxNFunc sad8x8_neon = vpx_sad8x8_neon;
+const SadMxNFunc sad4x4_neon = vpx_sad4x4_neon;
+
+const SadMxNParam neon_tests[] = {
+ make_tuple(64, 64, sad64x64_neon, -1),
+ make_tuple(32, 32, sad32x32_neon, -1),
+ make_tuple(16, 16, sad16x16_neon, -1),
+ make_tuple(16, 8, sad16x8_neon, -1),
+ make_tuple(8, 16, sad8x16_neon, -1),
+ make_tuple(8, 8, sad8x8_neon, -1),
+ make_tuple(4, 4, sad4x4_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
+
+const SadMxNx4Func sad64x64x4d_neon = vpx_sad64x64x4d_neon;
+const SadMxNx4Func sad32x32x4d_neon = vpx_sad32x32x4d_neon;
+const SadMxNx4Func sad16x16x4d_neon = vpx_sad16x16x4d_neon;
+const SadMxNx4Param x4d_neon_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_neon, -1),
+ make_tuple(32, 32, sad32x32x4d_neon, -1),
+ make_tuple(16, 16, sad16x16x4d_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
+#endif // HAVE_NEON
//------------------------------------------------------------------------------
// x86 functions
#if HAVE_MMX
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
-#endif
-
-const sad_m_by_n_test_param_t mmx_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(16, 16, sad_16x16_mmx_vp9),
- make_tuple(8, 16, sad_8x16_mmx_vp9),
- make_tuple(16, 8, sad_16x8_mmx_vp9),
- make_tuple(8, 8, sad_8x8_mmx_vp9),
- make_tuple(4, 4, sad_4x4_mmx_vp9),
-#endif
+const SadMxNFunc sad16x16_mmx = vpx_sad16x16_mmx;
+const SadMxNFunc sad16x8_mmx = vpx_sad16x8_mmx;
+const SadMxNFunc sad8x16_mmx = vpx_sad8x16_mmx;
+const SadMxNFunc sad8x8_mmx = vpx_sad8x8_mmx;
+const SadMxNFunc sad4x4_mmx = vpx_sad4x4_mmx;
+const SadMxNParam mmx_tests[] = {
+ make_tuple(16, 16, sad16x16_mmx, -1),
+ make_tuple(16, 8, sad16x8_mmx, -1),
+ make_tuple(8, 16, sad8x16_mmx, -1),
+ make_tuple(8, 8, sad8x8_mmx, -1),
+ make_tuple(4, 4, sad4x4_mmx, -1),
};
INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
-#endif
+#endif // HAVE_MMX
#if HAVE_SSE
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
-const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
- make_tuple(4, 4, sad_4x4_sse_vp9),
- make_tuple(4, 8, sad_4x8_sse_vp9)));
+const SadMxNFunc sad4x8_sse = vpx_sad4x8_sse;
+const SadMxNFunc sad4x4_sse = vpx_sad4x4_sse;
+const SadMxNParam sse_tests[] = {
+ make_tuple(4, 8, sad4x8_sse, -1),
+ make_tuple(4, 4, sad4x4_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::ValuesIn(sse_tests));
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
- make_tuple(4, 8, sad_4x8x4d_sse),
- make_tuple(4, 4, sad_4x4x4d_sse)));
+const SadMxNAvgFunc sad4x8_avg_sse = vpx_sad4x8_avg_sse;
+const SadMxNAvgFunc sad4x4_avg_sse = vpx_sad4x4_avg_sse;
+const SadMxNAvgParam avg_sse_tests[] = {
+ make_tuple(4, 8, sad4x8_avg_sse, -1),
+ make_tuple(4, 4, sad4x4_avg_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADavgTest, ::testing::ValuesIn(avg_sse_tests));
+
+const SadMxNx4Func sad4x8x4d_sse = vpx_sad4x8x4d_sse;
+const SadMxNx4Func sad4x4x4d_sse = vpx_sad4x4x4d_sse;
+const SadMxNx4Param x4d_sse_tests[] = {
+ make_tuple(4, 8, sad4x8x4d_sse, -1),
+ make_tuple(4, 4, sad4x4x4d_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::ValuesIn(x4d_sse_tests));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_SSE
#if HAVE_SSE2
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
-const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
-const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
-const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
-const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-#endif
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
-const sad_m_by_n_fn_t sad_64x32_sse2_vp9 = vp9_sad64x32_sse2;
-const sad_m_by_n_fn_t sad_32x64_sse2_vp9 = vp9_sad32x64_sse2;
-const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
-const sad_m_by_n_fn_t sad_32x16_sse2_vp9 = vp9_sad32x16_sse2;
-const sad_m_by_n_fn_t sad_16x32_sse2_vp9 = vp9_sad16x32_sse2;
-const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
-const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
-const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
-const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
-const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
-#endif
-#endif
-const sad_m_by_n_test_param_t sse2_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt),
-#endif
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
- make_tuple(64, 64, sad_64x64_sse2_vp9),
- make_tuple(64, 32, sad_64x32_sse2_vp9),
- make_tuple(32, 64, sad_32x64_sse2_vp9),
- make_tuple(32, 32, sad_32x32_sse2_vp9),
- make_tuple(32, 16, sad_32x16_sse2_vp9),
- make_tuple(16, 32, sad_16x32_sse2_vp9),
- make_tuple(16, 16, sad_16x16_sse2_vp9),
- make_tuple(16, 8, sad_16x8_sse2_vp9),
- make_tuple(8, 16, sad_8x16_sse2_vp9),
- make_tuple(8, 8, sad_8x8_sse2_vp9),
- make_tuple(8, 4, sad_8x4_sse2_vp9),
-#endif
-#endif
+const SadMxNFunc sad64x64_sse2 = vpx_sad64x64_sse2;
+const SadMxNFunc sad64x32_sse2 = vpx_sad64x32_sse2;
+const SadMxNFunc sad32x64_sse2 = vpx_sad32x64_sse2;
+const SadMxNFunc sad32x32_sse2 = vpx_sad32x32_sse2;
+const SadMxNFunc sad32x16_sse2 = vpx_sad32x16_sse2;
+const SadMxNFunc sad16x32_sse2 = vpx_sad16x32_sse2;
+const SadMxNFunc sad16x16_sse2 = vpx_sad16x16_sse2;
+const SadMxNFunc sad16x8_sse2 = vpx_sad16x8_sse2;
+const SadMxNFunc sad8x16_sse2 = vpx_sad8x16_sse2;
+const SadMxNFunc sad8x8_sse2 = vpx_sad8x8_sse2;
+const SadMxNFunc sad8x4_sse2 = vpx_sad8x4_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_sse2 = vpx_highbd_sad64x64_sse2;
+const SadMxNFunc highbd_sad64x32_sse2 = vpx_highbd_sad64x32_sse2;
+const SadMxNFunc highbd_sad32x64_sse2 = vpx_highbd_sad32x64_sse2;
+const SadMxNFunc highbd_sad32x32_sse2 = vpx_highbd_sad32x32_sse2;
+const SadMxNFunc highbd_sad32x16_sse2 = vpx_highbd_sad32x16_sse2;
+const SadMxNFunc highbd_sad16x32_sse2 = vpx_highbd_sad16x32_sse2;
+const SadMxNFunc highbd_sad16x16_sse2 = vpx_highbd_sad16x16_sse2;
+const SadMxNFunc highbd_sad16x8_sse2 = vpx_highbd_sad16x8_sse2;
+const SadMxNFunc highbd_sad8x16_sse2 = vpx_highbd_sad8x16_sse2;
+const SadMxNFunc highbd_sad8x8_sse2 = vpx_highbd_sad8x8_sse2;
+const SadMxNFunc highbd_sad8x4_sse2 = vpx_highbd_sad8x4_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_sse2, -1),
+ make_tuple(64, 32, sad64x32_sse2, -1),
+ make_tuple(32, 64, sad32x64_sse2, -1),
+ make_tuple(32, 32, sad32x32_sse2, -1),
+ make_tuple(32, 16, sad32x16_sse2, -1),
+ make_tuple(16, 32, sad16x32_sse2, -1),
+ make_tuple(16, 16, sad16x16_sse2, -1),
+ make_tuple(16, 8, sad16x8_sse2, -1),
+ make_tuple(8, 16, sad8x16_sse2, -1),
+ make_tuple(8, 8, sad8x8_sse2, -1),
+ make_tuple(8, 4, sad8x4_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
-INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_sse2),
- make_tuple(64, 32, sad_64x32x4d_sse2),
- make_tuple(32, 64, sad_32x64x4d_sse2),
- make_tuple(32, 32, sad_32x32x4d_sse2),
- make_tuple(32, 16, sad_32x16x4d_sse2),
- make_tuple(16, 32, sad_16x32x4d_sse2),
- make_tuple(16, 16, sad_16x16x4d_sse2),
- make_tuple(16, 8, sad_16x8x4d_sse2),
- make_tuple(8, 16, sad_8x16x4d_sse2),
- make_tuple(8, 8, sad_8x8x4d_sse2),
- make_tuple(8, 4, sad_8x4x4d_sse2)));
-#endif
-#endif
-#endif
+const SadMxNAvgFunc sad64x64_avg_sse2 = vpx_sad64x64_avg_sse2;
+const SadMxNAvgFunc sad64x32_avg_sse2 = vpx_sad64x32_avg_sse2;
+const SadMxNAvgFunc sad32x64_avg_sse2 = vpx_sad32x64_avg_sse2;
+const SadMxNAvgFunc sad32x32_avg_sse2 = vpx_sad32x32_avg_sse2;
+const SadMxNAvgFunc sad32x16_avg_sse2 = vpx_sad32x16_avg_sse2;
+const SadMxNAvgFunc sad16x32_avg_sse2 = vpx_sad16x32_avg_sse2;
+const SadMxNAvgFunc sad16x16_avg_sse2 = vpx_sad16x16_avg_sse2;
+const SadMxNAvgFunc sad16x8_avg_sse2 = vpx_sad16x8_avg_sse2;
+const SadMxNAvgFunc sad8x16_avg_sse2 = vpx_sad8x16_avg_sse2;
+const SadMxNAvgFunc sad8x8_avg_sse2 = vpx_sad8x8_avg_sse2;
+const SadMxNAvgFunc sad8x4_avg_sse2 = vpx_sad8x4_avg_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_sse2 = vpx_highbd_sad64x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad64x32_avg_sse2 = vpx_highbd_sad64x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x64_avg_sse2 = vpx_highbd_sad32x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x32_avg_sse2 = vpx_highbd_sad32x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x16_avg_sse2 = vpx_highbd_sad32x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x32_avg_sse2 = vpx_highbd_sad16x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x16_avg_sse2 = vpx_highbd_sad16x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x8_avg_sse2 = vpx_highbd_sad16x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x16_avg_sse2 = vpx_highbd_sad8x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x8_avg_sse2 = vpx_highbd_sad8x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x4_avg_sse2 = vpx_highbd_sad8x4_avg_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_sse2, -1),
+ make_tuple(64, 32, sad64x32_avg_sse2, -1),
+ make_tuple(32, 64, sad32x64_avg_sse2, -1),
+ make_tuple(32, 32, sad32x32_avg_sse2, -1),
+ make_tuple(32, 16, sad32x16_avg_sse2, -1),
+ make_tuple(16, 32, sad16x32_avg_sse2, -1),
+ make_tuple(16, 16, sad16x16_avg_sse2, -1),
+ make_tuple(16, 8, sad16x8_avg_sse2, -1),
+ make_tuple(8, 16, sad8x16_avg_sse2, -1),
+ make_tuple(8, 8, sad8x8_avg_sse2, -1),
+ make_tuple(8, 4, sad8x4_avg_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
+
+const SadMxNx4Func sad64x64x4d_sse2 = vpx_sad64x64x4d_sse2;
+const SadMxNx4Func sad64x32x4d_sse2 = vpx_sad64x32x4d_sse2;
+const SadMxNx4Func sad32x64x4d_sse2 = vpx_sad32x64x4d_sse2;
+const SadMxNx4Func sad32x32x4d_sse2 = vpx_sad32x32x4d_sse2;
+const SadMxNx4Func sad32x16x4d_sse2 = vpx_sad32x16x4d_sse2;
+const SadMxNx4Func sad16x32x4d_sse2 = vpx_sad16x32x4d_sse2;
+const SadMxNx4Func sad16x16x4d_sse2 = vpx_sad16x16x4d_sse2;
+const SadMxNx4Func sad16x8x4d_sse2 = vpx_sad16x8x4d_sse2;
+const SadMxNx4Func sad8x16x4d_sse2 = vpx_sad8x16x4d_sse2;
+const SadMxNx4Func sad8x8x4d_sse2 = vpx_sad8x8x4d_sse2;
+const SadMxNx4Func sad8x4x4d_sse2 = vpx_sad8x4x4d_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_sse2 = vpx_highbd_sad64x64x4d_sse2;
+const SadMxNx4Func highbd_sad64x32x4d_sse2 = vpx_highbd_sad64x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x64x4d_sse2 = vpx_highbd_sad32x64x4d_sse2;
+const SadMxNx4Func highbd_sad32x32x4d_sse2 = vpx_highbd_sad32x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x16x4d_sse2 = vpx_highbd_sad32x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x32x4d_sse2 = vpx_highbd_sad16x32x4d_sse2;
+const SadMxNx4Func highbd_sad16x16x4d_sse2 = vpx_highbd_sad16x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x8x4d_sse2 = vpx_highbd_sad16x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x16x4d_sse2 = vpx_highbd_sad8x16x4d_sse2;
+const SadMxNx4Func highbd_sad8x8x4d_sse2 = vpx_highbd_sad8x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x4x4d_sse2 = vpx_highbd_sad8x4x4d_sse2;
+const SadMxNx4Func highbd_sad4x8x4d_sse2 = vpx_highbd_sad4x8x4d_sse2;
+const SadMxNx4Func highbd_sad4x4x4d_sse2 = vpx_highbd_sad4x4x4d_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_sse2, -1),
+ make_tuple(64, 32, sad64x32x4d_sse2, -1),
+ make_tuple(32, 64, sad32x64x4d_sse2, -1),
+ make_tuple(32, 32, sad32x32x4d_sse2, -1),
+ make_tuple(32, 16, sad32x16x4d_sse2, -1),
+ make_tuple(16, 32, sad16x32x4d_sse2, -1),
+ make_tuple(16, 16, sad16x16x4d_sse2, -1),
+ make_tuple(16, 8, sad16x8x4d_sse2, -1),
+ make_tuple(8, 16, sad8x16x4d_sse2, -1),
+ make_tuple(8, 8, sad8x8x4d_sse2, -1),
+ make_tuple(8, 4, sad8x4x4d_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSE2
#if HAVE_SSE3
-#if CONFIG_VP8_ENCODER
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
- make_tuple(16, 16, sad_16x16x4d_sse3),
- make_tuple(16, 8, sad_16x8x4d_sse3),
- make_tuple(8, 16, sad_8x16x4d_sse3),
- make_tuple(8, 8, sad_8x8x4d_sse3),
- make_tuple(4, 4, sad_4x4x4d_sse3)));
-#endif
-#endif
+// The only functions here are x3 variants, which do not have tests.
+#endif // HAVE_SSE3
#if HAVE_SSSE3
-#if CONFIG_USE_X86INC
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_sse3)));
-#endif
-#endif
-#endif
+// The only functions here are x3 variants, which do not have tests.
+#endif // HAVE_SSSE3
+
+#if HAVE_SSE4_1
+// The only functions here are x8 variants, which do not have tests.
+#endif // HAVE_SSE4_1
+
+#if HAVE_AVX2
+const SadMxNFunc sad64x64_avx2 = vpx_sad64x64_avx2;
+const SadMxNFunc sad64x32_avx2 = vpx_sad64x32_avx2;
+const SadMxNFunc sad32x64_avx2 = vpx_sad32x64_avx2;
+const SadMxNFunc sad32x32_avx2 = vpx_sad32x32_avx2;
+const SadMxNFunc sad32x16_avx2 = vpx_sad32x16_avx2;
+const SadMxNParam avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avx2, -1),
+ make_tuple(64, 32, sad64x32_avx2, -1),
+ make_tuple(32, 64, sad32x64_avx2, -1),
+ make_tuple(32, 32, sad32x32_avx2, -1),
+ make_tuple(32, 16, sad32x16_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
+
+const SadMxNAvgFunc sad64x64_avg_avx2 = vpx_sad64x64_avg_avx2;
+const SadMxNAvgFunc sad64x32_avg_avx2 = vpx_sad64x32_avg_avx2;
+const SadMxNAvgFunc sad32x64_avg_avx2 = vpx_sad32x64_avg_avx2;
+const SadMxNAvgFunc sad32x32_avg_avx2 = vpx_sad32x32_avg_avx2;
+const SadMxNAvgFunc sad32x16_avg_avx2 = vpx_sad32x16_avg_avx2;
+const SadMxNAvgParam avg_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_avx2, -1),
+ make_tuple(64, 32, sad64x32_avg_avx2, -1),
+ make_tuple(32, 64, sad32x64_avg_avx2, -1),
+ make_tuple(32, 32, sad32x32_avg_avx2, -1),
+ make_tuple(32, 16, sad32x16_avg_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
+
+const SadMxNx4Func sad64x64x4d_avx2 = vpx_sad64x64x4d_avx2;
+const SadMxNx4Func sad32x32x4d_avx2 = vpx_sad32x32x4d_avx2;
+const SadMxNx4Param x4d_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_avx2, -1),
+ make_tuple(32, 32, sad32x32x4d_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
+#endif // HAVE_AVX2
+
+//------------------------------------------------------------------------------
+// MIPS functions
+#if HAVE_MSA
+const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
+const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
+const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
+const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
+const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
+const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
+const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
+const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
+const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
+const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
+const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
+const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
+const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
+const SadMxNParam msa_tests[] = {
+ make_tuple(64, 64, sad64x64_msa, -1),
+ make_tuple(64, 32, sad64x32_msa, -1),
+ make_tuple(32, 64, sad32x64_msa, -1),
+ make_tuple(32, 32, sad32x32_msa, -1),
+ make_tuple(32, 16, sad32x16_msa, -1),
+ make_tuple(16, 32, sad16x32_msa, -1),
+ make_tuple(16, 16, sad16x16_msa, -1),
+ make_tuple(16, 8, sad16x8_msa, -1),
+ make_tuple(8, 16, sad8x16_msa, -1),
+ make_tuple(8, 8, sad8x8_msa, -1),
+ make_tuple(8, 4, sad8x4_msa, -1),
+ make_tuple(4, 8, sad4x8_msa, -1),
+ make_tuple(4, 4, sad4x4_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
+
+const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
+const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
+const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
+const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
+const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
+const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
+const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
+const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
+const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
+const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
+const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
+const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
+const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
+const SadMxNAvgParam avg_msa_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_msa, -1),
+ make_tuple(64, 32, sad64x32_avg_msa, -1),
+ make_tuple(32, 64, sad32x64_avg_msa, -1),
+ make_tuple(32, 32, sad32x32_avg_msa, -1),
+ make_tuple(32, 16, sad32x16_avg_msa, -1),
+ make_tuple(16, 32, sad16x32_avg_msa, -1),
+ make_tuple(16, 16, sad16x16_avg_msa, -1),
+ make_tuple(16, 8, sad16x8_avg_msa, -1),
+ make_tuple(8, 16, sad8x16_avg_msa, -1),
+ make_tuple(8, 8, sad8x8_avg_msa, -1),
+ make_tuple(8, 4, sad8x4_avg_msa, -1),
+ make_tuple(4, 8, sad4x8_avg_msa, -1),
+ make_tuple(4, 4, sad4x4_avg_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
+
+const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
+const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
+const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
+const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
+const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
+const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
+const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
+const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
+const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
+const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
+const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
+const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
+const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
+const SadMxNx4Param x4d_msa_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_msa, -1),
+ make_tuple(64, 32, sad64x32x4d_msa, -1),
+ make_tuple(32, 64, sad32x64x4d_msa, -1),
+ make_tuple(32, 32, sad32x32x4d_msa, -1),
+ make_tuple(32, 16, sad32x16x4d_msa, -1),
+ make_tuple(16, 32, sad16x32x4d_msa, -1),
+ make_tuple(16, 16, sad16x16x4d_msa, -1),
+ make_tuple(16, 8, sad16x8x4d_msa, -1),
+ make_tuple(8, 16, sad8x16x4d_msa, -1),
+ make_tuple(8, 8, sad8x8x4d_msa, -1),
+ make_tuple(8, 4, sad8x4x4d_msa, -1),
+ make_tuple(4, 8, sad4x8x4d_msa, -1),
+ make_tuple(4, 4, sad4x4x4d_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
unsigned int reference_sad, exp_sad;
reference_sad = ReferenceSAD(max_sad);
exp_sad = SAD(max_sad);
if (reference_sad <= max_sad) {
ASSERT_EQ(exp_sad, reference_sad);
} else {
ASSERT_GE(exp_sad, reference_sad);
}
}
| null |
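The snippet above records the old max_sad contract: an optimized SAD may stop accumulating once the running total exceeds max_sad, so its result only has to match the reference exactly when the true SAD is at or below that threshold. A minimal sketch of that contract (hypothetical helper name, row-granular early exit as in the original test code):

#include <cstdint>
#include <cstdlib>

unsigned int SadWithMaxSad(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride,
                           int width, int height, unsigned int max_sad) {
  unsigned int sad = 0;
  for (int h = 0; h < height; ++h) {
    for (int w = 0; w < width; ++w)
      sad += abs(src[h * src_stride + w] - ref[h * ref_stride + w]);
    if (sad > max_sad) break;  // early exit; later rows may be skipped
  }
  return sad;
}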
150,875 |
virtual uint8_t* GetReference(int block_idx) {
return reference_data_ + block_idx * kDataBlockSize;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual uint8_t *GetReference(int block_idx) {
#if CONFIG_VP9_HIGHBITDEPTH
if (use_high_bit_depth_)
return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
block_idx * kDataBlockSize);
#endif // CONFIG_VP9_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
|
@@ -13,56 +13,74 @@
#include <limits.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#if CONFIG_VP8_ENCODER
-#include "./vp8_rtcd.h"
-#endif
-#if CONFIG_VP9_ENCODER
-#include "./vp9_rtcd.h"
-#endif
-#include "vpx_mem/vpx_mem.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
-typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
- int source_stride,
- const unsigned char *reference_ptr,
- int reference_stride,
- unsigned int max_sad);
-typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ const uint8_t *second_pred);
+typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
-typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
- int src_stride,
- const unsigned char * const ref_ptr[],
- int ref_stride,
- unsigned int *sad_array);
-typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
- sad_n_by_n_by_4_test_param_t;
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[],
+ int ref_stride,
+ uint32_t *sad_array);
+typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
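// Each param tuple is (width, height, function, bit_depth). A bit_depth of
// -1 selects the 8-bit path in SetUp(); 8, 10 or 12 exercise the
// CONFIG_VP9_HIGHBITDEPTH path. For example:
//   make_tuple(16, 16, sad16x16_c, -1)         // low bit depth
//   make_tuple(16, 16, highbd_sad16x16_c, 12)  // 12-bit high bit depth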
using libvpx_test::ACMRandom;
namespace {
class SADTestBase : public ::testing::Test {
public:
- SADTestBase(int width, int height) : width_(width), height_(height) {}
+ SADTestBase(int width, int height, int bit_depth) :
+ width_(width), height_(height), bd_(bit_depth) {}
static void SetUpTestCase() {
- source_data_ = reinterpret_cast<uint8_t*>(
+ source_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
- reference_data_ = reinterpret_cast<uint8_t*>(
+ reference_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
+ second_pred8_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, 64*64));
+ source_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
+ reference_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
+ second_pred16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
}
static void TearDownTestCase() {
- vpx_free(source_data_);
- source_data_ = NULL;
- vpx_free(reference_data_);
- reference_data_ = NULL;
+ vpx_free(source_data8_);
+ source_data8_ = NULL;
+ vpx_free(reference_data8_);
+ reference_data8_ = NULL;
+ vpx_free(second_pred8_);
+ second_pred8_ = NULL;
+ vpx_free(source_data16_);
+ source_data16_ = NULL;
+ vpx_free(reference_data16_);
+ reference_data16_ = NULL;
+ vpx_free(second_pred16_);
+ second_pred16_ = NULL;
}
virtual void TearDown() {
@@ -76,142 +94,335 @@
static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
+ if (bd_ == -1) {
+ use_high_bit_depth_ = false;
+ bit_depth_ = VPX_BITS_8;
+ source_data_ = source_data8_;
+ reference_data_ = reference_data8_;
+ second_pred_ = second_pred8_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ use_high_bit_depth_ = true;
+ bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
+ source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
+ reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
+ second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
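// Worked example of the mask arithmetic in SetUp():
//   VPX_BITS_8  (bd_ ==  8 or -1): mask_ = (1 << 8)  - 1 = 0x0ff
//   VPX_BITS_10 (bd_ == 10):       mask_ = (1 << 10) - 1 = 0x3ff
//   VPX_BITS_12 (bd_ == 12):       mask_ = (1 << 12) - 1 = 0xfff
// FillRandom() applies it as rnd_.Rand16() & mask_ to keep samples in range.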
- virtual uint8_t* GetReference(int block_idx) {
+ virtual uint8_t *GetReference(int block_idx) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_high_bit_depth_)
+ return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
+ block_idx * kDataBlockSize);
+#endif // CONFIG_VP9_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
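// The conversions above are pointer tags, not copies; assuming the usual
// vpx_ports/mem.h definitions they are roughly:
//   #define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
//   #define CONVERT_TO_BYTEPTR(x)  ((uint8_t *)(((uintptr_t)(x)) >> 1))
// so GetReference() untags to uint16_t, advances by whole 16-bit samples,
// and retags, keeping block_idx * kDataBlockSize an element offset rather
// than a byte offset in the high-bit-depth case.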
// Sum of Absolute Differences. Given two blocks, calculate the absolute
// difference between two pixels in the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ReferenceSAD(int block_idx) {
unsigned int sad = 0;
- const uint8_t* const reference = GetReference(block_idx);
-
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- sad += abs(source_data_[h * source_stride_ + w]
- - reference[h * reference_stride_ + w]);
- }
- if (sad > max_sad) {
- break;
+ if (!use_high_bit_depth_) {
+ sad += abs(source8[h * source_stride_ + w] -
+ reference8[h * reference_stride_ + w]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ sad += abs(source16[h * source_stride_ + w] -
+ reference16[h * reference_stride_ + w]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
return sad;
}
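// Worked example of the accumulation above on a 2x2 block:
//   src = {10, 20, 30, 40}, ref = {12, 18, 30, 45}
//   sad = |10-12| + |20-18| + |30-30| + |40-45| = 2 + 2 + 0 + 5 = 9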
- void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) {
+ // Sum of Absolute Differences Average. Given two blocks and a prediction,
+ // calculate the absolute difference between each source pixel and the
+ // average of the corresponding reference and predicted pixels; accumulate.
+ unsigned int ReferenceSADavg(int block_idx) {
+ unsigned int sad = 0;
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+ const uint8_t *const second_pred8 = second_pred_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+ const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = fill_constant;
+ if (!use_high_bit_depth_) {
+ const int tmp = second_pred8[h * width_ + w] +
+ reference8[h * reference_stride_ + w];
+ const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source8[h * source_stride_ + w] - comp_pred);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ const int tmp = second_pred16[h * width_ + w] +
+ reference16[h * reference_stride_ + w];
+ const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source16[h * source_stride_ + w] - comp_pred);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ return sad;
+ }
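// The comp_pred rounding above relies on ROUND_POWER_OF_TWO which, assuming
// the usual vpx definition, is:
//   #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
// e.g. averaging ref = 255 with pred = 0 gives (255 + 0 + 1) >> 1 = 128,
// i.e. ties round up.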
+
+ void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ for (int h = 0; h < height_; ++h) {
+ for (int w = 0; w < width_; ++w) {
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = fill_constant;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
void FillRandom(uint8_t *data, int stride) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = rnd_.Rand8();
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
- int width_, height_;
- static uint8_t* source_data_;
+ int width_, height_, mask_, bd_;
+ vpx_bit_depth_t bit_depth_;
+ static uint8_t *source_data_;
+ static uint8_t *reference_data_;
+ static uint8_t *second_pred_;
int source_stride_;
- static uint8_t* reference_data_;
+ bool use_high_bit_depth_;
+ static uint8_t *source_data8_;
+ static uint8_t *reference_data8_;
+ static uint8_t *second_pred8_;
+ static uint16_t *source_data16_;
+ static uint16_t *reference_data16_;
+ static uint16_t *second_pred16_;
int reference_stride_;
ACMRandom rnd_;
};
-class SADTest : public SADTestBase,
- public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+class SADx4Test
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNx4Param> {
public:
- SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
-
- protected:
- unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
- unsigned int ret;
- const uint8_t* const reference = GetReference(block_idx);
-
- REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
- reference, reference_stride_,
- max_sad));
- return ret;
- }
-
- void CheckSad(unsigned int max_sad) {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(max_sad);
- exp_sad = SAD(max_sad);
-
- if (reference_sad <= max_sad) {
- ASSERT_EQ(exp_sad, reference_sad);
- } else {
- // Alternative implementations are not required to check max_sad
- ASSERT_GE(exp_sad, reference_sad);
- }
- }
-};
-
-class SADx4Test : public SADTestBase,
- public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
- public:
- SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
protected:
void SADs(unsigned int *results) {
- const uint8_t* refs[] = {GetReference(0), GetReference(1),
- GetReference(2), GetReference(3)};
+ const uint8_t *references[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
- REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
- refs, reference_stride_,
- results));
+ ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ references, reference_stride_,
+ results));
}
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
- for (int block = 0; block < 4; block++) {
- reference_sad = ReferenceSAD(UINT_MAX, block);
+ for (int block = 0; block < 4; ++block) {
+ reference_sad = ReferenceSAD(block);
- EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
}
}
};
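// A sketch of the semantics CheckSADs() verifies: an x4d kernel must behave
// as four independent single-block SADs over a shared source (hypothetical
// reference helper, 8-bit path only):
static void ReferenceSadX4(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           int width, int height, uint32_t *out) {
  for (int i = 0; i < 4; ++i) {
    uint32_t sad = 0;
    for (int h = 0; h < height; ++h)
      for (int w = 0; w < width; ++w)
        sad += abs(src[h * src_stride + w] - ref[i][h * ref_stride + w]);
    out[i] = sad;
  }
}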
-uint8_t* SADTestBase::source_data_ = NULL;
-uint8_t* SADTestBase::reference_data_ = NULL;
+class SADTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNParam> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSAD(0);
+ const unsigned int exp_sad = SAD(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+class SADavgTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNAvgParam> {
+ public:
+ SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD_avg(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ second_pred_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSADavg(0);
+ const unsigned int exp_sad = SAD_avg(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+uint8_t *SADTestBase::source_data_ = NULL;
+uint8_t *SADTestBase::reference_data_ = NULL;
+uint8_t *SADTestBase::second_pred_ = NULL;
+uint8_t *SADTestBase::source_data8_ = NULL;
+uint8_t *SADTestBase::reference_data8_ = NULL;
+uint8_t *SADTestBase::second_pred8_ = NULL;
+uint16_t *SADTestBase::source_data16_ = NULL;
+uint16_t *SADTestBase::reference_data16_ = NULL;
+uint16_t *SADTestBase::second_pred16_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(reference_data_, reference_stride_, 255);
- CheckSad(UINT_MAX);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ CheckSAD();
+}
+
+TEST_P(SADTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
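// Why subtracting 1 exercises unaligned loads: SetUp() sets
// reference_stride_ = width_ * 2, so the reduced stride is odd and rows no
// longer start at vector-aligned addresses; e.g. width_ == 16 gives stride
// 31, so row 1 begins at reference_data_ + 31.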
+
+TEST_P(SADTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+TEST_P(SADavgTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADavgTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
}
TEST_P(SADx4Test, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(GetReference(0), reference_stride_, 255);
- FillConstant(GetReference(1), reference_stride_, 255);
- FillConstant(GetReference(2), reference_stride_, 255);
- FillConstant(GetReference(3), reference_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, mask_);
+ FillConstant(GetReference(1), reference_stride_, mask_);
+ FillConstant(GetReference(2), reference_stride_, mask_);
+ FillConstant(GetReference(3), reference_stride_, mask_);
CheckSADs();
}
-TEST_P(SADTest, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(UINT_MAX);
-}
-
TEST_P(SADx4Test, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
+ FillConstant(source_data_, source_stride_, mask_);
FillConstant(GetReference(0), reference_stride_, 0);
FillConstant(GetReference(1), reference_stride_, 0);
FillConstant(GetReference(2), reference_stride_, 0);
@@ -219,15 +430,6 @@
CheckSADs();
}
-TEST_P(SADTest, ShortRef) {
- int tmp_stride = reference_stride_;
- reference_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -240,17 +442,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, UnalignedRef) {
- // The reference frame, but not the source frame, may be unaligned for
- // certain types of searches.
- int tmp_stride = reference_stride_;
- reference_stride_ -= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -265,15 +456,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, ShortSrc) {
- int tmp_stride = source_stride_;
- source_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- source_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -286,271 +468,743 @@
source_stride_ = tmp_stride;
}
-TEST_P(SADTest, MaxSAD) {
- // Verify that, when max_sad is set, the implementation does not return a
- // value lower than the reference.
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(128);
+TEST_P(SADx4Test, SrcAlignedByWidth) {
+ uint8_t *tmp_source_data = source_data_;
+ source_data_ += width_;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_data_ = tmp_source_data;
}
using std::tr1::make_tuple;
//------------------------------------------------------------------------------
// C functions
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
-const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
-const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
-const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
-const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
-const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
-const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
-#endif
-const sad_m_by_n_test_param_t c_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(64, 64, sad_64x64_c_vp9),
- make_tuple(32, 32, sad_32x32_c_vp9),
- make_tuple(16, 16, sad_16x16_c_vp9),
- make_tuple(8, 16, sad_8x16_c_vp9),
- make_tuple(16, 8, sad_16x8_c_vp9),
- make_tuple(8, 8, sad_8x8_c_vp9),
- make_tuple(8, 4, sad_8x4_c_vp9),
- make_tuple(4, 8, sad_4x8_c_vp9),
- make_tuple(4, 4, sad_4x4_c_vp9),
-#endif
+const SadMxNFunc sad64x64_c = vpx_sad64x64_c;
+const SadMxNFunc sad64x32_c = vpx_sad64x32_c;
+const SadMxNFunc sad32x64_c = vpx_sad32x64_c;
+const SadMxNFunc sad32x32_c = vpx_sad32x32_c;
+const SadMxNFunc sad32x16_c = vpx_sad32x16_c;
+const SadMxNFunc sad16x32_c = vpx_sad16x32_c;
+const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
+const SadMxNFunc sad16x8_c = vpx_sad16x8_c;
+const SadMxNFunc sad8x16_c = vpx_sad8x16_c;
+const SadMxNFunc sad8x8_c = vpx_sad8x8_c;
+const SadMxNFunc sad8x4_c = vpx_sad8x4_c;
+const SadMxNFunc sad4x8_c = vpx_sad4x8_c;
+const SadMxNFunc sad4x4_c = vpx_sad4x4_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_c = vpx_highbd_sad64x64_c;
+const SadMxNFunc highbd_sad64x32_c = vpx_highbd_sad64x32_c;
+const SadMxNFunc highbd_sad32x64_c = vpx_highbd_sad32x64_c;
+const SadMxNFunc highbd_sad32x32_c = vpx_highbd_sad32x32_c;
+const SadMxNFunc highbd_sad32x16_c = vpx_highbd_sad32x16_c;
+const SadMxNFunc highbd_sad16x32_c = vpx_highbd_sad16x32_c;
+const SadMxNFunc highbd_sad16x16_c = vpx_highbd_sad16x16_c;
+const SadMxNFunc highbd_sad16x8_c = vpx_highbd_sad16x8_c;
+const SadMxNFunc highbd_sad8x16_c = vpx_highbd_sad8x16_c;
+const SadMxNFunc highbd_sad8x8_c = vpx_highbd_sad8x8_c;
+const SadMxNFunc highbd_sad8x4_c = vpx_highbd_sad8x4_c;
+const SadMxNFunc highbd_sad4x8_c = vpx_highbd_sad4x8_c;
+const SadMxNFunc highbd_sad4x4_c = vpx_highbd_sad4x4_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam c_tests[] = {
+ make_tuple(64, 64, sad64x64_c, -1),
+ make_tuple(64, 32, sad64x32_c, -1),
+ make_tuple(32, 64, sad32x64_c, -1),
+ make_tuple(32, 32, sad32x32_c, -1),
+ make_tuple(32, 16, sad32x16_c, -1),
+ make_tuple(16, 32, sad16x32_c, -1),
+ make_tuple(16, 16, sad16x16_c, -1),
+ make_tuple(16, 8, sad16x8_c, -1),
+ make_tuple(8, 16, sad8x16_c, -1),
+ make_tuple(8, 8, sad8x8_c, -1),
+ make_tuple(8, 4, sad8x4_c, -1),
+ make_tuple(4, 8, sad4x8_c, -1),
+ make_tuple(4, 4, sad4x4_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
-#if CONFIG_VP9_ENCODER
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
-INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_c),
- make_tuple(64, 32, sad_64x32x4d_c),
- make_tuple(32, 64, sad_32x64x4d_c),
- make_tuple(32, 32, sad_32x32x4d_c),
- make_tuple(32, 16, sad_32x16x4d_c),
- make_tuple(16, 32, sad_16x32x4d_c),
- make_tuple(16, 16, sad_16x16x4d_c),
- make_tuple(16, 8, sad_16x8x4d_c),
- make_tuple(8, 16, sad_8x16x4d_c),
- make_tuple(8, 8, sad_8x8x4d_c),
- make_tuple(8, 4, sad_8x4x4d_c),
- make_tuple(4, 8, sad_4x8x4d_c),
- make_tuple(4, 4, sad_4x4x4d_c)));
-#endif // CONFIG_VP9_ENCODER
+const SadMxNAvgFunc sad64x64_avg_c = vpx_sad64x64_avg_c;
+const SadMxNAvgFunc sad64x32_avg_c = vpx_sad64x32_avg_c;
+const SadMxNAvgFunc sad32x64_avg_c = vpx_sad32x64_avg_c;
+const SadMxNAvgFunc sad32x32_avg_c = vpx_sad32x32_avg_c;
+const SadMxNAvgFunc sad32x16_avg_c = vpx_sad32x16_avg_c;
+const SadMxNAvgFunc sad16x32_avg_c = vpx_sad16x32_avg_c;
+const SadMxNAvgFunc sad16x16_avg_c = vpx_sad16x16_avg_c;
+const SadMxNAvgFunc sad16x8_avg_c = vpx_sad16x8_avg_c;
+const SadMxNAvgFunc sad8x16_avg_c = vpx_sad8x16_avg_c;
+const SadMxNAvgFunc sad8x8_avg_c = vpx_sad8x8_avg_c;
+const SadMxNAvgFunc sad8x4_avg_c = vpx_sad8x4_avg_c;
+const SadMxNAvgFunc sad4x8_avg_c = vpx_sad4x8_avg_c;
+const SadMxNAvgFunc sad4x4_avg_c = vpx_sad4x4_avg_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_c = vpx_highbd_sad64x64_avg_c;
+const SadMxNAvgFunc highbd_sad64x32_avg_c = vpx_highbd_sad64x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x64_avg_c = vpx_highbd_sad32x64_avg_c;
+const SadMxNAvgFunc highbd_sad32x32_avg_c = vpx_highbd_sad32x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x16_avg_c = vpx_highbd_sad32x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x32_avg_c = vpx_highbd_sad16x32_avg_c;
+const SadMxNAvgFunc highbd_sad16x16_avg_c = vpx_highbd_sad16x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x8_avg_c = vpx_highbd_sad16x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x16_avg_c = vpx_highbd_sad8x16_avg_c;
+const SadMxNAvgFunc highbd_sad8x8_avg_c = vpx_highbd_sad8x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x4_avg_c = vpx_highbd_sad8x4_avg_c;
+const SadMxNAvgFunc highbd_sad4x8_avg_c = vpx_highbd_sad4x8_avg_c;
+const SadMxNAvgFunc highbd_sad4x4_avg_c = vpx_highbd_sad4x4_avg_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_c_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_c, -1),
+ make_tuple(64, 32, sad64x32_avg_c, -1),
+ make_tuple(32, 64, sad32x64_avg_c, -1),
+ make_tuple(32, 32, sad32x32_avg_c, -1),
+ make_tuple(32, 16, sad32x16_avg_c, -1),
+ make_tuple(16, 32, sad16x32_avg_c, -1),
+ make_tuple(16, 16, sad16x16_avg_c, -1),
+ make_tuple(16, 8, sad16x8_avg_c, -1),
+ make_tuple(8, 16, sad8x16_avg_c, -1),
+ make_tuple(8, 8, sad8x8_avg_c, -1),
+ make_tuple(8, 4, sad8x4_avg_c, -1),
+ make_tuple(4, 8, sad4x8_avg_c, -1),
+ make_tuple(4, 4, sad4x4_avg_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
+
+const SadMxNx4Func sad64x64x4d_c = vpx_sad64x64x4d_c;
+const SadMxNx4Func sad64x32x4d_c = vpx_sad64x32x4d_c;
+const SadMxNx4Func sad32x64x4d_c = vpx_sad32x64x4d_c;
+const SadMxNx4Func sad32x32x4d_c = vpx_sad32x32x4d_c;
+const SadMxNx4Func sad32x16x4d_c = vpx_sad32x16x4d_c;
+const SadMxNx4Func sad16x32x4d_c = vpx_sad16x32x4d_c;
+const SadMxNx4Func sad16x16x4d_c = vpx_sad16x16x4d_c;
+const SadMxNx4Func sad16x8x4d_c = vpx_sad16x8x4d_c;
+const SadMxNx4Func sad8x16x4d_c = vpx_sad8x16x4d_c;
+const SadMxNx4Func sad8x8x4d_c = vpx_sad8x8x4d_c;
+const SadMxNx4Func sad8x4x4d_c = vpx_sad8x4x4d_c;
+const SadMxNx4Func sad4x8x4d_c = vpx_sad4x8x4d_c;
+const SadMxNx4Func sad4x4x4d_c = vpx_sad4x4x4d_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_c = vpx_highbd_sad64x64x4d_c;
+const SadMxNx4Func highbd_sad64x32x4d_c = vpx_highbd_sad64x32x4d_c;
+const SadMxNx4Func highbd_sad32x64x4d_c = vpx_highbd_sad32x64x4d_c;
+const SadMxNx4Func highbd_sad32x32x4d_c = vpx_highbd_sad32x32x4d_c;
+const SadMxNx4Func highbd_sad32x16x4d_c = vpx_highbd_sad32x16x4d_c;
+const SadMxNx4Func highbd_sad16x32x4d_c = vpx_highbd_sad16x32x4d_c;
+const SadMxNx4Func highbd_sad16x16x4d_c = vpx_highbd_sad16x16x4d_c;
+const SadMxNx4Func highbd_sad16x8x4d_c = vpx_highbd_sad16x8x4d_c;
+const SadMxNx4Func highbd_sad8x16x4d_c = vpx_highbd_sad8x16x4d_c;
+const SadMxNx4Func highbd_sad8x8x4d_c = vpx_highbd_sad8x8x4d_c;
+const SadMxNx4Func highbd_sad8x4x4d_c = vpx_highbd_sad8x4x4d_c;
+const SadMxNx4Func highbd_sad4x8x4d_c = vpx_highbd_sad4x8x4d_c;
+const SadMxNx4Func highbd_sad4x4x4d_c = vpx_highbd_sad4x4x4d_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_c_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_c, -1),
+ make_tuple(64, 32, sad64x32x4d_c, -1),
+ make_tuple(32, 64, sad32x64x4d_c, -1),
+ make_tuple(32, 32, sad32x32x4d_c, -1),
+ make_tuple(32, 16, sad32x16x4d_c, -1),
+ make_tuple(16, 32, sad16x32x4d_c, -1),
+ make_tuple(16, 16, sad16x16x4d_c, -1),
+ make_tuple(16, 8, sad16x8x4d_c, -1),
+ make_tuple(8, 16, sad8x16x4d_c, -1),
+ make_tuple(8, 8, sad8x8x4d_c, -1),
+ make_tuple(8, 4, sad8x4x4d_c, -1),
+ make_tuple(4, 8, sad4x8x4d_c, -1),
+ make_tuple(4, 4, sad4x4x4d_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
//------------------------------------------------------------------------------
// ARM functions
#if HAVE_MEDIA
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_armv6 = vp8_sad16x16_armv6;
-INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_armv6)));
-#endif
-#endif
+const SadMxNFunc sad16x16_media = vpx_sad16x16_media;
+const SadMxNParam media_tests[] = {
+ make_tuple(16, 16, sad16x16_media, -1),
+};
+INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
+#endif // HAVE_MEDIA
#if HAVE_NEON
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_neon = vp8_sad16x16_neon;
-const sad_m_by_n_fn_t sad_8x16_neon = vp8_sad8x16_neon;
-const sad_m_by_n_fn_t sad_16x8_neon = vp8_sad16x8_neon;
-const sad_m_by_n_fn_t sad_8x8_neon = vp8_sad8x8_neon;
-const sad_m_by_n_fn_t sad_4x4_neon = vp8_sad4x4_neon;
-INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_neon),
- make_tuple(8, 16, sad_8x16_neon),
- make_tuple(16, 8, sad_16x8_neon),
- make_tuple(8, 8, sad_8x8_neon),
- make_tuple(4, 4, sad_4x4_neon)));
-#endif
-#endif
+const SadMxNFunc sad64x64_neon = vpx_sad64x64_neon;
+const SadMxNFunc sad32x32_neon = vpx_sad32x32_neon;
+const SadMxNFunc sad16x16_neon = vpx_sad16x16_neon;
+const SadMxNFunc sad16x8_neon = vpx_sad16x8_neon;
+const SadMxNFunc sad8x16_neon = vpx_sad8x16_neon;
+const SadMxNFunc sad8x8_neon = vpx_sad8x8_neon;
+const SadMxNFunc sad4x4_neon = vpx_sad4x4_neon;
+
+const SadMxNParam neon_tests[] = {
+ make_tuple(64, 64, sad64x64_neon, -1),
+ make_tuple(32, 32, sad32x32_neon, -1),
+ make_tuple(16, 16, sad16x16_neon, -1),
+ make_tuple(16, 8, sad16x8_neon, -1),
+ make_tuple(8, 16, sad8x16_neon, -1),
+ make_tuple(8, 8, sad8x8_neon, -1),
+ make_tuple(4, 4, sad4x4_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
+
+const SadMxNx4Func sad64x64x4d_neon = vpx_sad64x64x4d_neon;
+const SadMxNx4Func sad32x32x4d_neon = vpx_sad32x32x4d_neon;
+const SadMxNx4Func sad16x16x4d_neon = vpx_sad16x16x4d_neon;
+const SadMxNx4Param x4d_neon_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_neon, -1),
+ make_tuple(32, 32, sad32x32x4d_neon, -1),
+ make_tuple(16, 16, sad16x16x4d_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
+#endif // HAVE_NEON
//------------------------------------------------------------------------------
// x86 functions
#if HAVE_MMX
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
-#endif
-
-const sad_m_by_n_test_param_t mmx_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(16, 16, sad_16x16_mmx_vp9),
- make_tuple(8, 16, sad_8x16_mmx_vp9),
- make_tuple(16, 8, sad_16x8_mmx_vp9),
- make_tuple(8, 8, sad_8x8_mmx_vp9),
- make_tuple(4, 4, sad_4x4_mmx_vp9),
-#endif
+const SadMxNFunc sad16x16_mmx = vpx_sad16x16_mmx;
+const SadMxNFunc sad16x8_mmx = vpx_sad16x8_mmx;
+const SadMxNFunc sad8x16_mmx = vpx_sad8x16_mmx;
+const SadMxNFunc sad8x8_mmx = vpx_sad8x8_mmx;
+const SadMxNFunc sad4x4_mmx = vpx_sad4x4_mmx;
+const SadMxNParam mmx_tests[] = {
+ make_tuple(16, 16, sad16x16_mmx, -1),
+ make_tuple(16, 8, sad16x8_mmx, -1),
+ make_tuple(8, 16, sad8x16_mmx, -1),
+ make_tuple(8, 8, sad8x8_mmx, -1),
+ make_tuple(4, 4, sad4x4_mmx, -1),
};
INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
-#endif
+#endif // HAVE_MMX
#if HAVE_SSE
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
-const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
- make_tuple(4, 4, sad_4x4_sse_vp9),
- make_tuple(4, 8, sad_4x8_sse_vp9)));
+const SadMxNFunc sad4x8_sse = vpx_sad4x8_sse;
+const SadMxNFunc sad4x4_sse = vpx_sad4x4_sse;
+const SadMxNParam sse_tests[] = {
+ make_tuple(4, 8, sad4x8_sse, -1),
+ make_tuple(4, 4, sad4x4_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::ValuesIn(sse_tests));
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
- make_tuple(4, 8, sad_4x8x4d_sse),
- make_tuple(4, 4, sad_4x4x4d_sse)));
+const SadMxNAvgFunc sad4x8_avg_sse = vpx_sad4x8_avg_sse;
+const SadMxNAvgFunc sad4x4_avg_sse = vpx_sad4x4_avg_sse;
+const SadMxNAvgParam avg_sse_tests[] = {
+ make_tuple(4, 8, sad4x8_avg_sse, -1),
+ make_tuple(4, 4, sad4x4_avg_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADavgTest, ::testing::ValuesIn(avg_sse_tests));
+
+const SadMxNx4Func sad4x8x4d_sse = vpx_sad4x8x4d_sse;
+const SadMxNx4Func sad4x4x4d_sse = vpx_sad4x4x4d_sse;
+const SadMxNx4Param x4d_sse_tests[] = {
+ make_tuple(4, 8, sad4x8x4d_sse, -1),
+ make_tuple(4, 4, sad4x4x4d_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::ValuesIn(x4d_sse_tests));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_SSE
#if HAVE_SSE2
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
-const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
-const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
-const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
-const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-#endif
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
-const sad_m_by_n_fn_t sad_64x32_sse2_vp9 = vp9_sad64x32_sse2;
-const sad_m_by_n_fn_t sad_32x64_sse2_vp9 = vp9_sad32x64_sse2;
-const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
-const sad_m_by_n_fn_t sad_32x16_sse2_vp9 = vp9_sad32x16_sse2;
-const sad_m_by_n_fn_t sad_16x32_sse2_vp9 = vp9_sad16x32_sse2;
-const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
-const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
-const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
-const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
-const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
-#endif
-#endif
-const sad_m_by_n_test_param_t sse2_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt),
-#endif
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
- make_tuple(64, 64, sad_64x64_sse2_vp9),
- make_tuple(64, 32, sad_64x32_sse2_vp9),
- make_tuple(32, 64, sad_32x64_sse2_vp9),
- make_tuple(32, 32, sad_32x32_sse2_vp9),
- make_tuple(32, 16, sad_32x16_sse2_vp9),
- make_tuple(16, 32, sad_16x32_sse2_vp9),
- make_tuple(16, 16, sad_16x16_sse2_vp9),
- make_tuple(16, 8, sad_16x8_sse2_vp9),
- make_tuple(8, 16, sad_8x16_sse2_vp9),
- make_tuple(8, 8, sad_8x8_sse2_vp9),
- make_tuple(8, 4, sad_8x4_sse2_vp9),
-#endif
-#endif
+const SadMxNFunc sad64x64_sse2 = vpx_sad64x64_sse2;
+const SadMxNFunc sad64x32_sse2 = vpx_sad64x32_sse2;
+const SadMxNFunc sad32x64_sse2 = vpx_sad32x64_sse2;
+const SadMxNFunc sad32x32_sse2 = vpx_sad32x32_sse2;
+const SadMxNFunc sad32x16_sse2 = vpx_sad32x16_sse2;
+const SadMxNFunc sad16x32_sse2 = vpx_sad16x32_sse2;
+const SadMxNFunc sad16x16_sse2 = vpx_sad16x16_sse2;
+const SadMxNFunc sad16x8_sse2 = vpx_sad16x8_sse2;
+const SadMxNFunc sad8x16_sse2 = vpx_sad8x16_sse2;
+const SadMxNFunc sad8x8_sse2 = vpx_sad8x8_sse2;
+const SadMxNFunc sad8x4_sse2 = vpx_sad8x4_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_sse2 = vpx_highbd_sad64x64_sse2;
+const SadMxNFunc highbd_sad64x32_sse2 = vpx_highbd_sad64x32_sse2;
+const SadMxNFunc highbd_sad32x64_sse2 = vpx_highbd_sad32x64_sse2;
+const SadMxNFunc highbd_sad32x32_sse2 = vpx_highbd_sad32x32_sse2;
+const SadMxNFunc highbd_sad32x16_sse2 = vpx_highbd_sad32x16_sse2;
+const SadMxNFunc highbd_sad16x32_sse2 = vpx_highbd_sad16x32_sse2;
+const SadMxNFunc highbd_sad16x16_sse2 = vpx_highbd_sad16x16_sse2;
+const SadMxNFunc highbd_sad16x8_sse2 = vpx_highbd_sad16x8_sse2;
+const SadMxNFunc highbd_sad8x16_sse2 = vpx_highbd_sad8x16_sse2;
+const SadMxNFunc highbd_sad8x8_sse2 = vpx_highbd_sad8x8_sse2;
+const SadMxNFunc highbd_sad8x4_sse2 = vpx_highbd_sad8x4_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_sse2, -1),
+ make_tuple(64, 32, sad64x32_sse2, -1),
+ make_tuple(32, 64, sad32x64_sse2, -1),
+ make_tuple(32, 32, sad32x32_sse2, -1),
+ make_tuple(32, 16, sad32x16_sse2, -1),
+ make_tuple(16, 32, sad16x32_sse2, -1),
+ make_tuple(16, 16, sad16x16_sse2, -1),
+ make_tuple(16, 8, sad16x8_sse2, -1),
+ make_tuple(8, 16, sad8x16_sse2, -1),
+ make_tuple(8, 8, sad8x8_sse2, -1),
+ make_tuple(8, 4, sad8x4_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
-INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_sse2),
- make_tuple(64, 32, sad_64x32x4d_sse2),
- make_tuple(32, 64, sad_32x64x4d_sse2),
- make_tuple(32, 32, sad_32x32x4d_sse2),
- make_tuple(32, 16, sad_32x16x4d_sse2),
- make_tuple(16, 32, sad_16x32x4d_sse2),
- make_tuple(16, 16, sad_16x16x4d_sse2),
- make_tuple(16, 8, sad_16x8x4d_sse2),
- make_tuple(8, 16, sad_8x16x4d_sse2),
- make_tuple(8, 8, sad_8x8x4d_sse2),
- make_tuple(8, 4, sad_8x4x4d_sse2)));
-#endif
-#endif
-#endif
+const SadMxNAvgFunc sad64x64_avg_sse2 = vpx_sad64x64_avg_sse2;
+const SadMxNAvgFunc sad64x32_avg_sse2 = vpx_sad64x32_avg_sse2;
+const SadMxNAvgFunc sad32x64_avg_sse2 = vpx_sad32x64_avg_sse2;
+const SadMxNAvgFunc sad32x32_avg_sse2 = vpx_sad32x32_avg_sse2;
+const SadMxNAvgFunc sad32x16_avg_sse2 = vpx_sad32x16_avg_sse2;
+const SadMxNAvgFunc sad16x32_avg_sse2 = vpx_sad16x32_avg_sse2;
+const SadMxNAvgFunc sad16x16_avg_sse2 = vpx_sad16x16_avg_sse2;
+const SadMxNAvgFunc sad16x8_avg_sse2 = vpx_sad16x8_avg_sse2;
+const SadMxNAvgFunc sad8x16_avg_sse2 = vpx_sad8x16_avg_sse2;
+const SadMxNAvgFunc sad8x8_avg_sse2 = vpx_sad8x8_avg_sse2;
+const SadMxNAvgFunc sad8x4_avg_sse2 = vpx_sad8x4_avg_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_sse2 = vpx_highbd_sad64x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad64x32_avg_sse2 = vpx_highbd_sad64x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x64_avg_sse2 = vpx_highbd_sad32x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x32_avg_sse2 = vpx_highbd_sad32x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x16_avg_sse2 = vpx_highbd_sad32x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x32_avg_sse2 = vpx_highbd_sad16x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x16_avg_sse2 = vpx_highbd_sad16x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x8_avg_sse2 = vpx_highbd_sad16x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x16_avg_sse2 = vpx_highbd_sad8x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x8_avg_sse2 = vpx_highbd_sad8x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x4_avg_sse2 = vpx_highbd_sad8x4_avg_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_sse2, -1),
+ make_tuple(64, 32, sad64x32_avg_sse2, -1),
+ make_tuple(32, 64, sad32x64_avg_sse2, -1),
+ make_tuple(32, 32, sad32x32_avg_sse2, -1),
+ make_tuple(32, 16, sad32x16_avg_sse2, -1),
+ make_tuple(16, 32, sad16x32_avg_sse2, -1),
+ make_tuple(16, 16, sad16x16_avg_sse2, -1),
+ make_tuple(16, 8, sad16x8_avg_sse2, -1),
+ make_tuple(8, 16, sad8x16_avg_sse2, -1),
+ make_tuple(8, 8, sad8x8_avg_sse2, -1),
+ make_tuple(8, 4, sad8x4_avg_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
+
+const SadMxNx4Func sad64x64x4d_sse2 = vpx_sad64x64x4d_sse2;
+const SadMxNx4Func sad64x32x4d_sse2 = vpx_sad64x32x4d_sse2;
+const SadMxNx4Func sad32x64x4d_sse2 = vpx_sad32x64x4d_sse2;
+const SadMxNx4Func sad32x32x4d_sse2 = vpx_sad32x32x4d_sse2;
+const SadMxNx4Func sad32x16x4d_sse2 = vpx_sad32x16x4d_sse2;
+const SadMxNx4Func sad16x32x4d_sse2 = vpx_sad16x32x4d_sse2;
+const SadMxNx4Func sad16x16x4d_sse2 = vpx_sad16x16x4d_sse2;
+const SadMxNx4Func sad16x8x4d_sse2 = vpx_sad16x8x4d_sse2;
+const SadMxNx4Func sad8x16x4d_sse2 = vpx_sad8x16x4d_sse2;
+const SadMxNx4Func sad8x8x4d_sse2 = vpx_sad8x8x4d_sse2;
+const SadMxNx4Func sad8x4x4d_sse2 = vpx_sad8x4x4d_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_sse2 = vpx_highbd_sad64x64x4d_sse2;
+const SadMxNx4Func highbd_sad64x32x4d_sse2 = vpx_highbd_sad64x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x64x4d_sse2 = vpx_highbd_sad32x64x4d_sse2;
+const SadMxNx4Func highbd_sad32x32x4d_sse2 = vpx_highbd_sad32x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x16x4d_sse2 = vpx_highbd_sad32x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x32x4d_sse2 = vpx_highbd_sad16x32x4d_sse2;
+const SadMxNx4Func highbd_sad16x16x4d_sse2 = vpx_highbd_sad16x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x8x4d_sse2 = vpx_highbd_sad16x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x16x4d_sse2 = vpx_highbd_sad8x16x4d_sse2;
+const SadMxNx4Func highbd_sad8x8x4d_sse2 = vpx_highbd_sad8x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x4x4d_sse2 = vpx_highbd_sad8x4x4d_sse2;
+const SadMxNx4Func highbd_sad4x8x4d_sse2 = vpx_highbd_sad4x8x4d_sse2;
+const SadMxNx4Func highbd_sad4x4x4d_sse2 = vpx_highbd_sad4x4x4d_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_sse2, -1),
+ make_tuple(64, 32, sad64x32x4d_sse2, -1),
+ make_tuple(32, 64, sad32x64x4d_sse2, -1),
+ make_tuple(32, 32, sad32x32x4d_sse2, -1),
+ make_tuple(32, 16, sad32x16x4d_sse2, -1),
+ make_tuple(16, 32, sad16x32x4d_sse2, -1),
+ make_tuple(16, 16, sad16x16x4d_sse2, -1),
+ make_tuple(16, 8, sad16x8x4d_sse2, -1),
+ make_tuple(8, 16, sad8x16x4d_sse2, -1),
+ make_tuple(8, 8, sad8x8x4d_sse2, -1),
+ make_tuple(8, 4, sad8x4x4d_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSE2
#if HAVE_SSE3
-#if CONFIG_VP8_ENCODER
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
- make_tuple(16, 16, sad_16x16x4d_sse3),
- make_tuple(16, 8, sad_16x8x4d_sse3),
- make_tuple(8, 16, sad_8x16x4d_sse3),
- make_tuple(8, 8, sad_8x8x4d_sse3),
- make_tuple(4, 4, sad_4x4x4d_sse3)));
-#endif
-#endif
+// The only SSE3 functions are x3 variants, which do not have tests.
+#endif // HAVE_SSE3
#if HAVE_SSSE3
-#if CONFIG_USE_X86INC
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_sse3)));
-#endif
-#endif
-#endif
+// The only SSSE3 functions are x3 variants, which do not have tests.
+#endif // HAVE_SSSE3
+
+#if HAVE_SSE4_1
+// The only SSE4.1 functions are x8 variants, which do not have tests.
+#endif // HAVE_SSE4_1
+
+#if HAVE_AVX2
+const SadMxNFunc sad64x64_avx2 = vpx_sad64x64_avx2;
+const SadMxNFunc sad64x32_avx2 = vpx_sad64x32_avx2;
+const SadMxNFunc sad32x64_avx2 = vpx_sad32x64_avx2;
+const SadMxNFunc sad32x32_avx2 = vpx_sad32x32_avx2;
+const SadMxNFunc sad32x16_avx2 = vpx_sad32x16_avx2;
+const SadMxNParam avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avx2, -1),
+ make_tuple(64, 32, sad64x32_avx2, -1),
+ make_tuple(32, 64, sad32x64_avx2, -1),
+ make_tuple(32, 32, sad32x32_avx2, -1),
+ make_tuple(32, 16, sad32x16_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
+
+const SadMxNAvgFunc sad64x64_avg_avx2 = vpx_sad64x64_avg_avx2;
+const SadMxNAvgFunc sad64x32_avg_avx2 = vpx_sad64x32_avg_avx2;
+const SadMxNAvgFunc sad32x64_avg_avx2 = vpx_sad32x64_avg_avx2;
+const SadMxNAvgFunc sad32x32_avg_avx2 = vpx_sad32x32_avg_avx2;
+const SadMxNAvgFunc sad32x16_avg_avx2 = vpx_sad32x16_avg_avx2;
+const SadMxNAvgParam avg_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_avx2, -1),
+ make_tuple(64, 32, sad64x32_avg_avx2, -1),
+ make_tuple(32, 64, sad32x64_avg_avx2, -1),
+ make_tuple(32, 32, sad32x32_avg_avx2, -1),
+ make_tuple(32, 16, sad32x16_avg_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
+
+const SadMxNx4Func sad64x64x4d_avx2 = vpx_sad64x64x4d_avx2;
+const SadMxNx4Func sad32x32x4d_avx2 = vpx_sad32x32x4d_avx2;
+const SadMxNx4Param x4d_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_avx2, -1),
+ make_tuple(32, 32, sad32x32x4d_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
+#endif // HAVE_AVX2
+
+//------------------------------------------------------------------------------
+// MIPS functions
+#if HAVE_MSA
+const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
+const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
+const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
+const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
+const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
+const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
+const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
+const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
+const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
+const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
+const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
+const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
+const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
+const SadMxNParam msa_tests[] = {
+ make_tuple(64, 64, sad64x64_msa, -1),
+ make_tuple(64, 32, sad64x32_msa, -1),
+ make_tuple(32, 64, sad32x64_msa, -1),
+ make_tuple(32, 32, sad32x32_msa, -1),
+ make_tuple(32, 16, sad32x16_msa, -1),
+ make_tuple(16, 32, sad16x32_msa, -1),
+ make_tuple(16, 16, sad16x16_msa, -1),
+ make_tuple(16, 8, sad16x8_msa, -1),
+ make_tuple(8, 16, sad8x16_msa, -1),
+ make_tuple(8, 8, sad8x8_msa, -1),
+ make_tuple(8, 4, sad8x4_msa, -1),
+ make_tuple(4, 8, sad4x8_msa, -1),
+ make_tuple(4, 4, sad4x4_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
+
+const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
+const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
+const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
+const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
+const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
+const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
+const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
+const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
+const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
+const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
+const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
+const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
+const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
+const SadMxNAvgParam avg_msa_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_msa, -1),
+ make_tuple(64, 32, sad64x32_avg_msa, -1),
+ make_tuple(32, 64, sad32x64_avg_msa, -1),
+ make_tuple(32, 32, sad32x32_avg_msa, -1),
+ make_tuple(32, 16, sad32x16_avg_msa, -1),
+ make_tuple(16, 32, sad16x32_avg_msa, -1),
+ make_tuple(16, 16, sad16x16_avg_msa, -1),
+ make_tuple(16, 8, sad16x8_avg_msa, -1),
+ make_tuple(8, 16, sad8x16_avg_msa, -1),
+ make_tuple(8, 8, sad8x8_avg_msa, -1),
+ make_tuple(8, 4, sad8x4_avg_msa, -1),
+ make_tuple(4, 8, sad4x8_avg_msa, -1),
+ make_tuple(4, 4, sad4x4_avg_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
+
+const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
+const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
+const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
+const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
+const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
+const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
+const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
+const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
+const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
+const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
+const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
+const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
+const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
+const SadMxNx4Param x4d_msa_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_msa, -1),
+ make_tuple(64, 32, sad64x32x4d_msa, -1),
+ make_tuple(32, 64, sad32x64x4d_msa, -1),
+ make_tuple(32, 32, sad32x32x4d_msa, -1),
+ make_tuple(32, 16, sad32x16x4d_msa, -1),
+ make_tuple(16, 32, sad16x32x4d_msa, -1),
+ make_tuple(16, 16, sad16x16x4d_msa, -1),
+ make_tuple(16, 8, sad16x8x4d_msa, -1),
+ make_tuple(8, 16, sad8x16x4d_msa, -1),
+ make_tuple(8, 8, sad8x8x4d_msa, -1),
+ make_tuple(8, 4, sad8x4x4d_msa, -1),
+ make_tuple(4, 8, sad4x8x4d_msa, -1),
+ make_tuple(4, 4, sad4x4x4d_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
| null |
virtual uint8_t *GetReference(int block_idx) {
#if CONFIG_VP9_HIGHBITDEPTH
if (use_high_bit_depth_)
return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
block_idx * kDataBlockSize);
#endif // CONFIG_VP9_HIGHBITDEPTH
|
150,876 |
void SADs(unsigned int *results) {
const uint8_t* refs[] = {GetReference(0), GetReference(1),
GetReference(2), GetReference(3)};
REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
refs, reference_stride_,
results));
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void SADs(unsigned int *results) {
const uint8_t *references[] = {GetReference(0), GetReference(1),
GetReference(2), GetReference(3)};
ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
references, reference_stride_,
results));
}
|
@@ -13,56 +13,74 @@
#include <limits.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#if CONFIG_VP8_ENCODER
-#include "./vp8_rtcd.h"
-#endif
-#if CONFIG_VP9_ENCODER
-#include "./vp9_rtcd.h"
-#endif
-#include "vpx_mem/vpx_mem.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
-typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
- int source_stride,
- const unsigned char *reference_ptr,
- int reference_stride,
- unsigned int max_sad);
-typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ const uint8_t *second_pred);
+typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
-typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
- int src_stride,
- const unsigned char * const ref_ptr[],
- int ref_stride,
- unsigned int *sad_array);
-typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
- sad_n_by_n_by_4_test_param_t;
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[],
+ int ref_stride,
+ uint32_t *sad_array);
+typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
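// Editor's sketch, not part of the patch: how a parameter tuple for these
// fixtures is built and read. The trailing int is the bit depth; -1 selects
// the ordinary 8-bit path, while 8/10/12 select the CONFIG_VP9_HIGHBITDEPTH
// variants registered further below. MakeLowbdParam is an illustrative
// helper, not a libvpx API.
static inline SadMxNParam MakeLowbdParam(int w, int h, SadMxNFunc fn) {
  return std::tr1::make_tuple(w, h, fn, -1);  // (width, height, function, bd)
}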
using libvpx_test::ACMRandom;
namespace {
class SADTestBase : public ::testing::Test {
public:
- SADTestBase(int width, int height) : width_(width), height_(height) {}
+ SADTestBase(int width, int height, int bit_depth) :
+ width_(width), height_(height), bd_(bit_depth) {}
static void SetUpTestCase() {
- source_data_ = reinterpret_cast<uint8_t*>(
+ source_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
- reference_data_ = reinterpret_cast<uint8_t*>(
+ reference_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
+ second_pred8_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, 64*64));
+ source_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
+ reference_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
+ second_pred16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
}
static void TearDownTestCase() {
- vpx_free(source_data_);
- source_data_ = NULL;
- vpx_free(reference_data_);
- reference_data_ = NULL;
+ vpx_free(source_data8_);
+ source_data8_ = NULL;
+ vpx_free(reference_data8_);
+ reference_data8_ = NULL;
+ vpx_free(second_pred8_);
+ second_pred8_ = NULL;
+ vpx_free(source_data16_);
+ source_data16_ = NULL;
+ vpx_free(reference_data16_);
+ reference_data16_ = NULL;
+ vpx_free(second_pred16_);
+ second_pred16_ = NULL;
}
virtual void TearDown() {
@@ -76,142 +94,335 @@
static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
+ if (bd_ == -1) {
+ use_high_bit_depth_ = false;
+ bit_depth_ = VPX_BITS_8;
+ source_data_ = source_data8_;
+ reference_data_ = reference_data8_;
+ second_pred_ = second_pred8_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ use_high_bit_depth_ = true;
+ bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
+ source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
+ reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
+ second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
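// Editor's sketch, not part of the patch: the mask computed in SetUp() above
// is the maximum representable sample value for the configured bit depth.
// MaxSampleValue is an illustrative helper, not a libvpx API.
static int MaxSampleValue(int bit_depth) {
  return (1 << bit_depth) - 1;  // 255 at 8-bit, 1023 at 10-bit, 4095 at 12-bit
}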
- virtual uint8_t* GetReference(int block_idx) {
+ virtual uint8_t *GetReference(int block_idx) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_high_bit_depth_)
+ return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
+ block_idx * kDataBlockSize);
+#endif // CONFIG_VP9_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
// Sum of Absolute Differences. Given two blocks, calculate the absolute
// difference between two pixels in the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ReferenceSAD(int block_idx) {
unsigned int sad = 0;
- const uint8_t* const reference = GetReference(block_idx);
-
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- sad += abs(source_data_[h * source_stride_ + w]
- - reference[h * reference_stride_ + w]);
- }
- if (sad > max_sad) {
- break;
+ if (!use_high_bit_depth_) {
+ sad += abs(source8[h * source_stride_ + w] -
+ reference8[h * reference_stride_ + w]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ sad += abs(source16[h * source_stride_ + w] -
+ reference16[h * reference_stride_ + w]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
return sad;
}
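// Editor's sketch, not part of the patch: the 8-bit arithmetic that
// ReferenceSAD() above performs, written as a free function for clarity.
// PlainSad is illustrative only and assumes abs() is visible through the
// existing includes, as it is for ReferenceSAD() itself.
static unsigned int PlainSad(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             int width, int height) {
  unsigned int sad = 0;
  for (int h = 0; h < height; ++h) {
    for (int w = 0; w < width; ++w) {
      sad += abs(src[h * src_stride + w] - ref[h * ref_stride + w]);
    }
  }
  return sad;
}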
- void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) {
+  // Sum of Absolute Differences Average. Given two blocks and a prediction,
+  // calculate the absolute difference between one pixel and the average of
+  // the corresponding reference and predicted pixels; accumulate.
+ unsigned int ReferenceSADavg(int block_idx) {
+ unsigned int sad = 0;
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+ const uint8_t *const second_pred8 = second_pred_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+ const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = fill_constant;
+ if (!use_high_bit_depth_) {
+ const int tmp = second_pred8[h * width_ + w] +
+ reference8[h * reference_stride_ + w];
+ const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source8[h * source_stride_ + w] - comp_pred);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ const int tmp = second_pred16[h * width_ + w] +
+ reference16[h * reference_stride_ + w];
+ const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source16[h * source_stride_ + w] - comp_pred);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ return sad;
+ }
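// Editor's sketch, not part of the patch: ROUND_POWER_OF_TWO(tmp, 1) from
// vpx_ports/mem.h rounds the two-sample average upward, i.e. (a + b + 1) >> 1.
// AvgRound is an illustrative helper, not a libvpx API.
static uint8_t AvgRound(uint8_t pred, uint8_t ref) {
  return static_cast<uint8_t>((pred + ref + 1) >> 1);
}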
+
+ void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ for (int h = 0; h < height_; ++h) {
+ for (int w = 0; w < width_; ++w) {
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = fill_constant;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
void FillRandom(uint8_t *data, int stride) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = rnd_.Rand8();
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
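// Editor's sketch, not part of the patch: the "& mask_" in FillRandom() above
// clamps a random 16-bit value into the legal sample range for the bit depth,
// e.g. 0xABCD & 0x03FF == 0x03CD for 10-bit data. ClampRandomSample is an
// illustrative helper only.
static uint16_t ClampRandomSample(uint16_t r, int bit_depth) {
  return static_cast<uint16_t>(r & ((1 << bit_depth) - 1));
}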
- int width_, height_;
- static uint8_t* source_data_;
+ int width_, height_, mask_, bd_;
+ vpx_bit_depth_t bit_depth_;
+ static uint8_t *source_data_;
+ static uint8_t *reference_data_;
+ static uint8_t *second_pred_;
int source_stride_;
- static uint8_t* reference_data_;
+ bool use_high_bit_depth_;
+ static uint8_t *source_data8_;
+ static uint8_t *reference_data8_;
+ static uint8_t *second_pred8_;
+ static uint16_t *source_data16_;
+ static uint16_t *reference_data16_;
+ static uint16_t *second_pred16_;
int reference_stride_;
ACMRandom rnd_;
};
-class SADTest : public SADTestBase,
- public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+class SADx4Test
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNx4Param> {
public:
- SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
-
- protected:
- unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
- unsigned int ret;
- const uint8_t* const reference = GetReference(block_idx);
-
- REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
- reference, reference_stride_,
- max_sad));
- return ret;
- }
-
- void CheckSad(unsigned int max_sad) {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(max_sad);
- exp_sad = SAD(max_sad);
-
- if (reference_sad <= max_sad) {
- ASSERT_EQ(exp_sad, reference_sad);
- } else {
- // Alternative implementations are not required to check max_sad
- ASSERT_GE(exp_sad, reference_sad);
- }
- }
-};
-
-class SADx4Test : public SADTestBase,
- public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
- public:
- SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
protected:
void SADs(unsigned int *results) {
- const uint8_t* refs[] = {GetReference(0), GetReference(1),
- GetReference(2), GetReference(3)};
+ const uint8_t *references[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
- REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
- refs, reference_stride_,
- results));
+ ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ references, reference_stride_,
+ results));
}
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
- for (int block = 0; block < 4; block++) {
- reference_sad = ReferenceSAD(UINT_MAX, block);
+ for (int block = 0; block < 4; ++block) {
+ reference_sad = ReferenceSAD(block);
- EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
}
}
};
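// Editor's sketch, not part of the patch: the contract a SadMxNx4Func must
// satisfy -- one source block scored against four candidate reference blocks,
// equivalent to four single-block SAD calls. ReferenceFourSads reuses the
// illustrative PlainSad helper sketched earlier.
static void ReferenceFourSads(const uint8_t *src, int src_stride,
                              const uint8_t *const refs[4], int ref_stride,
                              int width, int height, uint32_t sads[4]) {
  for (int i = 0; i < 4; ++i) {
    sads[i] = PlainSad(src, src_stride, refs[i], ref_stride, width, height);
  }
}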
-uint8_t* SADTestBase::source_data_ = NULL;
-uint8_t* SADTestBase::reference_data_ = NULL;
+class SADTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNParam> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSAD(0);
+ const unsigned int exp_sad = SAD(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+class SADavgTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNAvgParam> {
+ public:
+ SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD_avg(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ second_pred_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSADavg(0);
+ const unsigned int exp_sad = SAD_avg(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+uint8_t *SADTestBase::source_data_ = NULL;
+uint8_t *SADTestBase::reference_data_ = NULL;
+uint8_t *SADTestBase::second_pred_ = NULL;
+uint8_t *SADTestBase::source_data8_ = NULL;
+uint8_t *SADTestBase::reference_data8_ = NULL;
+uint8_t *SADTestBase::second_pred8_ = NULL;
+uint16_t *SADTestBase::source_data16_ = NULL;
+uint16_t *SADTestBase::reference_data16_ = NULL;
+uint16_t *SADTestBase::second_pred16_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(reference_data_, reference_stride_, 255);
- CheckSad(UINT_MAX);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ CheckSAD();
+}
+
+TEST_P(SADTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+TEST_P(SADavgTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADavgTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
}
TEST_P(SADx4Test, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(GetReference(0), reference_stride_, 255);
- FillConstant(GetReference(1), reference_stride_, 255);
- FillConstant(GetReference(2), reference_stride_, 255);
- FillConstant(GetReference(3), reference_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, mask_);
+ FillConstant(GetReference(1), reference_stride_, mask_);
+ FillConstant(GetReference(2), reference_stride_, mask_);
+ FillConstant(GetReference(3), reference_stride_, mask_);
CheckSADs();
}
-TEST_P(SADTest, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(UINT_MAX);
-}
-
TEST_P(SADx4Test, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
+ FillConstant(source_data_, source_stride_, mask_);
FillConstant(GetReference(0), reference_stride_, 0);
FillConstant(GetReference(1), reference_stride_, 0);
FillConstant(GetReference(2), reference_stride_, 0);
@@ -219,15 +430,6 @@
CheckSADs();
}
-TEST_P(SADTest, ShortRef) {
- int tmp_stride = reference_stride_;
- reference_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -240,17 +442,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, UnalignedRef) {
- // The reference frame, but not the source frame, may be unaligned for
- // certain types of searches.
- int tmp_stride = reference_stride_;
- reference_stride_ -= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -265,15 +456,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, ShortSrc) {
- int tmp_stride = source_stride_;
- source_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- source_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -286,271 +468,743 @@
source_stride_ = tmp_stride;
}
-TEST_P(SADTest, MaxSAD) {
- // Verify that, when max_sad is set, the implementation does not return a
- // value lower than the reference.
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(128);
+TEST_P(SADx4Test, SrcAlignedByWidth) {
+  uint8_t *tmp_source_data = source_data_;
+ source_data_ += width_;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_data_ = tmp_source_data;
}
using std::tr1::make_tuple;
//------------------------------------------------------------------------------
// C functions
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
-const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
-const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
-const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
-const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
-const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
-const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
-#endif
-const sad_m_by_n_test_param_t c_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(64, 64, sad_64x64_c_vp9),
- make_tuple(32, 32, sad_32x32_c_vp9),
- make_tuple(16, 16, sad_16x16_c_vp9),
- make_tuple(8, 16, sad_8x16_c_vp9),
- make_tuple(16, 8, sad_16x8_c_vp9),
- make_tuple(8, 8, sad_8x8_c_vp9),
- make_tuple(8, 4, sad_8x4_c_vp9),
- make_tuple(4, 8, sad_4x8_c_vp9),
- make_tuple(4, 4, sad_4x4_c_vp9),
-#endif
+const SadMxNFunc sad64x64_c = vpx_sad64x64_c;
+const SadMxNFunc sad64x32_c = vpx_sad64x32_c;
+const SadMxNFunc sad32x64_c = vpx_sad32x64_c;
+const SadMxNFunc sad32x32_c = vpx_sad32x32_c;
+const SadMxNFunc sad32x16_c = vpx_sad32x16_c;
+const SadMxNFunc sad16x32_c = vpx_sad16x32_c;
+const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
+const SadMxNFunc sad16x8_c = vpx_sad16x8_c;
+const SadMxNFunc sad8x16_c = vpx_sad8x16_c;
+const SadMxNFunc sad8x8_c = vpx_sad8x8_c;
+const SadMxNFunc sad8x4_c = vpx_sad8x4_c;
+const SadMxNFunc sad4x8_c = vpx_sad4x8_c;
+const SadMxNFunc sad4x4_c = vpx_sad4x4_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_c = vpx_highbd_sad64x64_c;
+const SadMxNFunc highbd_sad64x32_c = vpx_highbd_sad64x32_c;
+const SadMxNFunc highbd_sad32x64_c = vpx_highbd_sad32x64_c;
+const SadMxNFunc highbd_sad32x32_c = vpx_highbd_sad32x32_c;
+const SadMxNFunc highbd_sad32x16_c = vpx_highbd_sad32x16_c;
+const SadMxNFunc highbd_sad16x32_c = vpx_highbd_sad16x32_c;
+const SadMxNFunc highbd_sad16x16_c = vpx_highbd_sad16x16_c;
+const SadMxNFunc highbd_sad16x8_c = vpx_highbd_sad16x8_c;
+const SadMxNFunc highbd_sad8x16_c = vpx_highbd_sad8x16_c;
+const SadMxNFunc highbd_sad8x8_c = vpx_highbd_sad8x8_c;
+const SadMxNFunc highbd_sad8x4_c = vpx_highbd_sad8x4_c;
+const SadMxNFunc highbd_sad4x8_c = vpx_highbd_sad4x8_c;
+const SadMxNFunc highbd_sad4x4_c = vpx_highbd_sad4x4_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam c_tests[] = {
+ make_tuple(64, 64, sad64x64_c, -1),
+ make_tuple(64, 32, sad64x32_c, -1),
+ make_tuple(32, 64, sad32x64_c, -1),
+ make_tuple(32, 32, sad32x32_c, -1),
+ make_tuple(32, 16, sad32x16_c, -1),
+ make_tuple(16, 32, sad16x32_c, -1),
+ make_tuple(16, 16, sad16x16_c, -1),
+ make_tuple(16, 8, sad16x8_c, -1),
+ make_tuple(8, 16, sad8x16_c, -1),
+ make_tuple(8, 8, sad8x8_c, -1),
+ make_tuple(8, 4, sad8x4_c, -1),
+ make_tuple(4, 8, sad4x8_c, -1),
+ make_tuple(4, 4, sad4x4_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
-#if CONFIG_VP9_ENCODER
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
-INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_c),
- make_tuple(64, 32, sad_64x32x4d_c),
- make_tuple(32, 64, sad_32x64x4d_c),
- make_tuple(32, 32, sad_32x32x4d_c),
- make_tuple(32, 16, sad_32x16x4d_c),
- make_tuple(16, 32, sad_16x32x4d_c),
- make_tuple(16, 16, sad_16x16x4d_c),
- make_tuple(16, 8, sad_16x8x4d_c),
- make_tuple(8, 16, sad_8x16x4d_c),
- make_tuple(8, 8, sad_8x8x4d_c),
- make_tuple(8, 4, sad_8x4x4d_c),
- make_tuple(4, 8, sad_4x8x4d_c),
- make_tuple(4, 4, sad_4x4x4d_c)));
-#endif // CONFIG_VP9_ENCODER
+const SadMxNAvgFunc sad64x64_avg_c = vpx_sad64x64_avg_c;
+const SadMxNAvgFunc sad64x32_avg_c = vpx_sad64x32_avg_c;
+const SadMxNAvgFunc sad32x64_avg_c = vpx_sad32x64_avg_c;
+const SadMxNAvgFunc sad32x32_avg_c = vpx_sad32x32_avg_c;
+const SadMxNAvgFunc sad32x16_avg_c = vpx_sad32x16_avg_c;
+const SadMxNAvgFunc sad16x32_avg_c = vpx_sad16x32_avg_c;
+const SadMxNAvgFunc sad16x16_avg_c = vpx_sad16x16_avg_c;
+const SadMxNAvgFunc sad16x8_avg_c = vpx_sad16x8_avg_c;
+const SadMxNAvgFunc sad8x16_avg_c = vpx_sad8x16_avg_c;
+const SadMxNAvgFunc sad8x8_avg_c = vpx_sad8x8_avg_c;
+const SadMxNAvgFunc sad8x4_avg_c = vpx_sad8x4_avg_c;
+const SadMxNAvgFunc sad4x8_avg_c = vpx_sad4x8_avg_c;
+const SadMxNAvgFunc sad4x4_avg_c = vpx_sad4x4_avg_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_c = vpx_highbd_sad64x64_avg_c;
+const SadMxNAvgFunc highbd_sad64x32_avg_c = vpx_highbd_sad64x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x64_avg_c = vpx_highbd_sad32x64_avg_c;
+const SadMxNAvgFunc highbd_sad32x32_avg_c = vpx_highbd_sad32x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x16_avg_c = vpx_highbd_sad32x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x32_avg_c = vpx_highbd_sad16x32_avg_c;
+const SadMxNAvgFunc highbd_sad16x16_avg_c = vpx_highbd_sad16x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x8_avg_c = vpx_highbd_sad16x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x16_avg_c = vpx_highbd_sad8x16_avg_c;
+const SadMxNAvgFunc highbd_sad8x8_avg_c = vpx_highbd_sad8x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x4_avg_c = vpx_highbd_sad8x4_avg_c;
+const SadMxNAvgFunc highbd_sad4x8_avg_c = vpx_highbd_sad4x8_avg_c;
+const SadMxNAvgFunc highbd_sad4x4_avg_c = vpx_highbd_sad4x4_avg_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_c_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_c, -1),
+ make_tuple(64, 32, sad64x32_avg_c, -1),
+ make_tuple(32, 64, sad32x64_avg_c, -1),
+ make_tuple(32, 32, sad32x32_avg_c, -1),
+ make_tuple(32, 16, sad32x16_avg_c, -1),
+ make_tuple(16, 32, sad16x32_avg_c, -1),
+ make_tuple(16, 16, sad16x16_avg_c, -1),
+ make_tuple(16, 8, sad16x8_avg_c, -1),
+ make_tuple(8, 16, sad8x16_avg_c, -1),
+ make_tuple(8, 8, sad8x8_avg_c, -1),
+ make_tuple(8, 4, sad8x4_avg_c, -1),
+ make_tuple(4, 8, sad4x8_avg_c, -1),
+ make_tuple(4, 4, sad4x4_avg_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
+
+const SadMxNx4Func sad64x64x4d_c = vpx_sad64x64x4d_c;
+const SadMxNx4Func sad64x32x4d_c = vpx_sad64x32x4d_c;
+const SadMxNx4Func sad32x64x4d_c = vpx_sad32x64x4d_c;
+const SadMxNx4Func sad32x32x4d_c = vpx_sad32x32x4d_c;
+const SadMxNx4Func sad32x16x4d_c = vpx_sad32x16x4d_c;
+const SadMxNx4Func sad16x32x4d_c = vpx_sad16x32x4d_c;
+const SadMxNx4Func sad16x16x4d_c = vpx_sad16x16x4d_c;
+const SadMxNx4Func sad16x8x4d_c = vpx_sad16x8x4d_c;
+const SadMxNx4Func sad8x16x4d_c = vpx_sad8x16x4d_c;
+const SadMxNx4Func sad8x8x4d_c = vpx_sad8x8x4d_c;
+const SadMxNx4Func sad8x4x4d_c = vpx_sad8x4x4d_c;
+const SadMxNx4Func sad4x8x4d_c = vpx_sad4x8x4d_c;
+const SadMxNx4Func sad4x4x4d_c = vpx_sad4x4x4d_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_c = vpx_highbd_sad64x64x4d_c;
+const SadMxNx4Func highbd_sad64x32x4d_c = vpx_highbd_sad64x32x4d_c;
+const SadMxNx4Func highbd_sad32x64x4d_c = vpx_highbd_sad32x64x4d_c;
+const SadMxNx4Func highbd_sad32x32x4d_c = vpx_highbd_sad32x32x4d_c;
+const SadMxNx4Func highbd_sad32x16x4d_c = vpx_highbd_sad32x16x4d_c;
+const SadMxNx4Func highbd_sad16x32x4d_c = vpx_highbd_sad16x32x4d_c;
+const SadMxNx4Func highbd_sad16x16x4d_c = vpx_highbd_sad16x16x4d_c;
+const SadMxNx4Func highbd_sad16x8x4d_c = vpx_highbd_sad16x8x4d_c;
+const SadMxNx4Func highbd_sad8x16x4d_c = vpx_highbd_sad8x16x4d_c;
+const SadMxNx4Func highbd_sad8x8x4d_c = vpx_highbd_sad8x8x4d_c;
+const SadMxNx4Func highbd_sad8x4x4d_c = vpx_highbd_sad8x4x4d_c;
+const SadMxNx4Func highbd_sad4x8x4d_c = vpx_highbd_sad4x8x4d_c;
+const SadMxNx4Func highbd_sad4x4x4d_c = vpx_highbd_sad4x4x4d_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_c_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_c, -1),
+ make_tuple(64, 32, sad64x32x4d_c, -1),
+ make_tuple(32, 64, sad32x64x4d_c, -1),
+ make_tuple(32, 32, sad32x32x4d_c, -1),
+ make_tuple(32, 16, sad32x16x4d_c, -1),
+ make_tuple(16, 32, sad16x32x4d_c, -1),
+ make_tuple(16, 16, sad16x16x4d_c, -1),
+ make_tuple(16, 8, sad16x8x4d_c, -1),
+ make_tuple(8, 16, sad8x16x4d_c, -1),
+ make_tuple(8, 8, sad8x8x4d_c, -1),
+ make_tuple(8, 4, sad8x4x4d_c, -1),
+ make_tuple(4, 8, sad4x8x4d_c, -1),
+ make_tuple(4, 4, sad4x4x4d_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
//------------------------------------------------------------------------------
// ARM functions
#if HAVE_MEDIA
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_armv6 = vp8_sad16x16_armv6;
-INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_armv6)));
-#endif
-#endif
+const SadMxNFunc sad16x16_media = vpx_sad16x16_media;
+const SadMxNParam media_tests[] = {
+ make_tuple(16, 16, sad16x16_media, -1),
+};
+INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
+#endif // HAVE_MEDIA
#if HAVE_NEON
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_neon = vp8_sad16x16_neon;
-const sad_m_by_n_fn_t sad_8x16_neon = vp8_sad8x16_neon;
-const sad_m_by_n_fn_t sad_16x8_neon = vp8_sad16x8_neon;
-const sad_m_by_n_fn_t sad_8x8_neon = vp8_sad8x8_neon;
-const sad_m_by_n_fn_t sad_4x4_neon = vp8_sad4x4_neon;
-INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_neon),
- make_tuple(8, 16, sad_8x16_neon),
- make_tuple(16, 8, sad_16x8_neon),
- make_tuple(8, 8, sad_8x8_neon),
- make_tuple(4, 4, sad_4x4_neon)));
-#endif
-#endif
+const SadMxNFunc sad64x64_neon = vpx_sad64x64_neon;
+const SadMxNFunc sad32x32_neon = vpx_sad32x32_neon;
+const SadMxNFunc sad16x16_neon = vpx_sad16x16_neon;
+const SadMxNFunc sad16x8_neon = vpx_sad16x8_neon;
+const SadMxNFunc sad8x16_neon = vpx_sad8x16_neon;
+const SadMxNFunc sad8x8_neon = vpx_sad8x8_neon;
+const SadMxNFunc sad4x4_neon = vpx_sad4x4_neon;
+
+const SadMxNParam neon_tests[] = {
+ make_tuple(64, 64, sad64x64_neon, -1),
+ make_tuple(32, 32, sad32x32_neon, -1),
+ make_tuple(16, 16, sad16x16_neon, -1),
+ make_tuple(16, 8, sad16x8_neon, -1),
+ make_tuple(8, 16, sad8x16_neon, -1),
+ make_tuple(8, 8, sad8x8_neon, -1),
+ make_tuple(4, 4, sad4x4_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
+
+const SadMxNx4Func sad64x64x4d_neon = vpx_sad64x64x4d_neon;
+const SadMxNx4Func sad32x32x4d_neon = vpx_sad32x32x4d_neon;
+const SadMxNx4Func sad16x16x4d_neon = vpx_sad16x16x4d_neon;
+const SadMxNx4Param x4d_neon_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_neon, -1),
+ make_tuple(32, 32, sad32x32x4d_neon, -1),
+ make_tuple(16, 16, sad16x16x4d_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
+#endif // HAVE_NEON
//------------------------------------------------------------------------------
// x86 functions
#if HAVE_MMX
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
-#endif
-
-const sad_m_by_n_test_param_t mmx_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(16, 16, sad_16x16_mmx_vp9),
- make_tuple(8, 16, sad_8x16_mmx_vp9),
- make_tuple(16, 8, sad_16x8_mmx_vp9),
- make_tuple(8, 8, sad_8x8_mmx_vp9),
- make_tuple(4, 4, sad_4x4_mmx_vp9),
-#endif
+const SadMxNFunc sad16x16_mmx = vpx_sad16x16_mmx;
+const SadMxNFunc sad16x8_mmx = vpx_sad16x8_mmx;
+const SadMxNFunc sad8x16_mmx = vpx_sad8x16_mmx;
+const SadMxNFunc sad8x8_mmx = vpx_sad8x8_mmx;
+const SadMxNFunc sad4x4_mmx = vpx_sad4x4_mmx;
+const SadMxNParam mmx_tests[] = {
+ make_tuple(16, 16, sad16x16_mmx, -1),
+ make_tuple(16, 8, sad16x8_mmx, -1),
+ make_tuple(8, 16, sad8x16_mmx, -1),
+ make_tuple(8, 8, sad8x8_mmx, -1),
+ make_tuple(4, 4, sad4x4_mmx, -1),
};
INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
-#endif
+#endif // HAVE_MMX
#if HAVE_SSE
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
-const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
- make_tuple(4, 4, sad_4x4_sse_vp9),
- make_tuple(4, 8, sad_4x8_sse_vp9)));
+const SadMxNFunc sad4x8_sse = vpx_sad4x8_sse;
+const SadMxNFunc sad4x4_sse = vpx_sad4x4_sse;
+const SadMxNParam sse_tests[] = {
+ make_tuple(4, 8, sad4x8_sse, -1),
+ make_tuple(4, 4, sad4x4_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::ValuesIn(sse_tests));
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
- make_tuple(4, 8, sad_4x8x4d_sse),
- make_tuple(4, 4, sad_4x4x4d_sse)));
+const SadMxNAvgFunc sad4x8_avg_sse = vpx_sad4x8_avg_sse;
+const SadMxNAvgFunc sad4x4_avg_sse = vpx_sad4x4_avg_sse;
+const SadMxNAvgParam avg_sse_tests[] = {
+ make_tuple(4, 8, sad4x8_avg_sse, -1),
+ make_tuple(4, 4, sad4x4_avg_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADavgTest, ::testing::ValuesIn(avg_sse_tests));
+
+const SadMxNx4Func sad4x8x4d_sse = vpx_sad4x8x4d_sse;
+const SadMxNx4Func sad4x4x4d_sse = vpx_sad4x4x4d_sse;
+const SadMxNx4Param x4d_sse_tests[] = {
+ make_tuple(4, 8, sad4x8x4d_sse, -1),
+ make_tuple(4, 4, sad4x4x4d_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::ValuesIn(x4d_sse_tests));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_SSE
#if HAVE_SSE2
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
-const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
-const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
-const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
-const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-#endif
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
-const sad_m_by_n_fn_t sad_64x32_sse2_vp9 = vp9_sad64x32_sse2;
-const sad_m_by_n_fn_t sad_32x64_sse2_vp9 = vp9_sad32x64_sse2;
-const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
-const sad_m_by_n_fn_t sad_32x16_sse2_vp9 = vp9_sad32x16_sse2;
-const sad_m_by_n_fn_t sad_16x32_sse2_vp9 = vp9_sad16x32_sse2;
-const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
-const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
-const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
-const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
-const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
-#endif
-#endif
-const sad_m_by_n_test_param_t sse2_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt),
-#endif
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
- make_tuple(64, 64, sad_64x64_sse2_vp9),
- make_tuple(64, 32, sad_64x32_sse2_vp9),
- make_tuple(32, 64, sad_32x64_sse2_vp9),
- make_tuple(32, 32, sad_32x32_sse2_vp9),
- make_tuple(32, 16, sad_32x16_sse2_vp9),
- make_tuple(16, 32, sad_16x32_sse2_vp9),
- make_tuple(16, 16, sad_16x16_sse2_vp9),
- make_tuple(16, 8, sad_16x8_sse2_vp9),
- make_tuple(8, 16, sad_8x16_sse2_vp9),
- make_tuple(8, 8, sad_8x8_sse2_vp9),
- make_tuple(8, 4, sad_8x4_sse2_vp9),
-#endif
-#endif
+const SadMxNFunc sad64x64_sse2 = vpx_sad64x64_sse2;
+const SadMxNFunc sad64x32_sse2 = vpx_sad64x32_sse2;
+const SadMxNFunc sad32x64_sse2 = vpx_sad32x64_sse2;
+const SadMxNFunc sad32x32_sse2 = vpx_sad32x32_sse2;
+const SadMxNFunc sad32x16_sse2 = vpx_sad32x16_sse2;
+const SadMxNFunc sad16x32_sse2 = vpx_sad16x32_sse2;
+const SadMxNFunc sad16x16_sse2 = vpx_sad16x16_sse2;
+const SadMxNFunc sad16x8_sse2 = vpx_sad16x8_sse2;
+const SadMxNFunc sad8x16_sse2 = vpx_sad8x16_sse2;
+const SadMxNFunc sad8x8_sse2 = vpx_sad8x8_sse2;
+const SadMxNFunc sad8x4_sse2 = vpx_sad8x4_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_sse2 = vpx_highbd_sad64x64_sse2;
+const SadMxNFunc highbd_sad64x32_sse2 = vpx_highbd_sad64x32_sse2;
+const SadMxNFunc highbd_sad32x64_sse2 = vpx_highbd_sad32x64_sse2;
+const SadMxNFunc highbd_sad32x32_sse2 = vpx_highbd_sad32x32_sse2;
+const SadMxNFunc highbd_sad32x16_sse2 = vpx_highbd_sad32x16_sse2;
+const SadMxNFunc highbd_sad16x32_sse2 = vpx_highbd_sad16x32_sse2;
+const SadMxNFunc highbd_sad16x16_sse2 = vpx_highbd_sad16x16_sse2;
+const SadMxNFunc highbd_sad16x8_sse2 = vpx_highbd_sad16x8_sse2;
+const SadMxNFunc highbd_sad8x16_sse2 = vpx_highbd_sad8x16_sse2;
+const SadMxNFunc highbd_sad8x8_sse2 = vpx_highbd_sad8x8_sse2;
+const SadMxNFunc highbd_sad8x4_sse2 = vpx_highbd_sad8x4_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_sse2, -1),
+ make_tuple(64, 32, sad64x32_sse2, -1),
+ make_tuple(32, 64, sad32x64_sse2, -1),
+ make_tuple(32, 32, sad32x32_sse2, -1),
+ make_tuple(32, 16, sad32x16_sse2, -1),
+ make_tuple(16, 32, sad16x32_sse2, -1),
+ make_tuple(16, 16, sad16x16_sse2, -1),
+ make_tuple(16, 8, sad16x8_sse2, -1),
+ make_tuple(8, 16, sad8x16_sse2, -1),
+ make_tuple(8, 8, sad8x8_sse2, -1),
+ make_tuple(8, 4, sad8x4_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
-INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_sse2),
- make_tuple(64, 32, sad_64x32x4d_sse2),
- make_tuple(32, 64, sad_32x64x4d_sse2),
- make_tuple(32, 32, sad_32x32x4d_sse2),
- make_tuple(32, 16, sad_32x16x4d_sse2),
- make_tuple(16, 32, sad_16x32x4d_sse2),
- make_tuple(16, 16, sad_16x16x4d_sse2),
- make_tuple(16, 8, sad_16x8x4d_sse2),
- make_tuple(8, 16, sad_8x16x4d_sse2),
- make_tuple(8, 8, sad_8x8x4d_sse2),
- make_tuple(8, 4, sad_8x4x4d_sse2)));
-#endif
-#endif
-#endif
+const SadMxNAvgFunc sad64x64_avg_sse2 = vpx_sad64x64_avg_sse2;
+const SadMxNAvgFunc sad64x32_avg_sse2 = vpx_sad64x32_avg_sse2;
+const SadMxNAvgFunc sad32x64_avg_sse2 = vpx_sad32x64_avg_sse2;
+const SadMxNAvgFunc sad32x32_avg_sse2 = vpx_sad32x32_avg_sse2;
+const SadMxNAvgFunc sad32x16_avg_sse2 = vpx_sad32x16_avg_sse2;
+const SadMxNAvgFunc sad16x32_avg_sse2 = vpx_sad16x32_avg_sse2;
+const SadMxNAvgFunc sad16x16_avg_sse2 = vpx_sad16x16_avg_sse2;
+const SadMxNAvgFunc sad16x8_avg_sse2 = vpx_sad16x8_avg_sse2;
+const SadMxNAvgFunc sad8x16_avg_sse2 = vpx_sad8x16_avg_sse2;
+const SadMxNAvgFunc sad8x8_avg_sse2 = vpx_sad8x8_avg_sse2;
+const SadMxNAvgFunc sad8x4_avg_sse2 = vpx_sad8x4_avg_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_sse2 = vpx_highbd_sad64x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad64x32_avg_sse2 = vpx_highbd_sad64x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x64_avg_sse2 = vpx_highbd_sad32x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x32_avg_sse2 = vpx_highbd_sad32x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x16_avg_sse2 = vpx_highbd_sad32x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x32_avg_sse2 = vpx_highbd_sad16x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x16_avg_sse2 = vpx_highbd_sad16x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x8_avg_sse2 = vpx_highbd_sad16x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x16_avg_sse2 = vpx_highbd_sad8x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x8_avg_sse2 = vpx_highbd_sad8x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x4_avg_sse2 = vpx_highbd_sad8x4_avg_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_sse2, -1),
+ make_tuple(64, 32, sad64x32_avg_sse2, -1),
+ make_tuple(32, 64, sad32x64_avg_sse2, -1),
+ make_tuple(32, 32, sad32x32_avg_sse2, -1),
+ make_tuple(32, 16, sad32x16_avg_sse2, -1),
+ make_tuple(16, 32, sad16x32_avg_sse2, -1),
+ make_tuple(16, 16, sad16x16_avg_sse2, -1),
+ make_tuple(16, 8, sad16x8_avg_sse2, -1),
+ make_tuple(8, 16, sad8x16_avg_sse2, -1),
+ make_tuple(8, 8, sad8x8_avg_sse2, -1),
+ make_tuple(8, 4, sad8x4_avg_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
+
+const SadMxNx4Func sad64x64x4d_sse2 = vpx_sad64x64x4d_sse2;
+const SadMxNx4Func sad64x32x4d_sse2 = vpx_sad64x32x4d_sse2;
+const SadMxNx4Func sad32x64x4d_sse2 = vpx_sad32x64x4d_sse2;
+const SadMxNx4Func sad32x32x4d_sse2 = vpx_sad32x32x4d_sse2;
+const SadMxNx4Func sad32x16x4d_sse2 = vpx_sad32x16x4d_sse2;
+const SadMxNx4Func sad16x32x4d_sse2 = vpx_sad16x32x4d_sse2;
+const SadMxNx4Func sad16x16x4d_sse2 = vpx_sad16x16x4d_sse2;
+const SadMxNx4Func sad16x8x4d_sse2 = vpx_sad16x8x4d_sse2;
+const SadMxNx4Func sad8x16x4d_sse2 = vpx_sad8x16x4d_sse2;
+const SadMxNx4Func sad8x8x4d_sse2 = vpx_sad8x8x4d_sse2;
+const SadMxNx4Func sad8x4x4d_sse2 = vpx_sad8x4x4d_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_sse2 = vpx_highbd_sad64x64x4d_sse2;
+const SadMxNx4Func highbd_sad64x32x4d_sse2 = vpx_highbd_sad64x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x64x4d_sse2 = vpx_highbd_sad32x64x4d_sse2;
+const SadMxNx4Func highbd_sad32x32x4d_sse2 = vpx_highbd_sad32x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x16x4d_sse2 = vpx_highbd_sad32x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x32x4d_sse2 = vpx_highbd_sad16x32x4d_sse2;
+const SadMxNx4Func highbd_sad16x16x4d_sse2 = vpx_highbd_sad16x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x8x4d_sse2 = vpx_highbd_sad16x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x16x4d_sse2 = vpx_highbd_sad8x16x4d_sse2;
+const SadMxNx4Func highbd_sad8x8x4d_sse2 = vpx_highbd_sad8x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x4x4d_sse2 = vpx_highbd_sad8x4x4d_sse2;
+const SadMxNx4Func highbd_sad4x8x4d_sse2 = vpx_highbd_sad4x8x4d_sse2;
+const SadMxNx4Func highbd_sad4x4x4d_sse2 = vpx_highbd_sad4x4x4d_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_sse2, -1),
+ make_tuple(64, 32, sad64x32x4d_sse2, -1),
+ make_tuple(32, 64, sad32x64x4d_sse2, -1),
+ make_tuple(32, 32, sad32x32x4d_sse2, -1),
+ make_tuple(32, 16, sad32x16x4d_sse2, -1),
+ make_tuple(16, 32, sad16x32x4d_sse2, -1),
+ make_tuple(16, 16, sad16x16x4d_sse2, -1),
+ make_tuple(16, 8, sad16x8x4d_sse2, -1),
+ make_tuple(8, 16, sad8x16x4d_sse2, -1),
+ make_tuple(8, 8, sad8x8x4d_sse2, -1),
+ make_tuple(8, 4, sad8x4x4d_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSE2
#if HAVE_SSE3
-#if CONFIG_VP8_ENCODER
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
- make_tuple(16, 16, sad_16x16x4d_sse3),
- make_tuple(16, 8, sad_16x8x4d_sse3),
- make_tuple(8, 16, sad_8x16x4d_sse3),
- make_tuple(8, 8, sad_8x8x4d_sse3),
- make_tuple(4, 4, sad_4x4x4d_sse3)));
-#endif
-#endif
+// The only SSE3 SAD functions are x3 variants, which do not have tests.
+#endif // HAVE_SSE3
#if HAVE_SSSE3
-#if CONFIG_USE_X86INC
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_sse3)));
-#endif
-#endif
-#endif
+// The only SSSE3 SAD functions are x3 variants, which do not have tests.
+#endif // HAVE_SSSE3
+
+#if HAVE_SSE4_1
+// The only SSE4.1 SAD functions are x8 variants, which do not have tests.
+#endif // HAVE_SSE4_1
+
+#if HAVE_AVX2
+const SadMxNFunc sad64x64_avx2 = vpx_sad64x64_avx2;
+const SadMxNFunc sad64x32_avx2 = vpx_sad64x32_avx2;
+const SadMxNFunc sad32x64_avx2 = vpx_sad32x64_avx2;
+const SadMxNFunc sad32x32_avx2 = vpx_sad32x32_avx2;
+const SadMxNFunc sad32x16_avx2 = vpx_sad32x16_avx2;
+const SadMxNParam avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avx2, -1),
+ make_tuple(64, 32, sad64x32_avx2, -1),
+ make_tuple(32, 64, sad32x64_avx2, -1),
+ make_tuple(32, 32, sad32x32_avx2, -1),
+ make_tuple(32, 16, sad32x16_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
+
+const SadMxNAvgFunc sad64x64_avg_avx2 = vpx_sad64x64_avg_avx2;
+const SadMxNAvgFunc sad64x32_avg_avx2 = vpx_sad64x32_avg_avx2;
+const SadMxNAvgFunc sad32x64_avg_avx2 = vpx_sad32x64_avg_avx2;
+const SadMxNAvgFunc sad32x32_avg_avx2 = vpx_sad32x32_avg_avx2;
+const SadMxNAvgFunc sad32x16_avg_avx2 = vpx_sad32x16_avg_avx2;
+const SadMxNAvgParam avg_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_avx2, -1),
+ make_tuple(64, 32, sad64x32_avg_avx2, -1),
+ make_tuple(32, 64, sad32x64_avg_avx2, -1),
+ make_tuple(32, 32, sad32x32_avg_avx2, -1),
+ make_tuple(32, 16, sad32x16_avg_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
+
+const SadMxNx4Func sad64x64x4d_avx2 = vpx_sad64x64x4d_avx2;
+const SadMxNx4Func sad32x32x4d_avx2 = vpx_sad32x32x4d_avx2;
+const SadMxNx4Param x4d_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_avx2, -1),
+ make_tuple(32, 32, sad32x32x4d_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
+#endif // HAVE_AVX2
+
+//------------------------------------------------------------------------------
+// MIPS functions
+#if HAVE_MSA
+const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
+const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
+const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
+const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
+const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
+const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
+const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
+const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
+const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
+const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
+const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
+const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
+const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
+const SadMxNParam msa_tests[] = {
+ make_tuple(64, 64, sad64x64_msa, -1),
+ make_tuple(64, 32, sad64x32_msa, -1),
+ make_tuple(32, 64, sad32x64_msa, -1),
+ make_tuple(32, 32, sad32x32_msa, -1),
+ make_tuple(32, 16, sad32x16_msa, -1),
+ make_tuple(16, 32, sad16x32_msa, -1),
+ make_tuple(16, 16, sad16x16_msa, -1),
+ make_tuple(16, 8, sad16x8_msa, -1),
+ make_tuple(8, 16, sad8x16_msa, -1),
+ make_tuple(8, 8, sad8x8_msa, -1),
+ make_tuple(8, 4, sad8x4_msa, -1),
+ make_tuple(4, 8, sad4x8_msa, -1),
+ make_tuple(4, 4, sad4x4_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
+
+const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
+const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
+const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
+const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
+const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
+const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
+const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
+const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
+const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
+const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
+const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
+const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
+const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
+const SadMxNAvgParam avg_msa_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_msa, -1),
+ make_tuple(64, 32, sad64x32_avg_msa, -1),
+ make_tuple(32, 64, sad32x64_avg_msa, -1),
+ make_tuple(32, 32, sad32x32_avg_msa, -1),
+ make_tuple(32, 16, sad32x16_avg_msa, -1),
+ make_tuple(16, 32, sad16x32_avg_msa, -1),
+ make_tuple(16, 16, sad16x16_avg_msa, -1),
+ make_tuple(16, 8, sad16x8_avg_msa, -1),
+ make_tuple(8, 16, sad8x16_avg_msa, -1),
+ make_tuple(8, 8, sad8x8_avg_msa, -1),
+ make_tuple(8, 4, sad8x4_avg_msa, -1),
+ make_tuple(4, 8, sad4x8_avg_msa, -1),
+ make_tuple(4, 4, sad4x4_avg_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
+
+const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
+const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
+const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
+const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
+const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
+const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
+const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
+const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
+const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
+const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
+const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
+const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
+const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
+const SadMxNx4Param x4d_msa_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_msa, -1),
+ make_tuple(64, 32, sad64x32x4d_msa, -1),
+ make_tuple(32, 64, sad32x64x4d_msa, -1),
+ make_tuple(32, 32, sad32x32x4d_msa, -1),
+ make_tuple(32, 16, sad32x16x4d_msa, -1),
+ make_tuple(16, 32, sad16x32x4d_msa, -1),
+ make_tuple(16, 16, sad16x16x4d_msa, -1),
+ make_tuple(16, 8, sad16x8x4d_msa, -1),
+ make_tuple(8, 16, sad8x16x4d_msa, -1),
+ make_tuple(8, 8, sad8x8x4d_msa, -1),
+ make_tuple(8, 4, sad8x4x4d_msa, -1),
+ make_tuple(4, 8, sad4x8x4d_msa, -1),
+ make_tuple(4, 4, sad4x4x4d_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
const uint8_t* refs[] = {GetReference(0), GetReference(1),
GetReference(2), GetReference(3)};
REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
refs, reference_stride_,
results));
|
const uint8_t *references[] = {GetReference(0), GetReference(1),
GetReference(2), GetReference(3)};
ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
references, reference_stride_,
results));
|
150,877 |
virtual void SetUp() {
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
if (bd_ == -1) {
use_high_bit_depth_ = false;
bit_depth_ = VPX_BITS_8;
source_data_ = source_data8_;
reference_data_ = reference_data8_;
second_pred_ = second_pred8_;
#if CONFIG_VP9_HIGHBITDEPTH
} else {
use_high_bit_depth_ = true;
bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
|
@@ -13,56 +13,74 @@
#include <limits.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#if CONFIG_VP8_ENCODER
-#include "./vp8_rtcd.h"
-#endif
-#if CONFIG_VP9_ENCODER
-#include "./vp9_rtcd.h"
-#endif
-#include "vpx_mem/vpx_mem.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
-typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
- int source_stride,
- const unsigned char *reference_ptr,
- int reference_stride,
- unsigned int max_sad);
-typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ const uint8_t *second_pred);
+typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
-typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
- int src_stride,
- const unsigned char * const ref_ptr[],
- int ref_stride,
- unsigned int *sad_array);
-typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
- sad_n_by_n_by_4_test_param_t;
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[],
+ int ref_stride,
+ uint32_t *sad_array);
+typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
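// Illustrative sketch (not part of the patch): how a SadMxNParam tuple maps
// onto a call through the function pointer it carries. The buffers, strides,
// and the vpx_sad16x16_c choice below are hypothetical examples; the real
// tests drive the pointer through GET_PARAM(2).
//
//   SadMxNParam p = std::tr1::make_tuple(16, 16, vpx_sad16x16_c, -1);
//   const int width = std::tr1::get<0>(p);   // block width
//   const int height = std::tr1::get<1>(p);  // block height
//   SadMxNFunc fn = std::tr1::get<2>(p);     // function under test
//   const int bd = std::tr1::get<3>(p);      // bit depth; -1 means 8-bit
//   const unsigned int sad = fn(src, src_stride, ref, ref_stride);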
using libvpx_test::ACMRandom;
namespace {
class SADTestBase : public ::testing::Test {
public:
- SADTestBase(int width, int height) : width_(width), height_(height) {}
+ SADTestBase(int width, int height, int bit_depth) :
+ width_(width), height_(height), bd_(bit_depth) {}
static void SetUpTestCase() {
- source_data_ = reinterpret_cast<uint8_t*>(
+ source_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
- reference_data_ = reinterpret_cast<uint8_t*>(
+ reference_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
+ second_pred8_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, 64*64));
+ source_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
+ reference_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
+ second_pred16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
}
static void TearDownTestCase() {
- vpx_free(source_data_);
- source_data_ = NULL;
- vpx_free(reference_data_);
- reference_data_ = NULL;
+ vpx_free(source_data8_);
+ source_data8_ = NULL;
+ vpx_free(reference_data8_);
+ reference_data8_ = NULL;
+ vpx_free(second_pred8_);
+ second_pred8_ = NULL;
+ vpx_free(source_data16_);
+ source_data16_ = NULL;
+ vpx_free(reference_data16_);
+ reference_data16_ = NULL;
+ vpx_free(second_pred16_);
+ second_pred16_ = NULL;
}
virtual void TearDown() {
@@ -76,142 +94,335 @@
static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
+ if (bd_ == -1) {
+ use_high_bit_depth_ = false;
+ bit_depth_ = VPX_BITS_8;
+ source_data_ = source_data8_;
+ reference_data_ = reference_data8_;
+ second_pred_ = second_pred8_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ use_high_bit_depth_ = true;
+ bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
+ source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
+ reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
+ second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
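  // Worked examples for the two computations above (illustrative only):
  //   width_ = 16  -> source_stride_ = (16 + 31) & ~31 = 32 (next multiple
  //   of 32); width_ = 33 -> (33 + 31) & ~31 = 64.
  //   bit_depth_ = 10 -> mask_ = (1 << 10) - 1 = 1023, the largest legal
  //   10-bit sample value, used to clamp FillRandom() data.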
- virtual uint8_t* GetReference(int block_idx) {
+ virtual uint8_t *GetReference(int block_idx) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_high_bit_depth_)
+ return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
+ block_idx * kDataBlockSize);
+#endif // CONFIG_VP9_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
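  // Note (illustrative): CONVERT_TO_BYTEPTR/CONVERT_TO_SHORTPTR are libvpx's
  // pointer-encoding macros (vpx_ports/mem.h) for carrying uint16_t
  // high-bit-depth buffers through interfaces typed as uint8_t*. Block
  // offsets must therefore be applied in uint16_t units, as done above,
  // before converting back.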
// Sum of Absolute Differences. Given two blocks, calculate the absolute
// difference between two pixels in the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ReferenceSAD(int block_idx) {
unsigned int sad = 0;
- const uint8_t* const reference = GetReference(block_idx);
-
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- sad += abs(source_data_[h * source_stride_ + w]
- - reference[h * reference_stride_ + w]);
- }
- if (sad > max_sad) {
- break;
+ if (!use_high_bit_depth_) {
+ sad += abs(source8[h * source_stride_ + w] -
+ reference8[h * reference_stride_ + w]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ sad += abs(source16[h * source_stride_ + w] -
+ reference16[h * reference_stride_ + w]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
return sad;
}
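  // Worked micro-example (hypothetical 2x2 block): source {1, 4, 7, 2} vs.
  // reference {3, 3, 5, 9} gives
  //   SAD = |1-3| + |4-3| + |7-5| + |2-9| = 2 + 1 + 2 + 7 = 12.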
- void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) {
+  // Sum of Absolute Differences Average. Given two blocks and a prediction,
+  // calculate the absolute difference between one pixel and the average of
+  // the corresponding reference and predicted pixels; accumulate.
+ unsigned int ReferenceSADavg(int block_idx) {
+ unsigned int sad = 0;
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+ const uint8_t *const second_pred8 = second_pred_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+ const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = fill_constant;
+ if (!use_high_bit_depth_) {
+ const int tmp = second_pred8[h * width_ + w] +
+ reference8[h * reference_stride_ + w];
+ const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source8[h * source_stride_ + w] - comp_pred);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ const int tmp = second_pred16[h * width_ + w] +
+ reference16[h * reference_stride_ + w];
+ const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source16[h * source_stride_ + w] - comp_pred);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ return sad;
+ }
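+  // For reference: ROUND_POWER_OF_TWO(tmp, 1) above computes the rounded
+  // average (a + b + 1) >> 1. A minimal sketch, assuming the usual
+  // vpx_ports/mem.h definition:
+  //   #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
+  //   e.g. ROUND_POWER_OF_TWO(5 + 6, 1) == 6.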
+
+ void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ for (int h = 0; h < height_; ++h) {
+ for (int w = 0; w < width_; ++w) {
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = fill_constant;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
void FillRandom(uint8_t *data, int stride) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = rnd_.Rand8();
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
- int width_, height_;
- static uint8_t* source_data_;
+ int width_, height_, mask_, bd_;
+ vpx_bit_depth_t bit_depth_;
+ static uint8_t *source_data_;
+ static uint8_t *reference_data_;
+ static uint8_t *second_pred_;
int source_stride_;
- static uint8_t* reference_data_;
+ bool use_high_bit_depth_;
+ static uint8_t *source_data8_;
+ static uint8_t *reference_data8_;
+ static uint8_t *second_pred8_;
+ static uint16_t *source_data16_;
+ static uint16_t *reference_data16_;
+ static uint16_t *second_pred16_;
int reference_stride_;
ACMRandom rnd_;
};
-class SADTest : public SADTestBase,
- public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+class SADx4Test
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNx4Param> {
public:
- SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
-
- protected:
- unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
- unsigned int ret;
- const uint8_t* const reference = GetReference(block_idx);
-
- REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
- reference, reference_stride_,
- max_sad));
- return ret;
- }
-
- void CheckSad(unsigned int max_sad) {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(max_sad);
- exp_sad = SAD(max_sad);
-
- if (reference_sad <= max_sad) {
- ASSERT_EQ(exp_sad, reference_sad);
- } else {
- // Alternative implementations are not required to check max_sad
- ASSERT_GE(exp_sad, reference_sad);
- }
- }
-};
-
-class SADx4Test : public SADTestBase,
- public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
- public:
- SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
protected:
void SADs(unsigned int *results) {
- const uint8_t* refs[] = {GetReference(0), GetReference(1),
- GetReference(2), GetReference(3)};
+ const uint8_t *references[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
- REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
- refs, reference_stride_,
- results));
+ ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ references, reference_stride_,
+ results));
}
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
- for (int block = 0; block < 4; block++) {
- reference_sad = ReferenceSAD(UINT_MAX, block);
+ for (int block = 0; block < 4; ++block) {
+ reference_sad = ReferenceSAD(block);
- EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
}
}
};
-uint8_t* SADTestBase::source_data_ = NULL;
-uint8_t* SADTestBase::reference_data_ = NULL;
+class SADTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNParam> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSAD(0);
+ const unsigned int exp_sad = SAD(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+class SADavgTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNAvgParam> {
+ public:
+ SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD_avg(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ second_pred_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSADavg(0);
+ const unsigned int exp_sad = SAD_avg(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+uint8_t *SADTestBase::source_data_ = NULL;
+uint8_t *SADTestBase::reference_data_ = NULL;
+uint8_t *SADTestBase::second_pred_ = NULL;
+uint8_t *SADTestBase::source_data8_ = NULL;
+uint8_t *SADTestBase::reference_data8_ = NULL;
+uint8_t *SADTestBase::second_pred8_ = NULL;
+uint16_t *SADTestBase::source_data16_ = NULL;
+uint16_t *SADTestBase::reference_data16_ = NULL;
+uint16_t *SADTestBase::second_pred16_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(reference_data_, reference_stride_, 255);
- CheckSad(UINT_MAX);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ CheckSAD();
+}
+
+TEST_P(SADTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADTest, ShortRef) {
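+  // Halve the reference stride (width_ * 2 -> width_) to verify that the
+  // implementation honors the stride argument instead of assuming a fixed
+  // row pitch.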
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+TEST_P(SADavgTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADavgTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
}
TEST_P(SADx4Test, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(GetReference(0), reference_stride_, 255);
- FillConstant(GetReference(1), reference_stride_, 255);
- FillConstant(GetReference(2), reference_stride_, 255);
- FillConstant(GetReference(3), reference_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, mask_);
+ FillConstant(GetReference(1), reference_stride_, mask_);
+ FillConstant(GetReference(2), reference_stride_, mask_);
+ FillConstant(GetReference(3), reference_stride_, mask_);
CheckSADs();
}
-TEST_P(SADTest, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(UINT_MAX);
-}
-
TEST_P(SADx4Test, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
+ FillConstant(source_data_, source_stride_, mask_);
FillConstant(GetReference(0), reference_stride_, 0);
FillConstant(GetReference(1), reference_stride_, 0);
FillConstant(GetReference(2), reference_stride_, 0);
@@ -219,15 +430,6 @@
CheckSADs();
}
-TEST_P(SADTest, ShortRef) {
- int tmp_stride = reference_stride_;
- reference_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -240,17 +442,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, UnalignedRef) {
- // The reference frame, but not the source frame, may be unaligned for
- // certain types of searches.
- int tmp_stride = reference_stride_;
- reference_stride_ -= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -265,15 +456,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, ShortSrc) {
- int tmp_stride = source_stride_;
- source_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- source_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -286,271 +468,743 @@
source_stride_ = tmp_stride;
}
-TEST_P(SADTest, MaxSAD) {
- // Verify that, when max_sad is set, the implementation does not return a
- // value lower than the reference.
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(128);
+TEST_P(SADx4Test, SrcAlignedByWidth) {
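+  // Offset the source pointer by one block width so it is aligned only by
+  // the block width rather than by the 32-byte buffer alignment; restore
+  // the pointer afterwards.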
+  uint8_t *tmp_source_data = source_data_;
+ source_data_ += width_;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_data_ = tmp_source_data;
}
using std::tr1::make_tuple;
//------------------------------------------------------------------------------
// C functions
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
-const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
-const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
-const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
-const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
-const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
-const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
-#endif
-const sad_m_by_n_test_param_t c_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(64, 64, sad_64x64_c_vp9),
- make_tuple(32, 32, sad_32x32_c_vp9),
- make_tuple(16, 16, sad_16x16_c_vp9),
- make_tuple(8, 16, sad_8x16_c_vp9),
- make_tuple(16, 8, sad_16x8_c_vp9),
- make_tuple(8, 8, sad_8x8_c_vp9),
- make_tuple(8, 4, sad_8x4_c_vp9),
- make_tuple(4, 8, sad_4x8_c_vp9),
- make_tuple(4, 4, sad_4x4_c_vp9),
-#endif
+const SadMxNFunc sad64x64_c = vpx_sad64x64_c;
+const SadMxNFunc sad64x32_c = vpx_sad64x32_c;
+const SadMxNFunc sad32x64_c = vpx_sad32x64_c;
+const SadMxNFunc sad32x32_c = vpx_sad32x32_c;
+const SadMxNFunc sad32x16_c = vpx_sad32x16_c;
+const SadMxNFunc sad16x32_c = vpx_sad16x32_c;
+const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
+const SadMxNFunc sad16x8_c = vpx_sad16x8_c;
+const SadMxNFunc sad8x16_c = vpx_sad8x16_c;
+const SadMxNFunc sad8x8_c = vpx_sad8x8_c;
+const SadMxNFunc sad8x4_c = vpx_sad8x4_c;
+const SadMxNFunc sad4x8_c = vpx_sad4x8_c;
+const SadMxNFunc sad4x4_c = vpx_sad4x4_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_c = vpx_highbd_sad64x64_c;
+const SadMxNFunc highbd_sad64x32_c = vpx_highbd_sad64x32_c;
+const SadMxNFunc highbd_sad32x64_c = vpx_highbd_sad32x64_c;
+const SadMxNFunc highbd_sad32x32_c = vpx_highbd_sad32x32_c;
+const SadMxNFunc highbd_sad32x16_c = vpx_highbd_sad32x16_c;
+const SadMxNFunc highbd_sad16x32_c = vpx_highbd_sad16x32_c;
+const SadMxNFunc highbd_sad16x16_c = vpx_highbd_sad16x16_c;
+const SadMxNFunc highbd_sad16x8_c = vpx_highbd_sad16x8_c;
+const SadMxNFunc highbd_sad8x16_c = vpx_highbd_sad8x16_c;
+const SadMxNFunc highbd_sad8x8_c = vpx_highbd_sad8x8_c;
+const SadMxNFunc highbd_sad8x4_c = vpx_highbd_sad8x4_c;
+const SadMxNFunc highbd_sad4x8_c = vpx_highbd_sad4x8_c;
+const SadMxNFunc highbd_sad4x4_c = vpx_highbd_sad4x4_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam c_tests[] = {
+ make_tuple(64, 64, sad64x64_c, -1),
+ make_tuple(64, 32, sad64x32_c, -1),
+ make_tuple(32, 64, sad32x64_c, -1),
+ make_tuple(32, 32, sad32x32_c, -1),
+ make_tuple(32, 16, sad32x16_c, -1),
+ make_tuple(16, 32, sad16x32_c, -1),
+ make_tuple(16, 16, sad16x16_c, -1),
+ make_tuple(16, 8, sad16x8_c, -1),
+ make_tuple(8, 16, sad8x16_c, -1),
+ make_tuple(8, 8, sad8x8_c, -1),
+ make_tuple(8, 4, sad8x4_c, -1),
+ make_tuple(4, 8, sad4x8_c, -1),
+ make_tuple(4, 4, sad4x4_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
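// Usage note (illustrative): a single instantiation can be run in isolation
// with gtest's name filter, e.g., assuming libvpx's standard test binary:
//   ./test_libvpx --gtest_filter='C/SADTest.*'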
-#if CONFIG_VP9_ENCODER
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
-INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_c),
- make_tuple(64, 32, sad_64x32x4d_c),
- make_tuple(32, 64, sad_32x64x4d_c),
- make_tuple(32, 32, sad_32x32x4d_c),
- make_tuple(32, 16, sad_32x16x4d_c),
- make_tuple(16, 32, sad_16x32x4d_c),
- make_tuple(16, 16, sad_16x16x4d_c),
- make_tuple(16, 8, sad_16x8x4d_c),
- make_tuple(8, 16, sad_8x16x4d_c),
- make_tuple(8, 8, sad_8x8x4d_c),
- make_tuple(8, 4, sad_8x4x4d_c),
- make_tuple(4, 8, sad_4x8x4d_c),
- make_tuple(4, 4, sad_4x4x4d_c)));
-#endif // CONFIG_VP9_ENCODER
+const SadMxNAvgFunc sad64x64_avg_c = vpx_sad64x64_avg_c;
+const SadMxNAvgFunc sad64x32_avg_c = vpx_sad64x32_avg_c;
+const SadMxNAvgFunc sad32x64_avg_c = vpx_sad32x64_avg_c;
+const SadMxNAvgFunc sad32x32_avg_c = vpx_sad32x32_avg_c;
+const SadMxNAvgFunc sad32x16_avg_c = vpx_sad32x16_avg_c;
+const SadMxNAvgFunc sad16x32_avg_c = vpx_sad16x32_avg_c;
+const SadMxNAvgFunc sad16x16_avg_c = vpx_sad16x16_avg_c;
+const SadMxNAvgFunc sad16x8_avg_c = vpx_sad16x8_avg_c;
+const SadMxNAvgFunc sad8x16_avg_c = vpx_sad8x16_avg_c;
+const SadMxNAvgFunc sad8x8_avg_c = vpx_sad8x8_avg_c;
+const SadMxNAvgFunc sad8x4_avg_c = vpx_sad8x4_avg_c;
+const SadMxNAvgFunc sad4x8_avg_c = vpx_sad4x8_avg_c;
+const SadMxNAvgFunc sad4x4_avg_c = vpx_sad4x4_avg_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_c = vpx_highbd_sad64x64_avg_c;
+const SadMxNAvgFunc highbd_sad64x32_avg_c = vpx_highbd_sad64x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x64_avg_c = vpx_highbd_sad32x64_avg_c;
+const SadMxNAvgFunc highbd_sad32x32_avg_c = vpx_highbd_sad32x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x16_avg_c = vpx_highbd_sad32x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x32_avg_c = vpx_highbd_sad16x32_avg_c;
+const SadMxNAvgFunc highbd_sad16x16_avg_c = vpx_highbd_sad16x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x8_avg_c = vpx_highbd_sad16x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x16_avg_c = vpx_highbd_sad8x16_avg_c;
+const SadMxNAvgFunc highbd_sad8x8_avg_c = vpx_highbd_sad8x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x4_avg_c = vpx_highbd_sad8x4_avg_c;
+const SadMxNAvgFunc highbd_sad4x8_avg_c = vpx_highbd_sad4x8_avg_c;
+const SadMxNAvgFunc highbd_sad4x4_avg_c = vpx_highbd_sad4x4_avg_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_c_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_c, -1),
+ make_tuple(64, 32, sad64x32_avg_c, -1),
+ make_tuple(32, 64, sad32x64_avg_c, -1),
+ make_tuple(32, 32, sad32x32_avg_c, -1),
+ make_tuple(32, 16, sad32x16_avg_c, -1),
+ make_tuple(16, 32, sad16x32_avg_c, -1),
+ make_tuple(16, 16, sad16x16_avg_c, -1),
+ make_tuple(16, 8, sad16x8_avg_c, -1),
+ make_tuple(8, 16, sad8x16_avg_c, -1),
+ make_tuple(8, 8, sad8x8_avg_c, -1),
+ make_tuple(8, 4, sad8x4_avg_c, -1),
+ make_tuple(4, 8, sad4x8_avg_c, -1),
+ make_tuple(4, 4, sad4x4_avg_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
+
+const SadMxNx4Func sad64x64x4d_c = vpx_sad64x64x4d_c;
+const SadMxNx4Func sad64x32x4d_c = vpx_sad64x32x4d_c;
+const SadMxNx4Func sad32x64x4d_c = vpx_sad32x64x4d_c;
+const SadMxNx4Func sad32x32x4d_c = vpx_sad32x32x4d_c;
+const SadMxNx4Func sad32x16x4d_c = vpx_sad32x16x4d_c;
+const SadMxNx4Func sad16x32x4d_c = vpx_sad16x32x4d_c;
+const SadMxNx4Func sad16x16x4d_c = vpx_sad16x16x4d_c;
+const SadMxNx4Func sad16x8x4d_c = vpx_sad16x8x4d_c;
+const SadMxNx4Func sad8x16x4d_c = vpx_sad8x16x4d_c;
+const SadMxNx4Func sad8x8x4d_c = vpx_sad8x8x4d_c;
+const SadMxNx4Func sad8x4x4d_c = vpx_sad8x4x4d_c;
+const SadMxNx4Func sad4x8x4d_c = vpx_sad4x8x4d_c;
+const SadMxNx4Func sad4x4x4d_c = vpx_sad4x4x4d_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_c = vpx_highbd_sad64x64x4d_c;
+const SadMxNx4Func highbd_sad64x32x4d_c = vpx_highbd_sad64x32x4d_c;
+const SadMxNx4Func highbd_sad32x64x4d_c = vpx_highbd_sad32x64x4d_c;
+const SadMxNx4Func highbd_sad32x32x4d_c = vpx_highbd_sad32x32x4d_c;
+const SadMxNx4Func highbd_sad32x16x4d_c = vpx_highbd_sad32x16x4d_c;
+const SadMxNx4Func highbd_sad16x32x4d_c = vpx_highbd_sad16x32x4d_c;
+const SadMxNx4Func highbd_sad16x16x4d_c = vpx_highbd_sad16x16x4d_c;
+const SadMxNx4Func highbd_sad16x8x4d_c = vpx_highbd_sad16x8x4d_c;
+const SadMxNx4Func highbd_sad8x16x4d_c = vpx_highbd_sad8x16x4d_c;
+const SadMxNx4Func highbd_sad8x8x4d_c = vpx_highbd_sad8x8x4d_c;
+const SadMxNx4Func highbd_sad8x4x4d_c = vpx_highbd_sad8x4x4d_c;
+const SadMxNx4Func highbd_sad4x8x4d_c = vpx_highbd_sad4x8x4d_c;
+const SadMxNx4Func highbd_sad4x4x4d_c = vpx_highbd_sad4x4x4d_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_c_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_c, -1),
+ make_tuple(64, 32, sad64x32x4d_c, -1),
+ make_tuple(32, 64, sad32x64x4d_c, -1),
+ make_tuple(32, 32, sad32x32x4d_c, -1),
+ make_tuple(32, 16, sad32x16x4d_c, -1),
+ make_tuple(16, 32, sad16x32x4d_c, -1),
+ make_tuple(16, 16, sad16x16x4d_c, -1),
+ make_tuple(16, 8, sad16x8x4d_c, -1),
+ make_tuple(8, 16, sad8x16x4d_c, -1),
+ make_tuple(8, 8, sad8x8x4d_c, -1),
+ make_tuple(8, 4, sad8x4x4d_c, -1),
+ make_tuple(4, 8, sad4x8x4d_c, -1),
+ make_tuple(4, 4, sad4x4x4d_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
//------------------------------------------------------------------------------
// ARM functions
#if HAVE_MEDIA
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_armv6 = vp8_sad16x16_armv6;
-INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_armv6)));
-#endif
-#endif
+const SadMxNFunc sad16x16_media = vpx_sad16x16_media;
+const SadMxNParam media_tests[] = {
+ make_tuple(16, 16, sad16x16_media, -1),
+};
+INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
+#endif // HAVE_MEDIA
#if HAVE_NEON
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_neon = vp8_sad16x16_neon;
-const sad_m_by_n_fn_t sad_8x16_neon = vp8_sad8x16_neon;
-const sad_m_by_n_fn_t sad_16x8_neon = vp8_sad16x8_neon;
-const sad_m_by_n_fn_t sad_8x8_neon = vp8_sad8x8_neon;
-const sad_m_by_n_fn_t sad_4x4_neon = vp8_sad4x4_neon;
-INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_neon),
- make_tuple(8, 16, sad_8x16_neon),
- make_tuple(16, 8, sad_16x8_neon),
- make_tuple(8, 8, sad_8x8_neon),
- make_tuple(4, 4, sad_4x4_neon)));
-#endif
-#endif
+const SadMxNFunc sad64x64_neon = vpx_sad64x64_neon;
+const SadMxNFunc sad32x32_neon = vpx_sad32x32_neon;
+const SadMxNFunc sad16x16_neon = vpx_sad16x16_neon;
+const SadMxNFunc sad16x8_neon = vpx_sad16x8_neon;
+const SadMxNFunc sad8x16_neon = vpx_sad8x16_neon;
+const SadMxNFunc sad8x8_neon = vpx_sad8x8_neon;
+const SadMxNFunc sad4x4_neon = vpx_sad4x4_neon;
+
+const SadMxNParam neon_tests[] = {
+ make_tuple(64, 64, sad64x64_neon, -1),
+ make_tuple(32, 32, sad32x32_neon, -1),
+ make_tuple(16, 16, sad16x16_neon, -1),
+ make_tuple(16, 8, sad16x8_neon, -1),
+ make_tuple(8, 16, sad8x16_neon, -1),
+ make_tuple(8, 8, sad8x8_neon, -1),
+ make_tuple(4, 4, sad4x4_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
+
+const SadMxNx4Func sad64x64x4d_neon = vpx_sad64x64x4d_neon;
+const SadMxNx4Func sad32x32x4d_neon = vpx_sad32x32x4d_neon;
+const SadMxNx4Func sad16x16x4d_neon = vpx_sad16x16x4d_neon;
+const SadMxNx4Param x4d_neon_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_neon, -1),
+ make_tuple(32, 32, sad32x32x4d_neon, -1),
+ make_tuple(16, 16, sad16x16x4d_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
+#endif // HAVE_NEON
//------------------------------------------------------------------------------
// x86 functions
#if HAVE_MMX
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
-#endif
-
-const sad_m_by_n_test_param_t mmx_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(16, 16, sad_16x16_mmx_vp9),
- make_tuple(8, 16, sad_8x16_mmx_vp9),
- make_tuple(16, 8, sad_16x8_mmx_vp9),
- make_tuple(8, 8, sad_8x8_mmx_vp9),
- make_tuple(4, 4, sad_4x4_mmx_vp9),
-#endif
+const SadMxNFunc sad16x16_mmx = vpx_sad16x16_mmx;
+const SadMxNFunc sad16x8_mmx = vpx_sad16x8_mmx;
+const SadMxNFunc sad8x16_mmx = vpx_sad8x16_mmx;
+const SadMxNFunc sad8x8_mmx = vpx_sad8x8_mmx;
+const SadMxNFunc sad4x4_mmx = vpx_sad4x4_mmx;
+const SadMxNParam mmx_tests[] = {
+ make_tuple(16, 16, sad16x16_mmx, -1),
+ make_tuple(16, 8, sad16x8_mmx, -1),
+ make_tuple(8, 16, sad8x16_mmx, -1),
+ make_tuple(8, 8, sad8x8_mmx, -1),
+ make_tuple(4, 4, sad4x4_mmx, -1),
};
INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
-#endif
+#endif // HAVE_MMX
#if HAVE_SSE
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
-const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
- make_tuple(4, 4, sad_4x4_sse_vp9),
- make_tuple(4, 8, sad_4x8_sse_vp9)));
+const SadMxNFunc sad4x8_sse = vpx_sad4x8_sse;
+const SadMxNFunc sad4x4_sse = vpx_sad4x4_sse;
+const SadMxNParam sse_tests[] = {
+ make_tuple(4, 8, sad4x8_sse, -1),
+ make_tuple(4, 4, sad4x4_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::ValuesIn(sse_tests));
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
- make_tuple(4, 8, sad_4x8x4d_sse),
- make_tuple(4, 4, sad_4x4x4d_sse)));
+const SadMxNAvgFunc sad4x8_avg_sse = vpx_sad4x8_avg_sse;
+const SadMxNAvgFunc sad4x4_avg_sse = vpx_sad4x4_avg_sse;
+const SadMxNAvgParam avg_sse_tests[] = {
+ make_tuple(4, 8, sad4x8_avg_sse, -1),
+ make_tuple(4, 4, sad4x4_avg_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADavgTest, ::testing::ValuesIn(avg_sse_tests));
+
+const SadMxNx4Func sad4x8x4d_sse = vpx_sad4x8x4d_sse;
+const SadMxNx4Func sad4x4x4d_sse = vpx_sad4x4x4d_sse;
+const SadMxNx4Param x4d_sse_tests[] = {
+ make_tuple(4, 8, sad4x8x4d_sse, -1),
+ make_tuple(4, 4, sad4x4x4d_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::ValuesIn(x4d_sse_tests));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_SSE
#if HAVE_SSE2
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
-const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
-const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
-const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
-const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-#endif
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
-const sad_m_by_n_fn_t sad_64x32_sse2_vp9 = vp9_sad64x32_sse2;
-const sad_m_by_n_fn_t sad_32x64_sse2_vp9 = vp9_sad32x64_sse2;
-const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
-const sad_m_by_n_fn_t sad_32x16_sse2_vp9 = vp9_sad32x16_sse2;
-const sad_m_by_n_fn_t sad_16x32_sse2_vp9 = vp9_sad16x32_sse2;
-const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
-const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
-const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
-const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
-const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
-#endif
-#endif
-const sad_m_by_n_test_param_t sse2_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt),
-#endif
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
- make_tuple(64, 64, sad_64x64_sse2_vp9),
- make_tuple(64, 32, sad_64x32_sse2_vp9),
- make_tuple(32, 64, sad_32x64_sse2_vp9),
- make_tuple(32, 32, sad_32x32_sse2_vp9),
- make_tuple(32, 16, sad_32x16_sse2_vp9),
- make_tuple(16, 32, sad_16x32_sse2_vp9),
- make_tuple(16, 16, sad_16x16_sse2_vp9),
- make_tuple(16, 8, sad_16x8_sse2_vp9),
- make_tuple(8, 16, sad_8x16_sse2_vp9),
- make_tuple(8, 8, sad_8x8_sse2_vp9),
- make_tuple(8, 4, sad_8x4_sse2_vp9),
-#endif
-#endif
+const SadMxNFunc sad64x64_sse2 = vpx_sad64x64_sse2;
+const SadMxNFunc sad64x32_sse2 = vpx_sad64x32_sse2;
+const SadMxNFunc sad32x64_sse2 = vpx_sad32x64_sse2;
+const SadMxNFunc sad32x32_sse2 = vpx_sad32x32_sse2;
+const SadMxNFunc sad32x16_sse2 = vpx_sad32x16_sse2;
+const SadMxNFunc sad16x32_sse2 = vpx_sad16x32_sse2;
+const SadMxNFunc sad16x16_sse2 = vpx_sad16x16_sse2;
+const SadMxNFunc sad16x8_sse2 = vpx_sad16x8_sse2;
+const SadMxNFunc sad8x16_sse2 = vpx_sad8x16_sse2;
+const SadMxNFunc sad8x8_sse2 = vpx_sad8x8_sse2;
+const SadMxNFunc sad8x4_sse2 = vpx_sad8x4_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_sse2 = vpx_highbd_sad64x64_sse2;
+const SadMxNFunc highbd_sad64x32_sse2 = vpx_highbd_sad64x32_sse2;
+const SadMxNFunc highbd_sad32x64_sse2 = vpx_highbd_sad32x64_sse2;
+const SadMxNFunc highbd_sad32x32_sse2 = vpx_highbd_sad32x32_sse2;
+const SadMxNFunc highbd_sad32x16_sse2 = vpx_highbd_sad32x16_sse2;
+const SadMxNFunc highbd_sad16x32_sse2 = vpx_highbd_sad16x32_sse2;
+const SadMxNFunc highbd_sad16x16_sse2 = vpx_highbd_sad16x16_sse2;
+const SadMxNFunc highbd_sad16x8_sse2 = vpx_highbd_sad16x8_sse2;
+const SadMxNFunc highbd_sad8x16_sse2 = vpx_highbd_sad8x16_sse2;
+const SadMxNFunc highbd_sad8x8_sse2 = vpx_highbd_sad8x8_sse2;
+const SadMxNFunc highbd_sad8x4_sse2 = vpx_highbd_sad8x4_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_sse2, -1),
+ make_tuple(64, 32, sad64x32_sse2, -1),
+ make_tuple(32, 64, sad32x64_sse2, -1),
+ make_tuple(32, 32, sad32x32_sse2, -1),
+ make_tuple(32, 16, sad32x16_sse2, -1),
+ make_tuple(16, 32, sad16x32_sse2, -1),
+ make_tuple(16, 16, sad16x16_sse2, -1),
+ make_tuple(16, 8, sad16x8_sse2, -1),
+ make_tuple(8, 16, sad8x16_sse2, -1),
+ make_tuple(8, 8, sad8x8_sse2, -1),
+ make_tuple(8, 4, sad8x4_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
-INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_sse2),
- make_tuple(64, 32, sad_64x32x4d_sse2),
- make_tuple(32, 64, sad_32x64x4d_sse2),
- make_tuple(32, 32, sad_32x32x4d_sse2),
- make_tuple(32, 16, sad_32x16x4d_sse2),
- make_tuple(16, 32, sad_16x32x4d_sse2),
- make_tuple(16, 16, sad_16x16x4d_sse2),
- make_tuple(16, 8, sad_16x8x4d_sse2),
- make_tuple(8, 16, sad_8x16x4d_sse2),
- make_tuple(8, 8, sad_8x8x4d_sse2),
- make_tuple(8, 4, sad_8x4x4d_sse2)));
-#endif
-#endif
-#endif
+const SadMxNAvgFunc sad64x64_avg_sse2 = vpx_sad64x64_avg_sse2;
+const SadMxNAvgFunc sad64x32_avg_sse2 = vpx_sad64x32_avg_sse2;
+const SadMxNAvgFunc sad32x64_avg_sse2 = vpx_sad32x64_avg_sse2;
+const SadMxNAvgFunc sad32x32_avg_sse2 = vpx_sad32x32_avg_sse2;
+const SadMxNAvgFunc sad32x16_avg_sse2 = vpx_sad32x16_avg_sse2;
+const SadMxNAvgFunc sad16x32_avg_sse2 = vpx_sad16x32_avg_sse2;
+const SadMxNAvgFunc sad16x16_avg_sse2 = vpx_sad16x16_avg_sse2;
+const SadMxNAvgFunc sad16x8_avg_sse2 = vpx_sad16x8_avg_sse2;
+const SadMxNAvgFunc sad8x16_avg_sse2 = vpx_sad8x16_avg_sse2;
+const SadMxNAvgFunc sad8x8_avg_sse2 = vpx_sad8x8_avg_sse2;
+const SadMxNAvgFunc sad8x4_avg_sse2 = vpx_sad8x4_avg_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_sse2 = vpx_highbd_sad64x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad64x32_avg_sse2 = vpx_highbd_sad64x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x64_avg_sse2 = vpx_highbd_sad32x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x32_avg_sse2 = vpx_highbd_sad32x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x16_avg_sse2 = vpx_highbd_sad32x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x32_avg_sse2 = vpx_highbd_sad16x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x16_avg_sse2 = vpx_highbd_sad16x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x8_avg_sse2 = vpx_highbd_sad16x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x16_avg_sse2 = vpx_highbd_sad8x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x8_avg_sse2 = vpx_highbd_sad8x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x4_avg_sse2 = vpx_highbd_sad8x4_avg_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_sse2, -1),
+ make_tuple(64, 32, sad64x32_avg_sse2, -1),
+ make_tuple(32, 64, sad32x64_avg_sse2, -1),
+ make_tuple(32, 32, sad32x32_avg_sse2, -1),
+ make_tuple(32, 16, sad32x16_avg_sse2, -1),
+ make_tuple(16, 32, sad16x32_avg_sse2, -1),
+ make_tuple(16, 16, sad16x16_avg_sse2, -1),
+ make_tuple(16, 8, sad16x8_avg_sse2, -1),
+ make_tuple(8, 16, sad8x16_avg_sse2, -1),
+ make_tuple(8, 8, sad8x8_avg_sse2, -1),
+ make_tuple(8, 4, sad8x4_avg_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
+
+const SadMxNx4Func sad64x64x4d_sse2 = vpx_sad64x64x4d_sse2;
+const SadMxNx4Func sad64x32x4d_sse2 = vpx_sad64x32x4d_sse2;
+const SadMxNx4Func sad32x64x4d_sse2 = vpx_sad32x64x4d_sse2;
+const SadMxNx4Func sad32x32x4d_sse2 = vpx_sad32x32x4d_sse2;
+const SadMxNx4Func sad32x16x4d_sse2 = vpx_sad32x16x4d_sse2;
+const SadMxNx4Func sad16x32x4d_sse2 = vpx_sad16x32x4d_sse2;
+const SadMxNx4Func sad16x16x4d_sse2 = vpx_sad16x16x4d_sse2;
+const SadMxNx4Func sad16x8x4d_sse2 = vpx_sad16x8x4d_sse2;
+const SadMxNx4Func sad8x16x4d_sse2 = vpx_sad8x16x4d_sse2;
+const SadMxNx4Func sad8x8x4d_sse2 = vpx_sad8x8x4d_sse2;
+const SadMxNx4Func sad8x4x4d_sse2 = vpx_sad8x4x4d_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_sse2 = vpx_highbd_sad64x64x4d_sse2;
+const SadMxNx4Func highbd_sad64x32x4d_sse2 = vpx_highbd_sad64x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x64x4d_sse2 = vpx_highbd_sad32x64x4d_sse2;
+const SadMxNx4Func highbd_sad32x32x4d_sse2 = vpx_highbd_sad32x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x16x4d_sse2 = vpx_highbd_sad32x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x32x4d_sse2 = vpx_highbd_sad16x32x4d_sse2;
+const SadMxNx4Func highbd_sad16x16x4d_sse2 = vpx_highbd_sad16x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x8x4d_sse2 = vpx_highbd_sad16x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x16x4d_sse2 = vpx_highbd_sad8x16x4d_sse2;
+const SadMxNx4Func highbd_sad8x8x4d_sse2 = vpx_highbd_sad8x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x4x4d_sse2 = vpx_highbd_sad8x4x4d_sse2;
+const SadMxNx4Func highbd_sad4x8x4d_sse2 = vpx_highbd_sad4x8x4d_sse2;
+const SadMxNx4Func highbd_sad4x4x4d_sse2 = vpx_highbd_sad4x4x4d_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_sse2, -1),
+ make_tuple(64, 32, sad64x32x4d_sse2, -1),
+ make_tuple(32, 64, sad32x64x4d_sse2, -1),
+ make_tuple(32, 32, sad32x32x4d_sse2, -1),
+ make_tuple(32, 16, sad32x16x4d_sse2, -1),
+ make_tuple(16, 32, sad16x32x4d_sse2, -1),
+ make_tuple(16, 16, sad16x16x4d_sse2, -1),
+ make_tuple(16, 8, sad16x8x4d_sse2, -1),
+ make_tuple(8, 16, sad8x16x4d_sse2, -1),
+ make_tuple(8, 8, sad8x8x4d_sse2, -1),
+ make_tuple(8, 4, sad8x4x4d_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSE2
#if HAVE_SSE3
-#if CONFIG_VP8_ENCODER
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
- make_tuple(16, 16, sad_16x16x4d_sse3),
- make_tuple(16, 8, sad_16x8x4d_sse3),
- make_tuple(8, 16, sad_8x16x4d_sse3),
- make_tuple(8, 8, sad_8x8x4d_sse3),
- make_tuple(4, 4, sad_4x4x4d_sse3)));
-#endif
-#endif
+// The only functions here are x3 variants, which do not have tests.
+#endif // HAVE_SSE3
#if HAVE_SSSE3
-#if CONFIG_USE_X86INC
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_sse3)));
-#endif
-#endif
-#endif
+// The only functions here are x3 variants, which do not have tests.
+#endif // HAVE_SSSE3
+
+#if HAVE_SSE4_1
+// The only functions here are x8 variants, which do not have tests.
+#endif // HAVE_SSE4_1
+
+#if HAVE_AVX2
+const SadMxNFunc sad64x64_avx2 = vpx_sad64x64_avx2;
+const SadMxNFunc sad64x32_avx2 = vpx_sad64x32_avx2;
+const SadMxNFunc sad32x64_avx2 = vpx_sad32x64_avx2;
+const SadMxNFunc sad32x32_avx2 = vpx_sad32x32_avx2;
+const SadMxNFunc sad32x16_avx2 = vpx_sad32x16_avx2;
+const SadMxNParam avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avx2, -1),
+ make_tuple(64, 32, sad64x32_avx2, -1),
+ make_tuple(32, 64, sad32x64_avx2, -1),
+ make_tuple(32, 32, sad32x32_avx2, -1),
+ make_tuple(32, 16, sad32x16_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
+
+const SadMxNAvgFunc sad64x64_avg_avx2 = vpx_sad64x64_avg_avx2;
+const SadMxNAvgFunc sad64x32_avg_avx2 = vpx_sad64x32_avg_avx2;
+const SadMxNAvgFunc sad32x64_avg_avx2 = vpx_sad32x64_avg_avx2;
+const SadMxNAvgFunc sad32x32_avg_avx2 = vpx_sad32x32_avg_avx2;
+const SadMxNAvgFunc sad32x16_avg_avx2 = vpx_sad32x16_avg_avx2;
+const SadMxNAvgParam avg_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_avx2, -1),
+ make_tuple(64, 32, sad64x32_avg_avx2, -1),
+ make_tuple(32, 64, sad32x64_avg_avx2, -1),
+ make_tuple(32, 32, sad32x32_avg_avx2, -1),
+ make_tuple(32, 16, sad32x16_avg_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
+
+const SadMxNx4Func sad64x64x4d_avx2 = vpx_sad64x64x4d_avx2;
+const SadMxNx4Func sad32x32x4d_avx2 = vpx_sad32x32x4d_avx2;
+const SadMxNx4Param x4d_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_avx2, -1),
+ make_tuple(32, 32, sad32x32x4d_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
+#endif // HAVE_AVX2
+
+//------------------------------------------------------------------------------
+// MIPS functions
+#if HAVE_MSA
+const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
+const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
+const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
+const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
+const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
+const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
+const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
+const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
+const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
+const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
+const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
+const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
+const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
+const SadMxNParam msa_tests[] = {
+ make_tuple(64, 64, sad64x64_msa, -1),
+ make_tuple(64, 32, sad64x32_msa, -1),
+ make_tuple(32, 64, sad32x64_msa, -1),
+ make_tuple(32, 32, sad32x32_msa, -1),
+ make_tuple(32, 16, sad32x16_msa, -1),
+ make_tuple(16, 32, sad16x32_msa, -1),
+ make_tuple(16, 16, sad16x16_msa, -1),
+ make_tuple(16, 8, sad16x8_msa, -1),
+ make_tuple(8, 16, sad8x16_msa, -1),
+ make_tuple(8, 8, sad8x8_msa, -1),
+ make_tuple(8, 4, sad8x4_msa, -1),
+ make_tuple(4, 8, sad4x8_msa, -1),
+ make_tuple(4, 4, sad4x4_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
+
+const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
+const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
+const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
+const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
+const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
+const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
+const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
+const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
+const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
+const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
+const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
+const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
+const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
+const SadMxNAvgParam avg_msa_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_msa, -1),
+ make_tuple(64, 32, sad64x32_avg_msa, -1),
+ make_tuple(32, 64, sad32x64_avg_msa, -1),
+ make_tuple(32, 32, sad32x32_avg_msa, -1),
+ make_tuple(32, 16, sad32x16_avg_msa, -1),
+ make_tuple(16, 32, sad16x32_avg_msa, -1),
+ make_tuple(16, 16, sad16x16_avg_msa, -1),
+ make_tuple(16, 8, sad16x8_avg_msa, -1),
+ make_tuple(8, 16, sad8x16_avg_msa, -1),
+ make_tuple(8, 8, sad8x8_avg_msa, -1),
+ make_tuple(8, 4, sad8x4_avg_msa, -1),
+ make_tuple(4, 8, sad4x8_avg_msa, -1),
+ make_tuple(4, 4, sad4x4_avg_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
+
+const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
+const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
+const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
+const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
+const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
+const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
+const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
+const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
+const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
+const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
+const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
+const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
+const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
+const SadMxNx4Param x4d_msa_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_msa, -1),
+ make_tuple(64, 32, sad64x32x4d_msa, -1),
+ make_tuple(32, 64, sad32x64x4d_msa, -1),
+ make_tuple(32, 32, sad32x32x4d_msa, -1),
+ make_tuple(32, 16, sad32x16x4d_msa, -1),
+ make_tuple(16, 32, sad16x32x4d_msa, -1),
+ make_tuple(16, 16, sad16x16x4d_msa, -1),
+ make_tuple(16, 8, sad16x8x4d_msa, -1),
+ make_tuple(8, 16, sad8x16x4d_msa, -1),
+ make_tuple(8, 8, sad8x8x4d_msa, -1),
+ make_tuple(8, 4, sad8x4x4d_msa, -1),
+ make_tuple(4, 8, sad4x8x4d_msa, -1),
+ make_tuple(4, 4, sad4x4x4d_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
| null |
if (bd_ == -1) {
use_high_bit_depth_ = false;
bit_depth_ = VPX_BITS_8;
source_data_ = source_data8_;
reference_data_ = reference_data8_;
second_pred_ = second_pred8_;
#if CONFIG_VP9_HIGHBITDEPTH
} else {
use_high_bit_depth_ = true;
bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
mask_ = (1 << bit_depth_) - 1;
|
150,878 |
static void SetUpTestCase() {
source_data_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
reference_data_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static void SetUpTestCase() {
source_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
reference_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
second_pred8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, 64*64));
source_data16_ = reinterpret_cast<uint16_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
reference_data16_ = reinterpret_cast<uint16_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
second_pred16_ = reinterpret_cast<uint16_t*>(
vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
}
|
@@ -13,56 +13,74 @@
#include <limits.h>
#include <stdio.h>
-#include "./vpx_config.h"
-#if CONFIG_VP8_ENCODER
-#include "./vp8_rtcd.h"
-#endif
-#if CONFIG_VP9_ENCODER
-#include "./vp9_rtcd.h"
-#endif
-#include "vpx_mem/vpx_mem.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
-typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
- int source_stride,
- const unsigned char *reference_ptr,
- int reference_stride,
- unsigned int max_sad);
-typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ const uint8_t *second_pred);
+typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
-typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
- int src_stride,
- const unsigned char * const ref_ptr[],
- int ref_stride,
- unsigned int *sad_array);
-typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
- sad_n_by_n_by_4_test_param_t;
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[],
+ int ref_stride,
+ uint32_t *sad_array);
+typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
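// Editor sketch (not from the patch; src/ref names are assumed): the typedefs
// above are plain function pointers, so one test body can exercise any SAD
// kernel, e.g.:
//   const SadMxNFunc fn = vpx_sad16x16_c;
//   const uint32_t sad = fn(src, src_stride, ref, ref_stride);
// The trailing int in each tuple is the bit depth; -1 selects the 8-bit path.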
using libvpx_test::ACMRandom;
namespace {
class SADTestBase : public ::testing::Test {
public:
- SADTestBase(int width, int height) : width_(width), height_(height) {}
+ SADTestBase(int width, int height, int bit_depth) :
+ width_(width), height_(height), bd_(bit_depth) {}
static void SetUpTestCase() {
- source_data_ = reinterpret_cast<uint8_t*>(
+ source_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize));
- reference_data_ = reinterpret_cast<uint8_t*>(
+ reference_data8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
+ second_pred8_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, 64*64));
+ source_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
+ reference_data16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
+ second_pred16_ = reinterpret_cast<uint16_t*>(
+ vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
}
static void TearDownTestCase() {
- vpx_free(source_data_);
- source_data_ = NULL;
- vpx_free(reference_data_);
- reference_data_ = NULL;
+ vpx_free(source_data8_);
+ source_data8_ = NULL;
+ vpx_free(reference_data8_);
+ reference_data8_ = NULL;
+ vpx_free(second_pred8_);
+ second_pred8_ = NULL;
+ vpx_free(source_data16_);
+ source_data16_ = NULL;
+ vpx_free(reference_data16_);
+ reference_data16_ = NULL;
+ vpx_free(second_pred16_);
+ second_pred16_ = NULL;
}
virtual void TearDown() {
@@ -76,142 +94,335 @@
static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
+ if (bd_ == -1) {
+ use_high_bit_depth_ = false;
+ bit_depth_ = VPX_BITS_8;
+ source_data_ = source_data8_;
+ reference_data_ = reference_data8_;
+ second_pred_ = second_pred8_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ use_high_bit_depth_ = true;
+ bit_depth_ = static_cast<vpx_bit_depth_t>(bd_);
+ source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
+ reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
+ second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ mask_ = (1 << bit_depth_) - 1;
source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
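  // Editor note (sketch, not from the patch): CONVERT_TO_BYTEPTR and
  // CONVERT_TO_SHORTPTR (vpx_ports/mem.h) are inverse macros that let a
  // uint16_t high-bit-depth buffer travel through the uint8_t* kernel
  // signatures; the tagged pointer must be converted back before it is
  // dereferenced. For 10-bit input, mask_ == (1 << 10) - 1 == 1023, the
  // largest legal sample value.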
- virtual uint8_t* GetReference(int block_idx) {
+ virtual uint8_t *GetReference(int block_idx) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_high_bit_depth_)
+ return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
+ block_idx * kDataBlockSize);
+#endif // CONFIG_VP9_HIGHBITDEPTH
return reference_data_ + block_idx * kDataBlockSize;
}
// Sum of Absolute Differences. Given two blocks, calculate the absolute
// difference between two pixels in the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ReferenceSAD(int block_idx) {
unsigned int sad = 0;
- const uint8_t* const reference = GetReference(block_idx);
-
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- sad += abs(source_data_[h * source_stride_ + w]
- - reference[h * reference_stride_ + w]);
- }
- if (sad > max_sad) {
- break;
+ if (!use_high_bit_depth_) {
+ sad += abs(source8[h * source_stride_ + w] -
+ reference8[h * reference_stride_ + w]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ sad += abs(source16[h * source_stride_ + w] -
+ reference16[h * reference_stride_ + w]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
return sad;
}
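  // Editor sketch (not from the patch): a worked instance of the loop above.
  // For a 2x2 block with source {10, 20, 30, 40} and reference {12, 18, 33,
  // 35}, SAD = |10-12| + |20-18| + |30-33| + |40-35| = 2 + 2 + 3 + 5 = 12.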
- void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) {
+  // Sum of Absolute Differences Average. Given two blocks and a prediction,
+  // calculate the absolute difference between one source pixel and the
+  // average of the corresponding reference and predicted pixels; accumulate.
+ unsigned int ReferenceSADavg(int block_idx) {
+ unsigned int sad = 0;
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
+ const uint8_t *const second_pred8 = second_pred_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+ const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = fill_constant;
+ if (!use_high_bit_depth_) {
+ const int tmp = second_pred8[h * width_ + w] +
+ reference8[h * reference_stride_ + w];
+ const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source8[h * source_stride_ + w] - comp_pred);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ const int tmp = second_pred16[h * width_ + w] +
+ reference16[h * reference_stride_ + w];
+ const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
+ sad += abs(source16[h * source_stride_ + w] - comp_pred);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ return sad;
+ }
+
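  // Editor sketch (not from the patch): one element of the averaging path.
  // With reference pixel 100, second_pred pixel 103, and source pixel 98,
  // ROUND_POWER_OF_TWO(103 + 100, 1) == (203 + 1) >> 1 == 102, so this
  // pixel contributes |98 - 102| == 4 to the accumulated SAD.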
+ void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ for (int h = 0; h < height_; ++h) {
+ for (int w = 0; w < width_; ++w) {
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = fill_constant;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
void FillRandom(uint8_t *data, int stride) {
+ uint8_t *data8 = data;
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
+#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
- data[h * stride + w] = rnd_.Rand8();
+ if (!use_high_bit_depth_) {
+ data8[h * stride + w] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ data16[h * stride + w] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
}
- int width_, height_;
- static uint8_t* source_data_;
+ int width_, height_, mask_, bd_;
+ vpx_bit_depth_t bit_depth_;
+ static uint8_t *source_data_;
+ static uint8_t *reference_data_;
+ static uint8_t *second_pred_;
int source_stride_;
- static uint8_t* reference_data_;
+ bool use_high_bit_depth_;
+ static uint8_t *source_data8_;
+ static uint8_t *reference_data8_;
+ static uint8_t *second_pred8_;
+ static uint16_t *source_data16_;
+ static uint16_t *reference_data16_;
+ static uint16_t *second_pred16_;
int reference_stride_;
ACMRandom rnd_;
};
-class SADTest : public SADTestBase,
- public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+class SADx4Test
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNx4Param> {
public:
- SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
-
- protected:
- unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
- unsigned int ret;
- const uint8_t* const reference = GetReference(block_idx);
-
- REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
- reference, reference_stride_,
- max_sad));
- return ret;
- }
-
- void CheckSad(unsigned int max_sad) {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(max_sad);
- exp_sad = SAD(max_sad);
-
- if (reference_sad <= max_sad) {
- ASSERT_EQ(exp_sad, reference_sad);
- } else {
- // Alternative implementations are not required to check max_sad
- ASSERT_GE(exp_sad, reference_sad);
- }
- }
-};
-
-class SADx4Test : public SADTestBase,
- public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
- public:
- SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
protected:
void SADs(unsigned int *results) {
- const uint8_t* refs[] = {GetReference(0), GetReference(1),
- GetReference(2), GetReference(3)};
+ const uint8_t *references[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
- REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
- refs, reference_stride_,
- results));
+ ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ references, reference_stride_,
+ results));
}
void CheckSADs() {
unsigned int reference_sad, exp_sad[4];
SADs(exp_sad);
- for (int block = 0; block < 4; block++) {
- reference_sad = ReferenceSAD(UINT_MAX, block);
+ for (int block = 0; block < 4; ++block) {
+ reference_sad = ReferenceSAD(block);
- EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ EXPECT_EQ(reference_sad, exp_sad[block]) << "block " << block;
}
}
};
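// Editor sketch (not from the patch; buffer names are assumed): a x4d kernel
// computes four SADs against four candidate references in a single call:
//   const uint8_t *const refs[4] = {ref0, ref1, ref2, ref3};
//   uint32_t sads[4];
//   vpx_sad16x16x4d_c(src, src_stride, refs, ref_stride, sads);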
-uint8_t* SADTestBase::source_data_ = NULL;
-uint8_t* SADTestBase::reference_data_ = NULL;
+class SADTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNParam> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSAD(0);
+ const unsigned int exp_sad = SAD(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+class SADavgTest
+ : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNAvgParam> {
+ public:
+ SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
+
+ protected:
+ unsigned int SAD_avg(int block_idx) {
+ unsigned int ret;
+ const uint8_t *const reference = GetReference(block_idx);
+
+ ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ second_pred_));
+ return ret;
+ }
+
+ void CheckSAD() {
+ const unsigned int reference_sad = ReferenceSADavg(0);
+ const unsigned int exp_sad = SAD_avg(0);
+
+ ASSERT_EQ(reference_sad, exp_sad);
+ }
+};
+
+uint8_t *SADTestBase::source_data_ = NULL;
+uint8_t *SADTestBase::reference_data_ = NULL;
+uint8_t *SADTestBase::second_pred_ = NULL;
+uint8_t *SADTestBase::source_data8_ = NULL;
+uint8_t *SADTestBase::reference_data8_ = NULL;
+uint8_t *SADTestBase::second_pred8_ = NULL;
+uint16_t *SADTestBase::source_data16_ = NULL;
+uint16_t *SADTestBase::reference_data16_ = NULL;
+uint16_t *SADTestBase::second_pred16_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(reference_data_, reference_stride_, 255);
- CheckSad(UINT_MAX);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ CheckSAD();
+}
+
+TEST_P(SADTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
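// Editor note (sketch): motion-search candidates land at arbitrary byte
// offsets in the reference frame, so SAD kernels must not assume ref_ptr
// alignment; shrinking reference_stride_ by one forces odd, unaligned rows.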
+
+TEST_P(SADTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(reference_data_, reference_stride_, mask_);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+TEST_P(SADavgTest, MaxSrc) {
+ FillConstant(source_data_, source_stride_, mask_);
+ FillConstant(reference_data_, reference_stride_, 0);
+ FillConstant(second_pred_, width_, 0);
+ CheckSAD();
+}
+
+TEST_P(SADavgTest, ShortRef) {
+ const int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ const int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ reference_stride_ = tmp_stride;
+}
+
+TEST_P(SADavgTest, ShortSrc) {
+ const int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(reference_data_, reference_stride_);
+ FillRandom(second_pred_, width_);
+ CheckSAD();
+ source_stride_ = tmp_stride;
}
TEST_P(SADx4Test, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
- FillConstant(GetReference(0), reference_stride_, 255);
- FillConstant(GetReference(1), reference_stride_, 255);
- FillConstant(GetReference(2), reference_stride_, 255);
- FillConstant(GetReference(3), reference_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, mask_);
+ FillConstant(GetReference(1), reference_stride_, mask_);
+ FillConstant(GetReference(2), reference_stride_, mask_);
+ FillConstant(GetReference(3), reference_stride_, mask_);
CheckSADs();
}
-TEST_P(SADTest, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(UINT_MAX);
-}
-
TEST_P(SADx4Test, MaxSrc) {
- FillConstant(source_data_, source_stride_, 255);
+ FillConstant(source_data_, source_stride_, mask_);
FillConstant(GetReference(0), reference_stride_, 0);
FillConstant(GetReference(1), reference_stride_, 0);
FillConstant(GetReference(2), reference_stride_, 0);
@@ -219,15 +430,6 @@
CheckSADs();
}
-TEST_P(SADTest, ShortRef) {
- int tmp_stride = reference_stride_;
- reference_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -240,17 +442,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, UnalignedRef) {
- // The reference frame, but not the source frame, may be unaligned for
- // certain types of searches.
- int tmp_stride = reference_stride_;
- reference_stride_ -= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- reference_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -265,15 +456,6 @@
reference_stride_ = tmp_stride;
}
-TEST_P(SADTest, ShortSrc) {
- int tmp_stride = source_stride_;
- source_stride_ >>= 1;
- FillRandom(source_data_, source_stride_);
- FillRandom(reference_data_, reference_stride_);
- CheckSad(UINT_MAX);
- source_stride_ = tmp_stride;
-}
-
TEST_P(SADx4Test, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -286,271 +468,743 @@
source_stride_ = tmp_stride;
}
-TEST_P(SADTest, MaxSAD) {
- // Verify that, when max_sad is set, the implementation does not return a
- // value lower than the reference.
- FillConstant(source_data_, source_stride_, 255);
- FillConstant(reference_data_, reference_stride_, 0);
- CheckSad(128);
+TEST_P(SADx4Test, SrcAlignedByWidth) {
+  uint8_t *tmp_source_data = source_data_;
+ source_data_ += width_;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_data_ = tmp_source_data;
}
using std::tr1::make_tuple;
//------------------------------------------------------------------------------
// C functions
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
-const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
-const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
-const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
-const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
-const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
-const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
-const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
-const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
-const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
-#endif
-const sad_m_by_n_test_param_t c_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(64, 64, sad_64x64_c_vp9),
- make_tuple(32, 32, sad_32x32_c_vp9),
- make_tuple(16, 16, sad_16x16_c_vp9),
- make_tuple(8, 16, sad_8x16_c_vp9),
- make_tuple(16, 8, sad_16x8_c_vp9),
- make_tuple(8, 8, sad_8x8_c_vp9),
- make_tuple(8, 4, sad_8x4_c_vp9),
- make_tuple(4, 8, sad_4x8_c_vp9),
- make_tuple(4, 4, sad_4x4_c_vp9),
-#endif
+const SadMxNFunc sad64x64_c = vpx_sad64x64_c;
+const SadMxNFunc sad64x32_c = vpx_sad64x32_c;
+const SadMxNFunc sad32x64_c = vpx_sad32x64_c;
+const SadMxNFunc sad32x32_c = vpx_sad32x32_c;
+const SadMxNFunc sad32x16_c = vpx_sad32x16_c;
+const SadMxNFunc sad16x32_c = vpx_sad16x32_c;
+const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
+const SadMxNFunc sad16x8_c = vpx_sad16x8_c;
+const SadMxNFunc sad8x16_c = vpx_sad8x16_c;
+const SadMxNFunc sad8x8_c = vpx_sad8x8_c;
+const SadMxNFunc sad8x4_c = vpx_sad8x4_c;
+const SadMxNFunc sad4x8_c = vpx_sad4x8_c;
+const SadMxNFunc sad4x4_c = vpx_sad4x4_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_c = vpx_highbd_sad64x64_c;
+const SadMxNFunc highbd_sad64x32_c = vpx_highbd_sad64x32_c;
+const SadMxNFunc highbd_sad32x64_c = vpx_highbd_sad32x64_c;
+const SadMxNFunc highbd_sad32x32_c = vpx_highbd_sad32x32_c;
+const SadMxNFunc highbd_sad32x16_c = vpx_highbd_sad32x16_c;
+const SadMxNFunc highbd_sad16x32_c = vpx_highbd_sad16x32_c;
+const SadMxNFunc highbd_sad16x16_c = vpx_highbd_sad16x16_c;
+const SadMxNFunc highbd_sad16x8_c = vpx_highbd_sad16x8_c;
+const SadMxNFunc highbd_sad8x16_c = vpx_highbd_sad8x16_c;
+const SadMxNFunc highbd_sad8x8_c = vpx_highbd_sad8x8_c;
+const SadMxNFunc highbd_sad8x4_c = vpx_highbd_sad8x4_c;
+const SadMxNFunc highbd_sad4x8_c = vpx_highbd_sad4x8_c;
+const SadMxNFunc highbd_sad4x4_c = vpx_highbd_sad4x4_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNParam c_tests[] = {
+ make_tuple(64, 64, sad64x64_c, -1),
+ make_tuple(64, 32, sad64x32_c, -1),
+ make_tuple(32, 64, sad32x64_c, -1),
+ make_tuple(32, 32, sad32x32_c, -1),
+ make_tuple(32, 16, sad32x16_c, -1),
+ make_tuple(16, 32, sad16x32_c, -1),
+ make_tuple(16, 16, sad16x16_c, -1),
+ make_tuple(16, 8, sad16x8_c, -1),
+ make_tuple(8, 16, sad8x16_c, -1),
+ make_tuple(8, 8, sad8x8_c, -1),
+ make_tuple(8, 4, sad8x4_c, -1),
+ make_tuple(4, 8, sad4x8_c, -1),
+ make_tuple(4, 4, sad4x4_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
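// Editor note (sketch): gtest expands each tuple in c_tests into one test
// instance per TEST_P, named e.g. C/SADTest.MaxRef/0 for
// make_tuple(64, 64, sad64x64_c, -1); the -1 bit depth routes
// SADTestBase::SetUp() to the plain 8-bit buffers.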
-#if CONFIG_VP9_ENCODER
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
-INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_c),
- make_tuple(64, 32, sad_64x32x4d_c),
- make_tuple(32, 64, sad_32x64x4d_c),
- make_tuple(32, 32, sad_32x32x4d_c),
- make_tuple(32, 16, sad_32x16x4d_c),
- make_tuple(16, 32, sad_16x32x4d_c),
- make_tuple(16, 16, sad_16x16x4d_c),
- make_tuple(16, 8, sad_16x8x4d_c),
- make_tuple(8, 16, sad_8x16x4d_c),
- make_tuple(8, 8, sad_8x8x4d_c),
- make_tuple(8, 4, sad_8x4x4d_c),
- make_tuple(4, 8, sad_4x8x4d_c),
- make_tuple(4, 4, sad_4x4x4d_c)));
-#endif // CONFIG_VP9_ENCODER
+const SadMxNAvgFunc sad64x64_avg_c = vpx_sad64x64_avg_c;
+const SadMxNAvgFunc sad64x32_avg_c = vpx_sad64x32_avg_c;
+const SadMxNAvgFunc sad32x64_avg_c = vpx_sad32x64_avg_c;
+const SadMxNAvgFunc sad32x32_avg_c = vpx_sad32x32_avg_c;
+const SadMxNAvgFunc sad32x16_avg_c = vpx_sad32x16_avg_c;
+const SadMxNAvgFunc sad16x32_avg_c = vpx_sad16x32_avg_c;
+const SadMxNAvgFunc sad16x16_avg_c = vpx_sad16x16_avg_c;
+const SadMxNAvgFunc sad16x8_avg_c = vpx_sad16x8_avg_c;
+const SadMxNAvgFunc sad8x16_avg_c = vpx_sad8x16_avg_c;
+const SadMxNAvgFunc sad8x8_avg_c = vpx_sad8x8_avg_c;
+const SadMxNAvgFunc sad8x4_avg_c = vpx_sad8x4_avg_c;
+const SadMxNAvgFunc sad4x8_avg_c = vpx_sad4x8_avg_c;
+const SadMxNAvgFunc sad4x4_avg_c = vpx_sad4x4_avg_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_c = vpx_highbd_sad64x64_avg_c;
+const SadMxNAvgFunc highbd_sad64x32_avg_c = vpx_highbd_sad64x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x64_avg_c = vpx_highbd_sad32x64_avg_c;
+const SadMxNAvgFunc highbd_sad32x32_avg_c = vpx_highbd_sad32x32_avg_c;
+const SadMxNAvgFunc highbd_sad32x16_avg_c = vpx_highbd_sad32x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x32_avg_c = vpx_highbd_sad16x32_avg_c;
+const SadMxNAvgFunc highbd_sad16x16_avg_c = vpx_highbd_sad16x16_avg_c;
+const SadMxNAvgFunc highbd_sad16x8_avg_c = vpx_highbd_sad16x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x16_avg_c = vpx_highbd_sad8x16_avg_c;
+const SadMxNAvgFunc highbd_sad8x8_avg_c = vpx_highbd_sad8x8_avg_c;
+const SadMxNAvgFunc highbd_sad8x4_avg_c = vpx_highbd_sad8x4_avg_c;
+const SadMxNAvgFunc highbd_sad4x8_avg_c = vpx_highbd_sad4x8_avg_c;
+const SadMxNAvgFunc highbd_sad4x4_avg_c = vpx_highbd_sad4x4_avg_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_c_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_c, -1),
+ make_tuple(64, 32, sad64x32_avg_c, -1),
+ make_tuple(32, 64, sad32x64_avg_c, -1),
+ make_tuple(32, 32, sad32x32_avg_c, -1),
+ make_tuple(32, 16, sad32x16_avg_c, -1),
+ make_tuple(16, 32, sad16x32_avg_c, -1),
+ make_tuple(16, 16, sad16x16_avg_c, -1),
+ make_tuple(16, 8, sad16x8_avg_c, -1),
+ make_tuple(8, 16, sad8x16_avg_c, -1),
+ make_tuple(8, 8, sad8x8_avg_c, -1),
+ make_tuple(8, 4, sad8x4_avg_c, -1),
+ make_tuple(4, 8, sad4x8_avg_c, -1),
+ make_tuple(4, 4, sad4x4_avg_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 8),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 8),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 10),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 10),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_c, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_c, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_c, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_c, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_c, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_c, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_c, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_c, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_c, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_c, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_c, 12),
+ make_tuple(4, 8, highbd_sad4x8_avg_c, 12),
+ make_tuple(4, 4, highbd_sad4x4_avg_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
+
+const SadMxNx4Func sad64x64x4d_c = vpx_sad64x64x4d_c;
+const SadMxNx4Func sad64x32x4d_c = vpx_sad64x32x4d_c;
+const SadMxNx4Func sad32x64x4d_c = vpx_sad32x64x4d_c;
+const SadMxNx4Func sad32x32x4d_c = vpx_sad32x32x4d_c;
+const SadMxNx4Func sad32x16x4d_c = vpx_sad32x16x4d_c;
+const SadMxNx4Func sad16x32x4d_c = vpx_sad16x32x4d_c;
+const SadMxNx4Func sad16x16x4d_c = vpx_sad16x16x4d_c;
+const SadMxNx4Func sad16x8x4d_c = vpx_sad16x8x4d_c;
+const SadMxNx4Func sad8x16x4d_c = vpx_sad8x16x4d_c;
+const SadMxNx4Func sad8x8x4d_c = vpx_sad8x8x4d_c;
+const SadMxNx4Func sad8x4x4d_c = vpx_sad8x4x4d_c;
+const SadMxNx4Func sad4x8x4d_c = vpx_sad4x8x4d_c;
+const SadMxNx4Func sad4x4x4d_c = vpx_sad4x4x4d_c;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_c = vpx_highbd_sad64x64x4d_c;
+const SadMxNx4Func highbd_sad64x32x4d_c = vpx_highbd_sad64x32x4d_c;
+const SadMxNx4Func highbd_sad32x64x4d_c = vpx_highbd_sad32x64x4d_c;
+const SadMxNx4Func highbd_sad32x32x4d_c = vpx_highbd_sad32x32x4d_c;
+const SadMxNx4Func highbd_sad32x16x4d_c = vpx_highbd_sad32x16x4d_c;
+const SadMxNx4Func highbd_sad16x32x4d_c = vpx_highbd_sad16x32x4d_c;
+const SadMxNx4Func highbd_sad16x16x4d_c = vpx_highbd_sad16x16x4d_c;
+const SadMxNx4Func highbd_sad16x8x4d_c = vpx_highbd_sad16x8x4d_c;
+const SadMxNx4Func highbd_sad8x16x4d_c = vpx_highbd_sad8x16x4d_c;
+const SadMxNx4Func highbd_sad8x8x4d_c = vpx_highbd_sad8x8x4d_c;
+const SadMxNx4Func highbd_sad8x4x4d_c = vpx_highbd_sad8x4x4d_c;
+const SadMxNx4Func highbd_sad4x8x4d_c = vpx_highbd_sad4x8x4d_c;
+const SadMxNx4Func highbd_sad4x4x4d_c = vpx_highbd_sad4x4x4d_c;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_c_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_c, -1),
+ make_tuple(64, 32, sad64x32x4d_c, -1),
+ make_tuple(32, 64, sad32x64x4d_c, -1),
+ make_tuple(32, 32, sad32x32x4d_c, -1),
+ make_tuple(32, 16, sad32x16x4d_c, -1),
+ make_tuple(16, 32, sad16x32x4d_c, -1),
+ make_tuple(16, 16, sad16x16x4d_c, -1),
+ make_tuple(16, 8, sad16x8x4d_c, -1),
+ make_tuple(8, 16, sad8x16x4d_c, -1),
+ make_tuple(8, 8, sad8x8x4d_c, -1),
+ make_tuple(8, 4, sad8x4x4d_c, -1),
+ make_tuple(4, 8, sad4x8x4d_c, -1),
+ make_tuple(4, 4, sad4x4x4d_c, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_c, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_c, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_c, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_c, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_c, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_c, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_c, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_c, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_c, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_c, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_c, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_c, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_c, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
//------------------------------------------------------------------------------
// ARM functions
#if HAVE_MEDIA
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_armv6 = vp8_sad16x16_armv6;
-INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_armv6)));
-#endif
-#endif
+const SadMxNFunc sad16x16_media = vpx_sad16x16_media;
+const SadMxNParam media_tests[] = {
+ make_tuple(16, 16, sad16x16_media, -1),
+};
+INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
+#endif // HAVE_MEDIA
#if HAVE_NEON
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_neon = vp8_sad16x16_neon;
-const sad_m_by_n_fn_t sad_8x16_neon = vp8_sad8x16_neon;
-const sad_m_by_n_fn_t sad_16x8_neon = vp8_sad16x8_neon;
-const sad_m_by_n_fn_t sad_8x8_neon = vp8_sad8x8_neon;
-const sad_m_by_n_fn_t sad_4x4_neon = vp8_sad4x4_neon;
-INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_neon),
- make_tuple(8, 16, sad_8x16_neon),
- make_tuple(16, 8, sad_16x8_neon),
- make_tuple(8, 8, sad_8x8_neon),
- make_tuple(4, 4, sad_4x4_neon)));
-#endif
-#endif
+const SadMxNFunc sad64x64_neon = vpx_sad64x64_neon;
+const SadMxNFunc sad32x32_neon = vpx_sad32x32_neon;
+const SadMxNFunc sad16x16_neon = vpx_sad16x16_neon;
+const SadMxNFunc sad16x8_neon = vpx_sad16x8_neon;
+const SadMxNFunc sad8x16_neon = vpx_sad8x16_neon;
+const SadMxNFunc sad8x8_neon = vpx_sad8x8_neon;
+const SadMxNFunc sad4x4_neon = vpx_sad4x4_neon;
+
+const SadMxNParam neon_tests[] = {
+ make_tuple(64, 64, sad64x64_neon, -1),
+ make_tuple(32, 32, sad32x32_neon, -1),
+ make_tuple(16, 16, sad16x16_neon, -1),
+ make_tuple(16, 8, sad16x8_neon, -1),
+ make_tuple(8, 16, sad8x16_neon, -1),
+ make_tuple(8, 8, sad8x8_neon, -1),
+ make_tuple(4, 4, sad4x4_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
+
+const SadMxNx4Func sad64x64x4d_neon = vpx_sad64x64x4d_neon;
+const SadMxNx4Func sad32x32x4d_neon = vpx_sad32x32x4d_neon;
+const SadMxNx4Func sad16x16x4d_neon = vpx_sad16x16x4d_neon;
+const SadMxNx4Param x4d_neon_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_neon, -1),
+ make_tuple(32, 32, sad32x32x4d_neon, -1),
+ make_tuple(16, 16, sad16x16x4d_neon, -1),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
+#endif // HAVE_NEON
//------------------------------------------------------------------------------
// x86 functions
#if HAVE_MMX
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-#endif
-#if CONFIG_VP9_ENCODER
-const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
-const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
-const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
-const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
-const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
-#endif
-
-const sad_m_by_n_test_param_t mmx_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx),
-#endif
-#if CONFIG_VP9_ENCODER
- make_tuple(16, 16, sad_16x16_mmx_vp9),
- make_tuple(8, 16, sad_8x16_mmx_vp9),
- make_tuple(16, 8, sad_16x8_mmx_vp9),
- make_tuple(8, 8, sad_8x8_mmx_vp9),
- make_tuple(4, 4, sad_4x4_mmx_vp9),
-#endif
+const SadMxNFunc sad16x16_mmx = vpx_sad16x16_mmx;
+const SadMxNFunc sad16x8_mmx = vpx_sad16x8_mmx;
+const SadMxNFunc sad8x16_mmx = vpx_sad8x16_mmx;
+const SadMxNFunc sad8x8_mmx = vpx_sad8x8_mmx;
+const SadMxNFunc sad4x4_mmx = vpx_sad4x4_mmx;
+const SadMxNParam mmx_tests[] = {
+ make_tuple(16, 16, sad16x16_mmx, -1),
+ make_tuple(16, 8, sad16x8_mmx, -1),
+ make_tuple(8, 16, sad8x16_mmx, -1),
+ make_tuple(8, 8, sad8x8_mmx, -1),
+ make_tuple(4, 4, sad4x4_mmx, -1),
};
INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
-#endif
+#endif // HAVE_MMX
#if HAVE_SSE
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
-const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
- make_tuple(4, 4, sad_4x4_sse_vp9),
- make_tuple(4, 8, sad_4x8_sse_vp9)));
+const SadMxNFunc sad4x8_sse = vpx_sad4x8_sse;
+const SadMxNFunc sad4x4_sse = vpx_sad4x4_sse;
+const SadMxNParam sse_tests[] = {
+ make_tuple(4, 8, sad4x8_sse, -1),
+ make_tuple(4, 4, sad4x4_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::ValuesIn(sse_tests));
-const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
-INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
- make_tuple(4, 8, sad_4x8x4d_sse),
- make_tuple(4, 4, sad_4x4x4d_sse)));
+const SadMxNAvgFunc sad4x8_avg_sse = vpx_sad4x8_avg_sse;
+const SadMxNAvgFunc sad4x4_avg_sse = vpx_sad4x4_avg_sse;
+const SadMxNAvgParam avg_sse_tests[] = {
+ make_tuple(4, 8, sad4x8_avg_sse, -1),
+ make_tuple(4, 4, sad4x4_avg_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADavgTest, ::testing::ValuesIn(avg_sse_tests));
+
+const SadMxNx4Func sad4x8x4d_sse = vpx_sad4x8x4d_sse;
+const SadMxNx4Func sad4x4x4d_sse = vpx_sad4x4x4d_sse;
+const SadMxNx4Param x4d_sse_tests[] = {
+ make_tuple(4, 8, sad4x8x4d_sse, -1),
+ make_tuple(4, 4, sad4x4x4d_sse, -1),
+};
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::ValuesIn(x4d_sse_tests));
#endif // CONFIG_USE_X86INC
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_SSE
#if HAVE_SSE2
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
-const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
-const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
-const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
-const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-#endif
-#if CONFIG_VP9_ENCODER
#if CONFIG_USE_X86INC
-const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
-const sad_m_by_n_fn_t sad_64x32_sse2_vp9 = vp9_sad64x32_sse2;
-const sad_m_by_n_fn_t sad_32x64_sse2_vp9 = vp9_sad32x64_sse2;
-const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
-const sad_m_by_n_fn_t sad_32x16_sse2_vp9 = vp9_sad32x16_sse2;
-const sad_m_by_n_fn_t sad_16x32_sse2_vp9 = vp9_sad16x32_sse2;
-const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
-const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
-const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
-const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
-const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
-#endif
-#endif
-const sad_m_by_n_test_param_t sse2_tests[] = {
-#if CONFIG_VP8_ENCODER
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt),
-#endif
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
- make_tuple(64, 64, sad_64x64_sse2_vp9),
- make_tuple(64, 32, sad_64x32_sse2_vp9),
- make_tuple(32, 64, sad_32x64_sse2_vp9),
- make_tuple(32, 32, sad_32x32_sse2_vp9),
- make_tuple(32, 16, sad_32x16_sse2_vp9),
- make_tuple(16, 32, sad_16x32_sse2_vp9),
- make_tuple(16, 16, sad_16x16_sse2_vp9),
- make_tuple(16, 8, sad_16x8_sse2_vp9),
- make_tuple(8, 16, sad_8x16_sse2_vp9),
- make_tuple(8, 8, sad_8x8_sse2_vp9),
- make_tuple(8, 4, sad_8x4_sse2_vp9),
-#endif
-#endif
+const SadMxNFunc sad64x64_sse2 = vpx_sad64x64_sse2;
+const SadMxNFunc sad64x32_sse2 = vpx_sad64x32_sse2;
+const SadMxNFunc sad32x64_sse2 = vpx_sad32x64_sse2;
+const SadMxNFunc sad32x32_sse2 = vpx_sad32x32_sse2;
+const SadMxNFunc sad32x16_sse2 = vpx_sad32x16_sse2;
+const SadMxNFunc sad16x32_sse2 = vpx_sad16x32_sse2;
+const SadMxNFunc sad16x16_sse2 = vpx_sad16x16_sse2;
+const SadMxNFunc sad16x8_sse2 = vpx_sad16x8_sse2;
+const SadMxNFunc sad8x16_sse2 = vpx_sad8x16_sse2;
+const SadMxNFunc sad8x8_sse2 = vpx_sad8x8_sse2;
+const SadMxNFunc sad8x4_sse2 = vpx_sad8x4_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNFunc highbd_sad64x64_sse2 = vpx_highbd_sad64x64_sse2;
+const SadMxNFunc highbd_sad64x32_sse2 = vpx_highbd_sad64x32_sse2;
+const SadMxNFunc highbd_sad32x64_sse2 = vpx_highbd_sad32x64_sse2;
+const SadMxNFunc highbd_sad32x32_sse2 = vpx_highbd_sad32x32_sse2;
+const SadMxNFunc highbd_sad32x16_sse2 = vpx_highbd_sad32x16_sse2;
+const SadMxNFunc highbd_sad16x32_sse2 = vpx_highbd_sad16x32_sse2;
+const SadMxNFunc highbd_sad16x16_sse2 = vpx_highbd_sad16x16_sse2;
+const SadMxNFunc highbd_sad16x8_sse2 = vpx_highbd_sad16x8_sse2;
+const SadMxNFunc highbd_sad8x16_sse2 = vpx_highbd_sad8x16_sse2;
+const SadMxNFunc highbd_sad8x8_sse2 = vpx_highbd_sad8x8_sse2;
+const SadMxNFunc highbd_sad8x4_sse2 = vpx_highbd_sad8x4_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
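+// In each tuple below, the fourth element is the bit depth handed to the
+// test; -1 selects the ordinary 8-bit path (a reading of the test harness,
+// not something this patch states).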
+const SadMxNParam sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_sse2, -1),
+ make_tuple(64, 32, sad64x32_sse2, -1),
+ make_tuple(32, 64, sad32x64_sse2, -1),
+ make_tuple(32, 32, sad32x32_sse2, -1),
+ make_tuple(32, 16, sad32x16_sse2, -1),
+ make_tuple(16, 32, sad16x32_sse2, -1),
+ make_tuple(16, 16, sad16x16_sse2, -1),
+ make_tuple(16, 8, sad16x8_sse2, -1),
+ make_tuple(8, 16, sad8x16_sse2, -1),
+ make_tuple(8, 8, sad8x8_sse2, -1),
+ make_tuple(8, 4, sad8x4_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
};
INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
-#if CONFIG_VP9_ENCODER
-#if CONFIG_USE_X86INC
-const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
-const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
-INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
- make_tuple(64, 64, sad_64x64x4d_sse2),
- make_tuple(64, 32, sad_64x32x4d_sse2),
- make_tuple(32, 64, sad_32x64x4d_sse2),
- make_tuple(32, 32, sad_32x32x4d_sse2),
- make_tuple(32, 16, sad_32x16x4d_sse2),
- make_tuple(16, 32, sad_16x32x4d_sse2),
- make_tuple(16, 16, sad_16x16x4d_sse2),
- make_tuple(16, 8, sad_16x8x4d_sse2),
- make_tuple(8, 16, sad_8x16x4d_sse2),
- make_tuple(8, 8, sad_8x8x4d_sse2),
- make_tuple(8, 4, sad_8x4x4d_sse2)));
-#endif
-#endif
-#endif
+const SadMxNAvgFunc sad64x64_avg_sse2 = vpx_sad64x64_avg_sse2;
+const SadMxNAvgFunc sad64x32_avg_sse2 = vpx_sad64x32_avg_sse2;
+const SadMxNAvgFunc sad32x64_avg_sse2 = vpx_sad32x64_avg_sse2;
+const SadMxNAvgFunc sad32x32_avg_sse2 = vpx_sad32x32_avg_sse2;
+const SadMxNAvgFunc sad32x16_avg_sse2 = vpx_sad32x16_avg_sse2;
+const SadMxNAvgFunc sad16x32_avg_sse2 = vpx_sad16x32_avg_sse2;
+const SadMxNAvgFunc sad16x16_avg_sse2 = vpx_sad16x16_avg_sse2;
+const SadMxNAvgFunc sad16x8_avg_sse2 = vpx_sad16x8_avg_sse2;
+const SadMxNAvgFunc sad8x16_avg_sse2 = vpx_sad8x16_avg_sse2;
+const SadMxNAvgFunc sad8x8_avg_sse2 = vpx_sad8x8_avg_sse2;
+const SadMxNAvgFunc sad8x4_avg_sse2 = vpx_sad8x4_avg_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgFunc highbd_sad64x64_avg_sse2 = vpx_highbd_sad64x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad64x32_avg_sse2 = vpx_highbd_sad64x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x64_avg_sse2 = vpx_highbd_sad32x64_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x32_avg_sse2 = vpx_highbd_sad32x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad32x16_avg_sse2 = vpx_highbd_sad32x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x32_avg_sse2 = vpx_highbd_sad16x32_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x16_avg_sse2 = vpx_highbd_sad16x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad16x8_avg_sse2 = vpx_highbd_sad16x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x16_avg_sse2 = vpx_highbd_sad8x16_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x8_avg_sse2 = vpx_highbd_sad8x8_avg_sse2;
+const SadMxNAvgFunc highbd_sad8x4_avg_sse2 = vpx_highbd_sad8x4_avg_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNAvgParam avg_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_sse2, -1),
+ make_tuple(64, 32, sad64x32_avg_sse2, -1),
+ make_tuple(32, 64, sad32x64_avg_sse2, -1),
+ make_tuple(32, 32, sad32x32_avg_sse2, -1),
+ make_tuple(32, 16, sad32x16_avg_sse2, -1),
+ make_tuple(16, 32, sad16x32_avg_sse2, -1),
+ make_tuple(16, 16, sad16x16_avg_sse2, -1),
+ make_tuple(16, 8, sad16x8_avg_sse2, -1),
+ make_tuple(8, 16, sad8x16_avg_sse2, -1),
+ make_tuple(8, 8, sad8x8_avg_sse2, -1),
+ make_tuple(8, 4, sad8x4_avg_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64_avg_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32_avg_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64_avg_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32_avg_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16_avg_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32_avg_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16_avg_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8_avg_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16_avg_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8_avg_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4_avg_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
+
+const SadMxNx4Func sad64x64x4d_sse2 = vpx_sad64x64x4d_sse2;
+const SadMxNx4Func sad64x32x4d_sse2 = vpx_sad64x32x4d_sse2;
+const SadMxNx4Func sad32x64x4d_sse2 = vpx_sad32x64x4d_sse2;
+const SadMxNx4Func sad32x32x4d_sse2 = vpx_sad32x32x4d_sse2;
+const SadMxNx4Func sad32x16x4d_sse2 = vpx_sad32x16x4d_sse2;
+const SadMxNx4Func sad16x32x4d_sse2 = vpx_sad16x32x4d_sse2;
+const SadMxNx4Func sad16x16x4d_sse2 = vpx_sad16x16x4d_sse2;
+const SadMxNx4Func sad16x8x4d_sse2 = vpx_sad16x8x4d_sse2;
+const SadMxNx4Func sad8x16x4d_sse2 = vpx_sad8x16x4d_sse2;
+const SadMxNx4Func sad8x8x4d_sse2 = vpx_sad8x8x4d_sse2;
+const SadMxNx4Func sad8x4x4d_sse2 = vpx_sad8x4x4d_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Func highbd_sad64x64x4d_sse2 = vpx_highbd_sad64x64x4d_sse2;
+const SadMxNx4Func highbd_sad64x32x4d_sse2 = vpx_highbd_sad64x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x64x4d_sse2 = vpx_highbd_sad32x64x4d_sse2;
+const SadMxNx4Func highbd_sad32x32x4d_sse2 = vpx_highbd_sad32x32x4d_sse2;
+const SadMxNx4Func highbd_sad32x16x4d_sse2 = vpx_highbd_sad32x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x32x4d_sse2 = vpx_highbd_sad16x32x4d_sse2;
+const SadMxNx4Func highbd_sad16x16x4d_sse2 = vpx_highbd_sad16x16x4d_sse2;
+const SadMxNx4Func highbd_sad16x8x4d_sse2 = vpx_highbd_sad16x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x16x4d_sse2 = vpx_highbd_sad8x16x4d_sse2;
+const SadMxNx4Func highbd_sad8x8x4d_sse2 = vpx_highbd_sad8x8x4d_sse2;
+const SadMxNx4Func highbd_sad8x4x4d_sse2 = vpx_highbd_sad8x4x4d_sse2;
+const SadMxNx4Func highbd_sad4x8x4d_sse2 = vpx_highbd_sad4x8x4d_sse2;
+const SadMxNx4Func highbd_sad4x4x4d_sse2 = vpx_highbd_sad4x4x4d_sse2;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+const SadMxNx4Param x4d_sse2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_sse2, -1),
+ make_tuple(64, 32, sad64x32x4d_sse2, -1),
+ make_tuple(32, 64, sad32x64x4d_sse2, -1),
+ make_tuple(32, 32, sad32x32x4d_sse2, -1),
+ make_tuple(32, 16, sad32x16x4d_sse2, -1),
+ make_tuple(16, 32, sad16x32x4d_sse2, -1),
+ make_tuple(16, 16, sad16x16x4d_sse2, -1),
+ make_tuple(16, 8, sad16x8x4d_sse2, -1),
+ make_tuple(8, 16, sad8x16x4d_sse2, -1),
+ make_tuple(8, 8, sad8x8x4d_sse2, -1),
+ make_tuple(8, 4, sad8x4x4d_sse2, -1),
+#if CONFIG_VP9_HIGHBITDEPTH
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 8),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 8),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 8),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 8),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 8),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 8),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 8),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 8),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 8),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 8),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 8),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 8),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 8),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 10),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 10),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 10),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 10),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 10),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 10),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 10),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 10),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 10),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 10),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 10),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 10),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 10),
+ make_tuple(64, 64, highbd_sad64x64x4d_sse2, 12),
+ make_tuple(64, 32, highbd_sad64x32x4d_sse2, 12),
+ make_tuple(32, 64, highbd_sad32x64x4d_sse2, 12),
+ make_tuple(32, 32, highbd_sad32x32x4d_sse2, 12),
+ make_tuple(32, 16, highbd_sad32x16x4d_sse2, 12),
+ make_tuple(16, 32, highbd_sad16x32x4d_sse2, 12),
+ make_tuple(16, 16, highbd_sad16x16x4d_sse2, 12),
+ make_tuple(16, 8, highbd_sad16x8x4d_sse2, 12),
+ make_tuple(8, 16, highbd_sad8x16x4d_sse2, 12),
+ make_tuple(8, 8, highbd_sad8x8x4d_sse2, 12),
+ make_tuple(8, 4, highbd_sad8x4x4d_sse2, 12),
+ make_tuple(4, 8, highbd_sad4x8x4d_sse2, 12),
+ make_tuple(4, 4, highbd_sad4x4x4d_sse2, 12),
+#endif // CONFIG_VP9_HIGHBITDEPTH
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSE2
#if HAVE_SSE3
-#if CONFIG_VP8_ENCODER
-const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
-const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
- make_tuple(16, 16, sad_16x16x4d_sse3),
- make_tuple(16, 8, sad_16x8x4d_sse3),
- make_tuple(8, 16, sad_8x16x4d_sse3),
- make_tuple(8, 8, sad_8x8x4d_sse3),
- make_tuple(4, 4, sad_4x4x4d_sse3)));
-#endif
-#endif
+// The only functions are x3 variants, which do not have tests.
+#endif // HAVE_SSE3
#if HAVE_SSSE3
-#if CONFIG_USE_X86INC
-#if CONFIG_VP8_ENCODER
-const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
-INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_sse3)));
-#endif
-#endif
-#endif
+// The only functions are x3 variants, which do not have tests.
+#endif // HAVE_SSSE3
+
+#if HAVE_SSE4_1
+// The only functions are x8 variants, which do not have tests.
+#endif // HAVE_SSE4_1
+
+#if HAVE_AVX2
+const SadMxNFunc sad64x64_avx2 = vpx_sad64x64_avx2;
+const SadMxNFunc sad64x32_avx2 = vpx_sad64x32_avx2;
+const SadMxNFunc sad32x64_avx2 = vpx_sad32x64_avx2;
+const SadMxNFunc sad32x32_avx2 = vpx_sad32x32_avx2;
+const SadMxNFunc sad32x16_avx2 = vpx_sad32x16_avx2;
+const SadMxNParam avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avx2, -1),
+ make_tuple(64, 32, sad64x32_avx2, -1),
+ make_tuple(32, 64, sad32x64_avx2, -1),
+ make_tuple(32, 32, sad32x32_avx2, -1),
+ make_tuple(32, 16, sad32x16_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
+
+const SadMxNAvgFunc sad64x64_avg_avx2 = vpx_sad64x64_avg_avx2;
+const SadMxNAvgFunc sad64x32_avg_avx2 = vpx_sad64x32_avg_avx2;
+const SadMxNAvgFunc sad32x64_avg_avx2 = vpx_sad32x64_avg_avx2;
+const SadMxNAvgFunc sad32x32_avg_avx2 = vpx_sad32x32_avg_avx2;
+const SadMxNAvgFunc sad32x16_avg_avx2 = vpx_sad32x16_avg_avx2;
+const SadMxNAvgParam avg_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_avx2, -1),
+ make_tuple(64, 32, sad64x32_avg_avx2, -1),
+ make_tuple(32, 64, sad32x64_avg_avx2, -1),
+ make_tuple(32, 32, sad32x32_avg_avx2, -1),
+ make_tuple(32, 16, sad32x16_avg_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
+
+const SadMxNx4Func sad64x64x4d_avx2 = vpx_sad64x64x4d_avx2;
+const SadMxNx4Func sad32x32x4d_avx2 = vpx_sad32x32x4d_avx2;
+const SadMxNx4Param x4d_avx2_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_avx2, -1),
+ make_tuple(32, 32, sad32x32x4d_avx2, -1),
+};
+INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
+#endif // HAVE_AVX2
+
+//------------------------------------------------------------------------------
+// MIPS functions
+#if HAVE_MSA
+const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
+const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
+const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
+const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
+const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
+const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
+const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
+const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
+const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
+const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
+const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
+const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
+const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
+const SadMxNParam msa_tests[] = {
+ make_tuple(64, 64, sad64x64_msa, -1),
+ make_tuple(64, 32, sad64x32_msa, -1),
+ make_tuple(32, 64, sad32x64_msa, -1),
+ make_tuple(32, 32, sad32x32_msa, -1),
+ make_tuple(32, 16, sad32x16_msa, -1),
+ make_tuple(16, 32, sad16x32_msa, -1),
+ make_tuple(16, 16, sad16x16_msa, -1),
+ make_tuple(16, 8, sad16x8_msa, -1),
+ make_tuple(8, 16, sad8x16_msa, -1),
+ make_tuple(8, 8, sad8x8_msa, -1),
+ make_tuple(8, 4, sad8x4_msa, -1),
+ make_tuple(4, 8, sad4x8_msa, -1),
+ make_tuple(4, 4, sad4x4_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
+
+const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
+const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
+const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
+const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
+const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
+const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
+const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
+const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
+const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
+const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
+const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
+const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
+const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
+const SadMxNAvgParam avg_msa_tests[] = {
+ make_tuple(64, 64, sad64x64_avg_msa, -1),
+ make_tuple(64, 32, sad64x32_avg_msa, -1),
+ make_tuple(32, 64, sad32x64_avg_msa, -1),
+ make_tuple(32, 32, sad32x32_avg_msa, -1),
+ make_tuple(32, 16, sad32x16_avg_msa, -1),
+ make_tuple(16, 32, sad16x32_avg_msa, -1),
+ make_tuple(16, 16, sad16x16_avg_msa, -1),
+ make_tuple(16, 8, sad16x8_avg_msa, -1),
+ make_tuple(8, 16, sad8x16_avg_msa, -1),
+ make_tuple(8, 8, sad8x8_avg_msa, -1),
+ make_tuple(8, 4, sad8x4_avg_msa, -1),
+ make_tuple(4, 8, sad4x8_avg_msa, -1),
+ make_tuple(4, 4, sad4x4_avg_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
+
+const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
+const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
+const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
+const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
+const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
+const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
+const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
+const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
+const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
+const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
+const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
+const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
+const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
+const SadMxNx4Param x4d_msa_tests[] = {
+ make_tuple(64, 64, sad64x64x4d_msa, -1),
+ make_tuple(64, 32, sad64x32x4d_msa, -1),
+ make_tuple(32, 64, sad32x64x4d_msa, -1),
+ make_tuple(32, 32, sad32x32x4d_msa, -1),
+ make_tuple(32, 16, sad32x16x4d_msa, -1),
+ make_tuple(16, 32, sad16x32x4d_msa, -1),
+ make_tuple(16, 16, sad16x16x4d_msa, -1),
+ make_tuple(16, 8, sad16x8x4d_msa, -1),
+ make_tuple(8, 16, sad8x16x4d_msa, -1),
+ make_tuple(8, 8, sad8x8x4d_msa, -1),
+ make_tuple(8, 4, sad8x4x4d_msa, -1),
+ make_tuple(4, 8, sad4x8x4d_msa, -1),
+ make_tuple(4, 4, sad4x4x4d_msa, -1),
+};
+INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
source_data_ = reinterpret_cast<uint8_t*>(
reference_data_ = reinterpret_cast<uint8_t*>(
|
source_data8_ = reinterpret_cast<uint8_t*>(
reference_data8_ = reinterpret_cast<uint8_t*>(
second_pred8_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, 64*64));
source_data16_ = reinterpret_cast<uint16_t*>(
vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
reference_data16_ = reinterpret_cast<uint16_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
second_pred16_ = reinterpret_cast<uint16_t*>(
vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t)));
|
150,879 |
virtual void SetUp() {
svc_.encoding_mode = INTER_LAYER_PREDICTION_IP;
svc_.log_level = SVC_LOG_DEBUG;
svc_.log_print = 0;
codec_iface_ = vpx_codec_vp9_cx();
const vpx_codec_err_t res =
vpx_codec_enc_config_default(codec_iface_, &codec_enc_, 0);
EXPECT_EQ(VPX_CODEC_OK, res);
codec_enc_.g_w = kWidth;
codec_enc_.g_h = kHeight;
codec_enc_.g_timebase.num = 1;
codec_enc_.g_timebase.den = 60;
codec_enc_.kf_min_dist = 100;
codec_enc_.kf_max_dist = 100;
vpx_codec_dec_cfg_t dec_cfg = {0};
VP9CodecFactory codec_factory;
decoder_ = codec_factory.CreateDecoder(dec_cfg, 0);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
svc_.log_level = SVC_LOG_DEBUG;
svc_.log_print = 0;
codec_iface_ = vpx_codec_vp9_cx();
const vpx_codec_err_t res =
vpx_codec_enc_config_default(codec_iface_, &codec_enc_, 0);
EXPECT_EQ(VPX_CODEC_OK, res);
codec_enc_.g_w = kWidth;
codec_enc_.g_h = kHeight;
codec_enc_.g_timebase.num = 1;
codec_enc_.g_timebase.den = 60;
codec_enc_.kf_min_dist = 100;
codec_enc_.kf_max_dist = 100;
vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
VP9CodecFactory codec_factory;
decoder_ = codec_factory.CreateDecoder(dec_cfg, 0);
tile_columns_ = 0;
tile_rows_ = 0;
}
|
@@ -13,6 +13,9 @@
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/i420_video_source.h"
+
+#include "vp9/decoder/vp9_decoder.h"
+
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
@@ -21,6 +24,7 @@
using libvpx_test::CodecFactory;
using libvpx_test::Decoder;
+using libvpx_test::DxDataIterator;
using libvpx_test::VP9CodecFactory;
class SvcTest : public ::testing::Test {
@@ -31,7 +35,6 @@
SvcTest()
: codec_iface_(0),
test_file_name_("hantro_collage_w352h288.yuv"),
- stats_file_name_("hantro_collage_w352h288.stat"),
codec_initialized_(false),
decoder_(0) {
memset(&svc_, 0, sizeof(svc_));
@@ -42,7 +45,6 @@
virtual ~SvcTest() {}
virtual void SetUp() {
- svc_.encoding_mode = INTER_LAYER_PREDICTION_IP;
svc_.log_level = SVC_LOG_DEBUG;
svc_.log_print = 0;
@@ -58,15 +60,254 @@
codec_enc_.kf_min_dist = 100;
codec_enc_.kf_max_dist = 100;
- vpx_codec_dec_cfg_t dec_cfg = {0};
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
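+    // Value-initialization zeroes every member of the decoder config;
+    // presumably preferred over "= {0}" so all fields stay cleared without
+    // missing-field-initializer warnings (motivation inferred, not stated).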
VP9CodecFactory codec_factory;
decoder_ = codec_factory.CreateDecoder(dec_cfg, 0);
+
+ tile_columns_ = 0;
+ tile_rows_ = 0;
}
virtual void TearDown() {
- vpx_svc_release(&svc_);
+ ReleaseEncoder();
delete(decoder_);
+ }
+
+ void InitializeEncoder() {
+ const vpx_codec_err_t res =
+ vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ vpx_codec_control(&codec_, VP8E_SET_CPUUSED, 4); // Make the test faster
+ vpx_codec_control(&codec_, VP9E_SET_TILE_COLUMNS, tile_columns_);
+ vpx_codec_control(&codec_, VP9E_SET_TILE_ROWS, tile_rows_);
+ codec_initialized_ = true;
+ }
+
+ void ReleaseEncoder() {
+ vpx_svc_release(&svc_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
+ codec_initialized_ = false;
+ }
+
+ void GetStatsData(std::string *const stats_buf) {
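+    // Concatenates first-pass VPX_CODEC_STATS_PKT payloads into stats_buf;
+    // Pass2EncodeNFrames() later feeds the combined buffer back through
+    // rc_twopass_stats_in.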
+ vpx_codec_iter_t iter = NULL;
+ const vpx_codec_cx_pkt_t *cx_pkt;
+
+ while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
+ if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
+ EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
+ ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
+ stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
+ cx_pkt->data.twopass_stats.sz);
+ }
+ }
+ }
+
+ void Pass1EncodeNFrames(const int n, const int layers,
+ std::string *const stats_buf) {
+ vpx_codec_err_t res;
+
+ ASSERT_GT(n, 0);
+ ASSERT_GT(layers, 0);
+ svc_.spatial_layers = layers;
+ codec_enc_.g_pass = VPX_RC_FIRST_PASS;
+ InitializeEncoder();
+
+ libvpx_test::I420VideoSource video(test_file_name_,
+ codec_enc_.g_w, codec_enc_.g_h,
+ codec_enc_.g_timebase.den,
+ codec_enc_.g_timebase.num, 0, 30);
+ video.Begin();
+
+ for (int i = 0; i < n; ++i) {
+ res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ GetStatsData(stats_buf);
+ video.Next();
+ }
+
+ // Flush encoder and test EOS packet.
+ res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ GetStatsData(stats_buf);
+
+ ReleaseEncoder();
+ }
+
+ void StoreFrames(const size_t max_frame_received,
+ struct vpx_fixed_buf *const outputs,
+ size_t *const frame_received) {
+ vpx_codec_iter_t iter = NULL;
+ const vpx_codec_cx_pkt_t *cx_pkt;
+
+ while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
+ if (cx_pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ const size_t frame_size = cx_pkt->data.frame.sz;
+
+ EXPECT_GT(frame_size, 0U);
+ ASSERT_TRUE(cx_pkt->data.frame.buf != NULL);
+ ASSERT_LT(*frame_received, max_frame_received);
+
+ if (*frame_received == 0)
+ EXPECT_EQ(1, !!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY));
+
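+        // The extra 16 bytes beyond frame_size look like defensive slack for
+        // the in-place superframe-index rewrite in DropEnhancementLayers()
+        // (an inference, not a documented contract).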
+ outputs[*frame_received].buf = malloc(frame_size + 16);
+ ASSERT_TRUE(outputs[*frame_received].buf != NULL);
+ memcpy(outputs[*frame_received].buf, cx_pkt->data.frame.buf,
+ frame_size);
+ outputs[*frame_received].sz = frame_size;
+ ++(*frame_received);
+ }
+ }
+ }
+
+ void Pass2EncodeNFrames(std::string *const stats_buf,
+ const int n, const int layers,
+ struct vpx_fixed_buf *const outputs) {
+ vpx_codec_err_t res;
+ size_t frame_received = 0;
+
+ ASSERT_TRUE(outputs != NULL);
+ ASSERT_GT(n, 0);
+ ASSERT_GT(layers, 0);
+ svc_.spatial_layers = layers;
+ codec_enc_.rc_target_bitrate = 500;
+ if (codec_enc_.g_pass == VPX_RC_LAST_PASS) {
+ ASSERT_TRUE(stats_buf != NULL);
+ ASSERT_GT(stats_buf->size(), 0U);
+ codec_enc_.rc_twopass_stats_in.buf = &(*stats_buf)[0];
+ codec_enc_.rc_twopass_stats_in.sz = stats_buf->size();
+ }
+ InitializeEncoder();
+
+ libvpx_test::I420VideoSource video(test_file_name_,
+ codec_enc_.g_w, codec_enc_.g_h,
+ codec_enc_.g_timebase.den,
+ codec_enc_.g_timebase.num, 0, 30);
+ video.Begin();
+
+ for (int i = 0; i < n; ++i) {
+ res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ StoreFrames(n, outputs, &frame_received);
+ video.Next();
+ }
+
+ // Flush encoder.
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ StoreFrames(n, outputs, &frame_received);
+
+ EXPECT_EQ(frame_received, static_cast<size_t>(n));
+
+ ReleaseEncoder();
+ }
+
+ void DecodeNFrames(const struct vpx_fixed_buf *const inputs, const int n) {
+ int decoded_frames = 0;
+ int received_frames = 0;
+
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(n, 0);
+
+ for (int i = 0; i < n; ++i) {
+ ASSERT_TRUE(inputs[i].buf != NULL);
+ ASSERT_GT(inputs[i].sz, 0U);
+ const vpx_codec_err_t res_dec =
+ decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
+ inputs[i].sz);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+
+ DxDataIterator dec_iter = decoder_->GetDxData();
+ while (dec_iter.Next() != NULL) {
+ ++received_frames;
+ }
+ }
+ EXPECT_EQ(decoded_frames, n);
+ EXPECT_EQ(received_frames, n);
+ }
+
+ void DropEnhancementLayers(struct vpx_fixed_buf *const inputs,
+ const int num_super_frames,
+ const int remained_spatial_layers) {
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(num_super_frames, 0);
+ ASSERT_GT(remained_spatial_layers, 0);
+
+ for (int i = 0; i < num_super_frames; ++i) {
+ uint32_t frame_sizes[8] = {0};
+ int frame_count = 0;
+ int frames_found = 0;
+ int frame;
+ ASSERT_TRUE(inputs[i].buf != NULL);
+ ASSERT_GT(inputs[i].sz, 0U);
+
+ vpx_codec_err_t res =
+ vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
+ inputs[i].sz, frame_sizes, &frame_count,
+ NULL, NULL);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+
+ if (frame_count == 0) {
+ // There's no super frame but only a single frame.
+ ASSERT_EQ(1, remained_spatial_layers);
+ } else {
+ // Found a super frame.
+ uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
+ uint8_t *frame_start = frame_data;
+ for (frame = 0; frame < frame_count; ++frame) {
+ // Looking for a visible frame.
+ if (frame_data[0] & 0x02) {
+ ++frames_found;
+ if (frames_found == remained_spatial_layers)
+ break;
+ }
+ frame_data += frame_sizes[frame];
+ }
+ ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
+ << "remained_spatial_layers: " << remained_spatial_layers
+ << " super_frame: " << i;
+ if (frame == frame_count - 1)
+ continue;
+
+ frame_data += frame_sizes[frame];
+
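+        // The arithmetic below assumes the VP9 superframe index layout read
+        // by vp9_parse_superframe_index(): a marker byte 0b110sslll (ss =
+        // bytes per size minus one, i.e. "mag"; lll = frame count minus one),
+        // then mag bytes per frame size, then the marker repeated -- hence
+        // an index of 2 + mag * frame_count bytes at the end of the buffer.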
+ // We need to add one more frame for multiple frame contexts.
+ uint8_t marker =
+ static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frame_count;
+ const size_t new_index_sz = 2 + mag * (frame + 1);
+ marker &= 0x0f8;
+ marker |= frame;
+
+ // Copy existing frame sizes.
+ memmove(frame_data + 1, frame_start + inputs[i].sz - index_sz + 1,
+ new_index_sz - 2);
+ // New marker.
+ frame_data[0] = marker;
+ frame_data += (mag * (frame + 1) + 1);
+
+ *frame_data++ = marker;
+ inputs[i].sz = frame_data - frame_start;
+ }
+ }
+ }
+
+ void FreeBitstreamBuffers(struct vpx_fixed_buf *const inputs, const int n) {
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(n, 0);
+
+ for (int i = 0; i < n; ++i) {
+ free(inputs[i].buf);
+ inputs[i].buf = NULL;
+ inputs[i].sz = 0;
+ }
}
SvcContext svc_;
@@ -74,9 +315,10 @@
struct vpx_codec_enc_cfg codec_enc_;
vpx_codec_iface_t *codec_iface_;
std::string test_file_name_;
- std::string stats_file_name_;
bool codec_initialized_;
Decoder *decoder_;
+ int tile_columns_;
+ int tile_rows_;
};
TEST_F(SvcTest, SvcInit) {
@@ -96,22 +338,13 @@
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
svc_.spatial_layers = 0; // use default layers
- res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(VPX_SS_DEFAULT_LAYERS, svc_.spatial_layers);
}
TEST_F(SvcTest, InitTwoLayers) {
svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16*16"); // invalid scale values
- vpx_codec_err_t res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16"); // valid scale values
- res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
}
TEST_F(SvcTest, InvalidOptions) {
@@ -125,30 +358,18 @@
}
TEST_F(SvcTest, SetLayersOption) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "layers=3");
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "spatial-layers=3");
EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(3, svc_.spatial_layers);
}
-TEST_F(SvcTest, SetEncodingMode) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "encoding-mode=alt-ip");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
- EXPECT_EQ(ALT_INTER_LAYER_PREDICTION_IP, svc_.encoding_mode);
-}
-
TEST_F(SvcTest, SetMultipleOptions) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "layers=2 encoding-mode=ip");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ vpx_codec_err_t res =
+ vpx_svc_set_options(&svc_, "spatial-layers=2 scale-factors=1/3,2/3");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(2, svc_.spatial_layers);
- EXPECT_EQ(INTER_LAYER_PREDICTION_IP, svc_.encoding_mode);
}
TEST_F(SvcTest, SetScaleFactorsOption) {
@@ -159,314 +380,418 @@
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- res = vpx_svc_set_options(&svc_, "scale-factors=1/3,2/3");
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3, 3*3");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3,2/3");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ InitializeEncoder();
}
TEST_F(SvcTest, SetQuantizersOption) {
svc_.spatial_layers = 2;
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "quantizers=not-quantizers");
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "max-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- vpx_svc_set_options(&svc_, "quantizers=40,45");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ res = vpx_svc_set_options(&svc_, "min-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "min-quantizers=40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=30,30 min-quantizers=40,40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=40,40 min-quantizers=30,30");
+ InitializeEncoder();
}
-TEST_F(SvcTest, SetKeyFrameQuantizersOption) {
- svc_.spatial_layers = 2;
- vpx_codec_err_t res = vpx_svc_set_options(&svc_,
- "quantizers-keyframe=not-quantizers");
+TEST_F(SvcTest, SetAutoAltRefOption) {
+ svc_.spatial_layers = 5;
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "auto-alt-refs=none");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- vpx_svc_set_options(&svc_, "quantizers-keyframe=40,45");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetQuantizers) {
- vpx_codec_err_t res = vpx_svc_set_quantizers(NULL, "40,30", 0);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, NULL, 0);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- svc_.spatial_layers = 2;
- res = vpx_svc_set_quantizers(&svc_, "40", 0);
+ res = vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1,1,0");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- res = vpx_svc_set_quantizers(&svc_, "40,30", 0);
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetKeyFrameQuantizers) {
- vpx_codec_err_t res = vpx_svc_set_quantizers(NULL, "40,31", 1);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, NULL, 1);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, "40,30", 1);
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetScaleFactors) {
- vpx_codec_err_t res = vpx_svc_set_scale_factors(NULL, "4/16,16/16");
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_scale_factors(&svc_, NULL);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- svc_.spatial_layers = 2;
- res = vpx_svc_set_scale_factors(&svc_, "4/16");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
+ InitializeEncoder();
}
// Test that decoder can handle an SVC frame as the first frame in a sequence.
-TEST_F(SvcTest, FirstFrameHasLayers) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- video.Begin();
-
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- EXPECT_EQ(VPX_CODEC_OK, res);
-
- const vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
-
- // this test fails with a decoder error
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, OnePassEncodeOneFrame) {
+ codec_enc_.g_pass = VPX_RC_ONE_PASS;
+ vpx_fixed_buf output = {0};
+ Pass2EncodeNFrames(NULL, 1, 2, &output);
+ DecodeNFrames(&output, 1);
+ FreeBitstreamBuffers(&output, 1);
}
-TEST_F(SvcTest, EncodeThreeFrames) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- // This frame is a keyframe.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
-
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
-
- // FRAME 1
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
-
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
-
- // FRAME 2
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
-
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, OnePassEncodeThreeFrames) {
+ codec_enc_.g_pass = VPX_RC_ONE_PASS;
+ codec_enc_.g_lag_in_frames = 0;
+ vpx_fixed_buf outputs[3];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(NULL, 3, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 3);
+ FreeBitstreamBuffers(&outputs[0], 3);
}
-TEST_F(SvcTest, GetLayerResolution) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,8/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
+TEST_F(SvcTest, TwoPassEncode10Frames) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- // ensure that requested layer is a valid layer
- uint32_t layer_width, layer_height;
- res = vpx_svc_get_layer_resolution(&svc_, svc_.spatial_layers,
- &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(NULL, 0, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, NULL, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, &layer_width, NULL);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(kWidth * 4 / 16, layer_width);
- EXPECT_EQ(kHeight * 4 / 16, layer_height);
-
- res = vpx_svc_get_layer_resolution(&svc_, 1, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(kWidth * 8 / 16, layer_width);
- EXPECT_EQ(kHeight * 8 / 16, layer_height);
-}
-
-TEST_F(SvcTest, FirstPassEncode) {
- svc_.spatial_layers = 2;
- codec_enc_.g_pass = VPX_RC_FIRST_PASS;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-
- // FRAME 1
- video.Next();
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-
- // Flush encoder and test EOS packet
- res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-}
-
-TEST_F(SvcTest, SecondPassEncode) {
- svc_.spatial_layers = 2;
+ // Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- FILE *const stats_file = libvpx_test::OpenTestDataFile(stats_file_name_);
- ASSERT_TRUE(stats_file != NULL) << "Stats file open failed. Filename: "
- << stats_file;
+TEST_F(SvcTest, TwoPassEncode20FramesWithAltRef) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(20, 2, &stats_buf);
- struct vpx_fixed_buf stats_buf;
- fseek(stats_file, 0, SEEK_END);
- stats_buf.sz = static_cast<size_t>(ftell(stats_file));
- fseek(stats_file, 0, SEEK_SET);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
- stats_buf.buf = malloc(stats_buf.sz);
- ASSERT_TRUE(stats_buf.buf != NULL);
- const size_t bytes_read = fread(stats_buf.buf, 1, stats_buf.sz, stats_file);
- ASSERT_EQ(bytes_read, stats_buf.sz);
- fclose(stats_file);
- codec_enc_.rc_twopass_stats_in = stats_buf;
+TEST_F(SvcTest, TwoPassEncode2SpatialLayersDecodeBaseLayerOnly) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode5SpatialLayersDecode54321Layers) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 5, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 5, &outputs[0]);
+
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 4);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 3);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 2);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2SNRLayers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
+ Pass1EncodeNFrames(20, 2, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_,
+ "auto-alt-refs=1,1 scale-factors=1/1,1/1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
+
+TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
+ Pass1EncodeNFrames(20, 3, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_,
+ "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 3, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ DropEnhancementLayers(&outputs[0], 20, 2);
+ DecodeNFrames(&outputs[0], 20);
+ DropEnhancementLayers(&outputs[0], 20, 1);
+ DecodeNFrames(&outputs[0], 20);
+
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
+
+TEST_F(SvcTest, SetMultipleFrameContextsOption) {
+ svc_.spatial_layers = 5;
vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- // This frame is a keyframe.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
+ svc_.spatial_layers = 2;
+ res = vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
+ InitializeEncoder();
+}
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, TwoPassEncode2SpatialLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- // FRAME 1
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest,
+ TwoPassEncode2SpatialLayersWithMultipleFrameContextsDecodeBaselayer) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- // FRAME 2
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, TwoPassEncode2SNRLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- free(stats_buf.buf);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode3SNRLayersWithMultipleFrameContextsDecode321Layer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
+ Pass1EncodeNFrames(10, 3, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 3, &outputs[0]);
+
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 2);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersDecodeBaseLayer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+
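+  // With two temporal layers, temporal layer 0 lands on every other frame,
+  // so the even-indexed outputs form a decodable base-layer stream.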
+ vpx_fixed_buf base_layer[5];
+ for (int i = 0; i < 5; ++i)
+ base_layer[i] = outputs[i * 2];
+
+ DecodeNFrames(&base_layer[0], 5);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode2TemporalLayersWithMultipleFrameContextsDecodeBaseLayer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+
+ vpx_fixed_buf base_layer[5];
+ for (int i = 0; i < 5; ++i)
+ base_layer[i] = outputs[i * 2];
+
+ DecodeNFrames(&base_layer[0], 5);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithTiles) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ codec_enc_.g_w = 704;
+ codec_enc_.g_h = 144;
+ tile_columns_ = 1;
+ tile_rows_ = 1;
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode2TemporalLayersWithMultipleFrameContextsAndTiles) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ codec_enc_.g_w = 704;
+ codec_enc_.g_h = 144;
+ tile_columns_ = 1;
+ tile_rows_ = 1;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
}
} // namespace
|
CWE-119
|
svc_.encoding_mode = INTER_LAYER_PREDICTION_IP;
vpx_codec_dec_cfg_t dec_cfg = {0};
|
vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
tile_columns_ = 0;
tile_rows_ = 0;
|
150,880 |
SvcTest()
: codec_iface_(0),
test_file_name_("hantro_collage_w352h288.yuv"),
stats_file_name_("hantro_collage_w352h288.stat"),
codec_initialized_(false),
decoder_(0) {
memset(&svc_, 0, sizeof(svc_));
memset(&codec_, 0, sizeof(codec_));
memset(&codec_enc_, 0, sizeof(codec_enc_));
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
SvcTest()
: codec_iface_(0),
test_file_name_("hantro_collage_w352h288.yuv"),
codec_initialized_(false),
decoder_(0) {
memset(&svc_, 0, sizeof(svc_));
memset(&codec_, 0, sizeof(codec_));
memset(&codec_enc_, 0, sizeof(codec_enc_));
}
|
@@ -13,6 +13,9 @@
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/i420_video_source.h"
+
+#include "vp9/decoder/vp9_decoder.h"
+
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
@@ -21,6 +24,7 @@
using libvpx_test::CodecFactory;
using libvpx_test::Decoder;
+using libvpx_test::DxDataIterator;
using libvpx_test::VP9CodecFactory;
class SvcTest : public ::testing::Test {
@@ -31,7 +35,6 @@
SvcTest()
: codec_iface_(0),
test_file_name_("hantro_collage_w352h288.yuv"),
- stats_file_name_("hantro_collage_w352h288.stat"),
codec_initialized_(false),
decoder_(0) {
memset(&svc_, 0, sizeof(svc_));
@@ -42,7 +45,6 @@
virtual ~SvcTest() {}
virtual void SetUp() {
- svc_.encoding_mode = INTER_LAYER_PREDICTION_IP;
svc_.log_level = SVC_LOG_DEBUG;
svc_.log_print = 0;
@@ -58,15 +60,254 @@
codec_enc_.kf_min_dist = 100;
codec_enc_.kf_max_dist = 100;
- vpx_codec_dec_cfg_t dec_cfg = {0};
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
VP9CodecFactory codec_factory;
decoder_ = codec_factory.CreateDecoder(dec_cfg, 0);
+
+ tile_columns_ = 0;
+ tile_rows_ = 0;
}
virtual void TearDown() {
- vpx_svc_release(&svc_);
+ ReleaseEncoder();
delete(decoder_);
+ }
+
+ void InitializeEncoder() {
+ const vpx_codec_err_t res =
+ vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ vpx_codec_control(&codec_, VP8E_SET_CPUUSED, 4); // Make the test faster
+ vpx_codec_control(&codec_, VP9E_SET_TILE_COLUMNS, tile_columns_);
+ vpx_codec_control(&codec_, VP9E_SET_TILE_ROWS, tile_rows_);
+ codec_initialized_ = true;
+ }
+
+ void ReleaseEncoder() {
+ vpx_svc_release(&svc_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
+ codec_initialized_ = false;
+ }
+
+ void GetStatsData(std::string *const stats_buf) {
+ vpx_codec_iter_t iter = NULL;
+ const vpx_codec_cx_pkt_t *cx_pkt;
+
+ while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
+ if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
+ EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
+ ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
+ stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
+ cx_pkt->data.twopass_stats.sz);
+ }
+ }
+ }
+
+ void Pass1EncodeNFrames(const int n, const int layers,
+ std::string *const stats_buf) {
+ vpx_codec_err_t res;
+
+ ASSERT_GT(n, 0);
+ ASSERT_GT(layers, 0);
+ svc_.spatial_layers = layers;
+ codec_enc_.g_pass = VPX_RC_FIRST_PASS;
+ InitializeEncoder();
+
+ libvpx_test::I420VideoSource video(test_file_name_,
+ codec_enc_.g_w, codec_enc_.g_h,
+ codec_enc_.g_timebase.den,
+ codec_enc_.g_timebase.num, 0, 30);
+ video.Begin();
+
+ for (int i = 0; i < n; ++i) {
+ res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ GetStatsData(stats_buf);
+ video.Next();
+ }
+
+ // Flush encoder and test EOS packet.
+ res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ GetStatsData(stats_buf);
+
+ ReleaseEncoder();
+ }
+
+ void StoreFrames(const size_t max_frame_received,
+ struct vpx_fixed_buf *const outputs,
+ size_t *const frame_received) {
+ vpx_codec_iter_t iter = NULL;
+ const vpx_codec_cx_pkt_t *cx_pkt;
+
+ while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
+ if (cx_pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ const size_t frame_size = cx_pkt->data.frame.sz;
+
+ EXPECT_GT(frame_size, 0U);
+ ASSERT_TRUE(cx_pkt->data.frame.buf != NULL);
+ ASSERT_LT(*frame_received, max_frame_received);
+
+ if (*frame_received == 0)
+ EXPECT_EQ(1, !!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY));
+
+ outputs[*frame_received].buf = malloc(frame_size + 16);
+ ASSERT_TRUE(outputs[*frame_received].buf != NULL);
+ memcpy(outputs[*frame_received].buf, cx_pkt->data.frame.buf,
+ frame_size);
+ outputs[*frame_received].sz = frame_size;
+ ++(*frame_received);
+ }
+ }
+ }
+
+ void Pass2EncodeNFrames(std::string *const stats_buf,
+ const int n, const int layers,
+ struct vpx_fixed_buf *const outputs) {
+ vpx_codec_err_t res;
+ size_t frame_received = 0;
+
+ ASSERT_TRUE(outputs != NULL);
+ ASSERT_GT(n, 0);
+ ASSERT_GT(layers, 0);
+ svc_.spatial_layers = layers;
+ codec_enc_.rc_target_bitrate = 500;
+ if (codec_enc_.g_pass == VPX_RC_LAST_PASS) {
+ ASSERT_TRUE(stats_buf != NULL);
+ ASSERT_GT(stats_buf->size(), 0U);
+ codec_enc_.rc_twopass_stats_in.buf = &(*stats_buf)[0];
+ codec_enc_.rc_twopass_stats_in.sz = stats_buf->size();
+ }
+ InitializeEncoder();
+
+ libvpx_test::I420VideoSource video(test_file_name_,
+ codec_enc_.g_w, codec_enc_.g_h,
+ codec_enc_.g_timebase.den,
+ codec_enc_.g_timebase.num, 0, 30);
+ video.Begin();
+
+ for (int i = 0; i < n; ++i) {
+ res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ StoreFrames(n, outputs, &frame_received);
+ video.Next();
+ }
+
+ // Flush encoder.
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ StoreFrames(n, outputs, &frame_received);
+
+ EXPECT_EQ(frame_received, static_cast<size_t>(n));
+
+ ReleaseEncoder();
+ }
+
+ void DecodeNFrames(const struct vpx_fixed_buf *const inputs, const int n) {
+ int decoded_frames = 0;
+ int received_frames = 0;
+
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(n, 0);
+
+ for (int i = 0; i < n; ++i) {
+ ASSERT_TRUE(inputs[i].buf != NULL);
+ ASSERT_GT(inputs[i].sz, 0U);
+ const vpx_codec_err_t res_dec =
+ decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
+ inputs[i].sz);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+
+ DxDataIterator dec_iter = decoder_->GetDxData();
+ while (dec_iter.Next() != NULL) {
+ ++received_frames;
+ }
+ }
+ EXPECT_EQ(decoded_frames, n);
+ EXPECT_EQ(received_frames, n);
+ }
+
+ void DropEnhancementLayers(struct vpx_fixed_buf *const inputs,
+ const int num_super_frames,
+ const int remained_spatial_layers) {
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(num_super_frames, 0);
+ ASSERT_GT(remained_spatial_layers, 0);
+
+ for (int i = 0; i < num_super_frames; ++i) {
+ uint32_t frame_sizes[8] = {0};
+ int frame_count = 0;
+ int frames_found = 0;
+ int frame;
+ ASSERT_TRUE(inputs[i].buf != NULL);
+ ASSERT_GT(inputs[i].sz, 0U);
+
+ vpx_codec_err_t res =
+ vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
+ inputs[i].sz, frame_sizes, &frame_count,
+ NULL, NULL);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+
+ if (frame_count == 0) {
+ // There's no super frame but only a single frame.
+ ASSERT_EQ(1, remained_spatial_layers);
+ } else {
+ // Found a super frame.
+ uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
+ uint8_t *frame_start = frame_data;
+ for (frame = 0; frame < frame_count; ++frame) {
+ // Looking for a visible frame.
+ if (frame_data[0] & 0x02) {
+ ++frames_found;
+ if (frames_found == remained_spatial_layers)
+ break;
+ }
+ frame_data += frame_sizes[frame];
+ }
+ ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
+ << "remained_spatial_layers: " << remained_spatial_layers
+ << " super_frame: " << i;
+ if (frame == frame_count - 1)
+ continue;
+
+ frame_data += frame_sizes[frame];
+
+ // We need to add one more frame for multiple frame contexts.
+ uint8_t marker =
+ static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frame_count;
+ const size_t new_index_sz = 2 + mag * (frame + 1);
+ marker &= 0x0f8;
+ marker |= frame;
+
+ // Copy existing frame sizes.
+ memmove(frame_data + 1, frame_start + inputs[i].sz - index_sz + 1,
+ new_index_sz - 2);
+ // New marker.
+ frame_data[0] = marker;
+ frame_data += (mag * (frame + 1) + 1);
+
+ *frame_data++ = marker;
+ inputs[i].sz = frame_data - frame_start;
+ }
+ }
+ }
+
+ void FreeBitstreamBuffers(struct vpx_fixed_buf *const inputs, const int n) {
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(n, 0);
+
+ for (int i = 0; i < n; ++i) {
+ free(inputs[i].buf);
+ inputs[i].buf = NULL;
+ inputs[i].sz = 0;
+ }
}
SvcContext svc_;
@@ -74,9 +315,10 @@
struct vpx_codec_enc_cfg codec_enc_;
vpx_codec_iface_t *codec_iface_;
std::string test_file_name_;
- std::string stats_file_name_;
bool codec_initialized_;
Decoder *decoder_;
+ int tile_columns_;
+ int tile_rows_;
};
TEST_F(SvcTest, SvcInit) {
@@ -96,22 +338,13 @@
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
svc_.spatial_layers = 0; // use default layers
- res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(VPX_SS_DEFAULT_LAYERS, svc_.spatial_layers);
}
TEST_F(SvcTest, InitTwoLayers) {
svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16*16"); // invalid scale values
- vpx_codec_err_t res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16"); // valid scale values
- res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
}
TEST_F(SvcTest, InvalidOptions) {
@@ -125,30 +358,18 @@
}
TEST_F(SvcTest, SetLayersOption) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "layers=3");
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "spatial-layers=3");
EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(3, svc_.spatial_layers);
}
-TEST_F(SvcTest, SetEncodingMode) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "encoding-mode=alt-ip");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
- EXPECT_EQ(ALT_INTER_LAYER_PREDICTION_IP, svc_.encoding_mode);
-}
-
TEST_F(SvcTest, SetMultipleOptions) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "layers=2 encoding-mode=ip");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ vpx_codec_err_t res =
+ vpx_svc_set_options(&svc_, "spatial-layers=2 scale-factors=1/3,2/3");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(2, svc_.spatial_layers);
- EXPECT_EQ(INTER_LAYER_PREDICTION_IP, svc_.encoding_mode);
}
TEST_F(SvcTest, SetScaleFactorsOption) {
@@ -159,314 +380,418 @@
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- res = vpx_svc_set_options(&svc_, "scale-factors=1/3,2/3");
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3, 3*3");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3,2/3");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ InitializeEncoder();
}
TEST_F(SvcTest, SetQuantizersOption) {
svc_.spatial_layers = 2;
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "quantizers=not-quantizers");
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "max-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- vpx_svc_set_options(&svc_, "quantizers=40,45");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ res = vpx_svc_set_options(&svc_, "min-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "min-quantizers=40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=30,30 min-quantizers=40,40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=40,40 min-quantizers=30,30");
+ InitializeEncoder();
}
-TEST_F(SvcTest, SetKeyFrameQuantizersOption) {
- svc_.spatial_layers = 2;
- vpx_codec_err_t res = vpx_svc_set_options(&svc_,
- "quantizers-keyframe=not-quantizers");
+TEST_F(SvcTest, SetAutoAltRefOption) {
+ svc_.spatial_layers = 5;
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "auto-alt-refs=none");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- vpx_svc_set_options(&svc_, "quantizers-keyframe=40,45");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetQuantizers) {
- vpx_codec_err_t res = vpx_svc_set_quantizers(NULL, "40,30", 0);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, NULL, 0);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- svc_.spatial_layers = 2;
- res = vpx_svc_set_quantizers(&svc_, "40", 0);
+ res = vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1,1,0");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- res = vpx_svc_set_quantizers(&svc_, "40,30", 0);
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetKeyFrameQuantizers) {
- vpx_codec_err_t res = vpx_svc_set_quantizers(NULL, "40,31", 1);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, NULL, 1);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, "40,30", 1);
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetScaleFactors) {
- vpx_codec_err_t res = vpx_svc_set_scale_factors(NULL, "4/16,16/16");
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_scale_factors(&svc_, NULL);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- svc_.spatial_layers = 2;
- res = vpx_svc_set_scale_factors(&svc_, "4/16");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
+ InitializeEncoder();
}
// Test that decoder can handle an SVC frame as the first frame in a sequence.
-TEST_F(SvcTest, FirstFrameHasLayers) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- video.Begin();
-
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- EXPECT_EQ(VPX_CODEC_OK, res);
-
- const vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
-
- // this test fails with a decoder error
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, OnePassEncodeOneFrame) {
+ codec_enc_.g_pass = VPX_RC_ONE_PASS;
+ vpx_fixed_buf output = {0};
+ Pass2EncodeNFrames(NULL, 1, 2, &output);
+ DecodeNFrames(&output, 1);
+ FreeBitstreamBuffers(&output, 1);
}
-TEST_F(SvcTest, EncodeThreeFrames) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- // This frame is a keyframe.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
-
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
-
- // FRAME 1
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
-
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
-
- // FRAME 2
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
-
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, OnePassEncodeThreeFrames) {
+ codec_enc_.g_pass = VPX_RC_ONE_PASS;
+ codec_enc_.g_lag_in_frames = 0;
+ vpx_fixed_buf outputs[3];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(NULL, 3, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 3);
+ FreeBitstreamBuffers(&outputs[0], 3);
}
-TEST_F(SvcTest, GetLayerResolution) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,8/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
+TEST_F(SvcTest, TwoPassEncode10Frames) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- // ensure that requested layer is a valid layer
- uint32_t layer_width, layer_height;
- res = vpx_svc_get_layer_resolution(&svc_, svc_.spatial_layers,
- &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(NULL, 0, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, NULL, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, &layer_width, NULL);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(kWidth * 4 / 16, layer_width);
- EXPECT_EQ(kHeight * 4 / 16, layer_height);
-
- res = vpx_svc_get_layer_resolution(&svc_, 1, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(kWidth * 8 / 16, layer_width);
- EXPECT_EQ(kHeight * 8 / 16, layer_height);
-}
-
-TEST_F(SvcTest, FirstPassEncode) {
- svc_.spatial_layers = 2;
- codec_enc_.g_pass = VPX_RC_FIRST_PASS;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-
- // FRAME 1
- video.Next();
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-
- // Flush encoder and test EOS packet
- res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-}
-
-TEST_F(SvcTest, SecondPassEncode) {
- svc_.spatial_layers = 2;
+ // Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- FILE *const stats_file = libvpx_test::OpenTestDataFile(stats_file_name_);
- ASSERT_TRUE(stats_file != NULL) << "Stats file open failed. Filename: "
- << stats_file;
+TEST_F(SvcTest, TwoPassEncode20FramesWithAltRef) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(20, 2, &stats_buf);
- struct vpx_fixed_buf stats_buf;
- fseek(stats_file, 0, SEEK_END);
- stats_buf.sz = static_cast<size_t>(ftell(stats_file));
- fseek(stats_file, 0, SEEK_SET);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
- stats_buf.buf = malloc(stats_buf.sz);
- ASSERT_TRUE(stats_buf.buf != NULL);
- const size_t bytes_read = fread(stats_buf.buf, 1, stats_buf.sz, stats_file);
- ASSERT_EQ(bytes_read, stats_buf.sz);
- fclose(stats_file);
- codec_enc_.rc_twopass_stats_in = stats_buf;
+TEST_F(SvcTest, TwoPassEncode2SpatialLayersDecodeBaseLayerOnly) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode5SpatialLayersDecode54321Layers) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 5, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 5, &outputs[0]);
+
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 4);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 3);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 2);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2SNRLayers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
+ Pass1EncodeNFrames(20, 2, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_,
+ "auto-alt-refs=1,1 scale-factors=1/1,1/1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
+
+TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
+ Pass1EncodeNFrames(20, 3, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_,
+ "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 3, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ DropEnhancementLayers(&outputs[0], 20, 2);
+ DecodeNFrames(&outputs[0], 20);
+ DropEnhancementLayers(&outputs[0], 20, 1);
+ DecodeNFrames(&outputs[0], 20);
+
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
+
+TEST_F(SvcTest, SetMultipleFrameContextsOption) {
+ svc_.spatial_layers = 5;
vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- // This frame is a keyframe.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
+ svc_.spatial_layers = 2;
+ res = vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
+ InitializeEncoder();
+}
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, TwoPassEncode2SpatialLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- // FRAME 1
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest,
+ TwoPassEncode2SpatialLayersWithMultipleFrameContextsDecodeBaselayer) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- // FRAME 2
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, TwoPassEncode2SNRLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- free(stats_buf.buf);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode3SNRLayersWithMultipleFrameContextsDecode321Layer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
+ Pass1EncodeNFrames(10, 3, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 3, &outputs[0]);
+
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 2);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersDecodeBaseLayer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+
+ vpx_fixed_buf base_layer[5];
+ for (int i = 0; i < 5; ++i)
+ base_layer[i] = outputs[i * 2];
+
+ DecodeNFrames(&base_layer[0], 5);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode2TemporalLayersWithMultipleFrameContextsDecodeBaseLayer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+
+ vpx_fixed_buf base_layer[5];
+ for (int i = 0; i < 5; ++i)
+ base_layer[i] = outputs[i * 2];
+
+ DecodeNFrames(&base_layer[0], 5);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithTiles) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ codec_enc_.g_w = 704;
+ codec_enc_.g_h = 144;
+ tile_columns_ = 1;
+ tile_rows_ = 1;
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode2TemporalLayersWithMultipleFrameContextsAndTiles) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ codec_enc_.g_w = 704;
+ codec_enc_.g_h = 144;
+ tile_columns_ = 1;
+ tile_rows_ = 1;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
}
} // namespace
|
CWE-119
|
stats_file_name_("hantro_collage_w352h288.stat"),
| null |
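The same patch also swaps vpx_codec_dec_cfg_t dec_cfg = {0}; for value-initialization with vpx_codec_dec_cfg_t(). A minimal sketch of the difference, assuming the usual rationale of silencing missing-field-initializer warnings (DecCfg is a hypothetical stand-in for the real config struct): both forms zero every member, but value-initialization names no field, so it stays warning-free if the struct grows.

#include <cassert>

// Hypothetical stand-in for vpx_codec_dec_cfg_t.
struct DecCfg { unsigned threads; unsigned w; unsigned h; };

int main() {
  DecCfg a = {0};       // aggregate init: first member explicit, rest zeroed;
                        // compilers may warn (-Wmissing-field-initializers)
  DecCfg b = DecCfg();  // value-initialization: every member zeroed, silently
  assert(a.threads == 0 && a.w == 0 && a.h == 0);
  assert(b.threads == 0 && b.w == 0 && b.h == 0);
  return 0;
}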
150,881 |
virtual void TearDown() {
vpx_svc_release(&svc_);
delete(decoder_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void TearDown() {
ReleaseEncoder();
delete(decoder_);
}
void InitializeEncoder() {
const vpx_codec_err_t res =
vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_OK, res);
vpx_codec_control(&codec_, VP8E_SET_CPUUSED, 4); // Make the test faster
vpx_codec_control(&codec_, VP9E_SET_TILE_COLUMNS, tile_columns_);
vpx_codec_control(&codec_, VP9E_SET_TILE_ROWS, tile_rows_);
codec_initialized_ = true;
}
void ReleaseEncoder() {
vpx_svc_release(&svc_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
codec_initialized_ = false;
}
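  // Accumulates the encoder's VPX_CODEC_STATS_PKT payloads into stats_buf,
  // which later feeds rc_twopass_stats_in for the second pass.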
void GetStatsData(std::string *const stats_buf) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
cx_pkt->data.twopass_stats.sz);
}
}
}
void Pass1EncodeNFrames(const int n, const int layers,
std::string *const stats_buf) {
vpx_codec_err_t res;
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.g_pass = VPX_RC_FIRST_PASS;
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_,
codec_enc_.g_w, codec_enc_.g_h,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
video.Next();
}
// Flush encoder and test EOS packet.
res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
ReleaseEncoder();
}
void StoreFrames(const size_t max_frame_received,
struct vpx_fixed_buf *const outputs,
size_t *const frame_received) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const size_t frame_size = cx_pkt->data.frame.sz;
EXPECT_GT(frame_size, 0U);
ASSERT_TRUE(cx_pkt->data.frame.buf != NULL);
ASSERT_LT(*frame_received, max_frame_received);
if (*frame_received == 0)
EXPECT_EQ(1, !!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY));
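        // Over-allocate by 16 bytes of slack; only frame_size bytes are
        // copied in below.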
outputs[*frame_received].buf = malloc(frame_size + 16);
ASSERT_TRUE(outputs[*frame_received].buf != NULL);
memcpy(outputs[*frame_received].buf, cx_pkt->data.frame.buf,
frame_size);
outputs[*frame_received].sz = frame_size;
++(*frame_received);
}
}
}
void Pass2EncodeNFrames(std::string *const stats_buf,
const int n, const int layers,
struct vpx_fixed_buf *const outputs) {
vpx_codec_err_t res;
size_t frame_received = 0;
ASSERT_TRUE(outputs != NULL);
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.rc_target_bitrate = 500;
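    // For the final pass, hand the accumulated first-pass stats back to the
    // encoder via rc_twopass_stats_in.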
if (codec_enc_.g_pass == VPX_RC_LAST_PASS) {
ASSERT_TRUE(stats_buf != NULL);
ASSERT_GT(stats_buf->size(), 0U);
codec_enc_.rc_twopass_stats_in.buf = &(*stats_buf)[0];
codec_enc_.rc_twopass_stats_in.sz = stats_buf->size();
}
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_,
codec_enc_.g_w, codec_enc_.g_h,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
video.Next();
}
// Flush encoder.
res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
video.duration(), VPX_DL_GOOD_QUALITY);
EXPECT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
EXPECT_EQ(frame_received, static_cast<size_t>(n));
ReleaseEncoder();
}
void DecodeNFrames(const struct vpx_fixed_buf *const inputs, const int n) {
int decoded_frames = 0;
int received_frames = 0;
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
const vpx_codec_err_t res_dec =
decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
inputs[i].sz);
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
++decoded_frames;
DxDataIterator dec_iter = decoder_->GetDxData();
while (dec_iter.Next() != NULL) {
++received_frames;
}
}
EXPECT_EQ(decoded_frames, n);
EXPECT_EQ(received_frames, n);
}
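  // Truncates each superframe after the remained_spatial_layers-th visible
  // frame and rebuilds the trailing index (marker, size fields, marker) so
  // the shortened buffer still parses as a valid superframe.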
void DropEnhancementLayers(struct vpx_fixed_buf *const inputs,
const int num_super_frames,
const int remained_spatial_layers) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(num_super_frames, 0);
ASSERT_GT(remained_spatial_layers, 0);
for (int i = 0; i < num_super_frames; ++i) {
uint32_t frame_sizes[8] = {0};
int frame_count = 0;
int frames_found = 0;
int frame;
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
vpx_codec_err_t res =
vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
inputs[i].sz, frame_sizes, &frame_count,
NULL, NULL);
ASSERT_EQ(VPX_CODEC_OK, res);
if (frame_count == 0) {
// There's no super frame but only a single frame.
ASSERT_EQ(1, remained_spatial_layers);
} else {
// Found a super frame.
uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
uint8_t *frame_start = frame_data;
for (frame = 0; frame < frame_count; ++frame) {
// Looking for a visible frame.
if (frame_data[0] & 0x02) {
++frames_found;
if (frames_found == remained_spatial_layers)
break;
}
frame_data += frame_sizes[frame];
}
ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
<< "remained_spatial_layers: " << remained_spatial_layers
<< " super_frame: " << i;
if (frame == frame_count - 1)
continue;
frame_data += frame_sizes[frame];
// We need to add one more frame for multiple frame contexts.
uint8_t marker =
static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
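        // Superframe index trailer: bits 3-4 of the marker encode the width
        // of each frame-size field (mag bytes); the full index occupies
        // 2 + mag * frame_count bytes at the end of the buffer.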
const uint32_t mag = ((marker >> 3) & 0x3) + 1;
const size_t index_sz = 2 + mag * frame_count;
const size_t new_index_sz = 2 + mag * (frame + 1);
marker &= 0x0f8;
marker |= frame;
// Copy existing frame sizes.
memmove(frame_data + 1, frame_start + inputs[i].sz - index_sz + 1,
new_index_sz - 2);
// New marker.
frame_data[0] = marker;
frame_data += (mag * (frame + 1) + 1);
*frame_data++ = marker;
inputs[i].sz = frame_data - frame_start;
}
}
}
void FreeBitstreamBuffers(struct vpx_fixed_buf *const inputs, const int n) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
free(inputs[i].buf);
inputs[i].buf = NULL;
inputs[i].sz = 0;
}
}
|
@@ -13,6 +13,9 @@
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/i420_video_source.h"
+
+#include "vp9/decoder/vp9_decoder.h"
+
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
@@ -21,6 +24,7 @@
using libvpx_test::CodecFactory;
using libvpx_test::Decoder;
+using libvpx_test::DxDataIterator;
using libvpx_test::VP9CodecFactory;
class SvcTest : public ::testing::Test {
@@ -31,7 +35,6 @@
SvcTest()
: codec_iface_(0),
test_file_name_("hantro_collage_w352h288.yuv"),
- stats_file_name_("hantro_collage_w352h288.stat"),
codec_initialized_(false),
decoder_(0) {
memset(&svc_, 0, sizeof(svc_));
@@ -42,7 +45,6 @@
virtual ~SvcTest() {}
virtual void SetUp() {
- svc_.encoding_mode = INTER_LAYER_PREDICTION_IP;
svc_.log_level = SVC_LOG_DEBUG;
svc_.log_print = 0;
@@ -58,15 +60,254 @@
codec_enc_.kf_min_dist = 100;
codec_enc_.kf_max_dist = 100;
- vpx_codec_dec_cfg_t dec_cfg = {0};
+ vpx_codec_dec_cfg_t dec_cfg = vpx_codec_dec_cfg_t();
VP9CodecFactory codec_factory;
decoder_ = codec_factory.CreateDecoder(dec_cfg, 0);
+
+ tile_columns_ = 0;
+ tile_rows_ = 0;
}
virtual void TearDown() {
- vpx_svc_release(&svc_);
+ ReleaseEncoder();
delete(decoder_);
+ }
+
+ void InitializeEncoder() {
+ const vpx_codec_err_t res =
+ vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ vpx_codec_control(&codec_, VP8E_SET_CPUUSED, 4); // Make the test faster
+ vpx_codec_control(&codec_, VP9E_SET_TILE_COLUMNS, tile_columns_);
+ vpx_codec_control(&codec_, VP9E_SET_TILE_ROWS, tile_rows_);
+ codec_initialized_ = true;
+ }
+
+ void ReleaseEncoder() {
+ vpx_svc_release(&svc_);
if (codec_initialized_) vpx_codec_destroy(&codec_);
+ codec_initialized_ = false;
+ }
+
+ void GetStatsData(std::string *const stats_buf) {
+ vpx_codec_iter_t iter = NULL;
+ const vpx_codec_cx_pkt_t *cx_pkt;
+
+ while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
+ if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
+ EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
+ ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
+ stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
+ cx_pkt->data.twopass_stats.sz);
+ }
+ }
+ }
+
+ void Pass1EncodeNFrames(const int n, const int layers,
+ std::string *const stats_buf) {
+ vpx_codec_err_t res;
+
+ ASSERT_GT(n, 0);
+ ASSERT_GT(layers, 0);
+ svc_.spatial_layers = layers;
+ codec_enc_.g_pass = VPX_RC_FIRST_PASS;
+ InitializeEncoder();
+
+ libvpx_test::I420VideoSource video(test_file_name_,
+ codec_enc_.g_w, codec_enc_.g_h,
+ codec_enc_.g_timebase.den,
+ codec_enc_.g_timebase.num, 0, 30);
+ video.Begin();
+
+ for (int i = 0; i < n; ++i) {
+ res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ GetStatsData(stats_buf);
+ video.Next();
+ }
+
+ // Flush encoder and test EOS packet.
+ res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ GetStatsData(stats_buf);
+
+ ReleaseEncoder();
+ }
+
+ void StoreFrames(const size_t max_frame_received,
+ struct vpx_fixed_buf *const outputs,
+ size_t *const frame_received) {
+ vpx_codec_iter_t iter = NULL;
+ const vpx_codec_cx_pkt_t *cx_pkt;
+
+ while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
+ if (cx_pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ const size_t frame_size = cx_pkt->data.frame.sz;
+
+ EXPECT_GT(frame_size, 0U);
+ ASSERT_TRUE(cx_pkt->data.frame.buf != NULL);
+ ASSERT_LT(*frame_received, max_frame_received);
+
+ if (*frame_received == 0)
+ EXPECT_EQ(1, !!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY));
+
+ outputs[*frame_received].buf = malloc(frame_size + 16);
+ ASSERT_TRUE(outputs[*frame_received].buf != NULL);
+ memcpy(outputs[*frame_received].buf, cx_pkt->data.frame.buf,
+ frame_size);
+ outputs[*frame_received].sz = frame_size;
+ ++(*frame_received);
+ }
+ }
+ }
+
+ void Pass2EncodeNFrames(std::string *const stats_buf,
+ const int n, const int layers,
+ struct vpx_fixed_buf *const outputs) {
+ vpx_codec_err_t res;
+ size_t frame_received = 0;
+
+ ASSERT_TRUE(outputs != NULL);
+ ASSERT_GT(n, 0);
+ ASSERT_GT(layers, 0);
+ svc_.spatial_layers = layers;
+ codec_enc_.rc_target_bitrate = 500;
+ if (codec_enc_.g_pass == VPX_RC_LAST_PASS) {
+ ASSERT_TRUE(stats_buf != NULL);
+ ASSERT_GT(stats_buf->size(), 0U);
+ codec_enc_.rc_twopass_stats_in.buf = &(*stats_buf)[0];
+ codec_enc_.rc_twopass_stats_in.sz = stats_buf->size();
+ }
+ InitializeEncoder();
+
+ libvpx_test::I420VideoSource video(test_file_name_,
+ codec_enc_.g_w, codec_enc_.g_h,
+ codec_enc_.g_timebase.den,
+ codec_enc_.g_timebase.num, 0, 30);
+ video.Begin();
+
+ for (int i = 0; i < n; ++i) {
+ res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ StoreFrames(n, outputs, &frame_received);
+ video.Next();
+ }
+
+ // Flush encoder.
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ StoreFrames(n, outputs, &frame_received);
+
+ EXPECT_EQ(frame_received, static_cast<size_t>(n));
+
+ ReleaseEncoder();
+ }
+
+ void DecodeNFrames(const struct vpx_fixed_buf *const inputs, const int n) {
+ int decoded_frames = 0;
+ int received_frames = 0;
+
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(n, 0);
+
+ for (int i = 0; i < n; ++i) {
+ ASSERT_TRUE(inputs[i].buf != NULL);
+ ASSERT_GT(inputs[i].sz, 0U);
+ const vpx_codec_err_t res_dec =
+ decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
+ inputs[i].sz);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+
+ DxDataIterator dec_iter = decoder_->GetDxData();
+ while (dec_iter.Next() != NULL) {
+ ++received_frames;
+ }
+ }
+ EXPECT_EQ(decoded_frames, n);
+ EXPECT_EQ(received_frames, n);
+ }
+
+ void DropEnhancementLayers(struct vpx_fixed_buf *const inputs,
+ const int num_super_frames,
+ const int remained_spatial_layers) {
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(num_super_frames, 0);
+ ASSERT_GT(remained_spatial_layers, 0);
+
+ for (int i = 0; i < num_super_frames; ++i) {
+ uint32_t frame_sizes[8] = {0};
+ int frame_count = 0;
+ int frames_found = 0;
+ int frame;
+ ASSERT_TRUE(inputs[i].buf != NULL);
+ ASSERT_GT(inputs[i].sz, 0U);
+
+ vpx_codec_err_t res =
+ vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
+ inputs[i].sz, frame_sizes, &frame_count,
+ NULL, NULL);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+
+ if (frame_count == 0) {
+ // There's no super frame but only a single frame.
+ ASSERT_EQ(1, remained_spatial_layers);
+ } else {
+ // Found a super frame.
+ uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
+ uint8_t *frame_start = frame_data;
+ for (frame = 0; frame < frame_count; ++frame) {
+ // Looking for a visible frame.
+ if (frame_data[0] & 0x02) {
+ ++frames_found;
+ if (frames_found == remained_spatial_layers)
+ break;
+ }
+ frame_data += frame_sizes[frame];
+ }
+ ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
+ << "remained_spatial_layers: " << remained_spatial_layers
+ << " super_frame: " << i;
+ if (frame == frame_count - 1)
+ continue;
+
+ frame_data += frame_sizes[frame];
+
+ // We need to add one more frame for multiple frame contexts.
+ uint8_t marker =
+ static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frame_count;
+ const size_t new_index_sz = 2 + mag * (frame + 1);
+ marker &= 0x0f8;
+ marker |= frame;
+
+ // Copy existing frame sizes.
+ memmove(frame_data + 1, frame_start + inputs[i].sz - index_sz + 1,
+ new_index_sz - 2);
+ // New marker.
+ frame_data[0] = marker;
+ frame_data += (mag * (frame + 1) + 1);
+
+ *frame_data++ = marker;
+ inputs[i].sz = frame_data - frame_start;
+ }
+ }
+ }
+
+ void FreeBitstreamBuffers(struct vpx_fixed_buf *const inputs, const int n) {
+ ASSERT_TRUE(inputs != NULL);
+ ASSERT_GT(n, 0);
+
+ for (int i = 0; i < n; ++i) {
+ free(inputs[i].buf);
+ inputs[i].buf = NULL;
+ inputs[i].sz = 0;
+ }
}
SvcContext svc_;
@@ -74,9 +315,10 @@
struct vpx_codec_enc_cfg codec_enc_;
vpx_codec_iface_t *codec_iface_;
std::string test_file_name_;
- std::string stats_file_name_;
bool codec_initialized_;
Decoder *decoder_;
+ int tile_columns_;
+ int tile_rows_;
};
TEST_F(SvcTest, SvcInit) {
@@ -96,22 +338,13 @@
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
svc_.spatial_layers = 0; // use default layers
- res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(VPX_SS_DEFAULT_LAYERS, svc_.spatial_layers);
}
TEST_F(SvcTest, InitTwoLayers) {
svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16*16"); // invalid scale values
- vpx_codec_err_t res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16"); // valid scale values
- res = vpx_svc_init(&svc_, &codec_, codec_iface_, &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
}
TEST_F(SvcTest, InvalidOptions) {
@@ -125,30 +358,18 @@
}
TEST_F(SvcTest, SetLayersOption) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "layers=3");
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "spatial-layers=3");
EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(3, svc_.spatial_layers);
}
-TEST_F(SvcTest, SetEncodingMode) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "encoding-mode=alt-ip");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
- EXPECT_EQ(ALT_INTER_LAYER_PREDICTION_IP, svc_.encoding_mode);
-}
-
TEST_F(SvcTest, SetMultipleOptions) {
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "layers=2 encoding-mode=ip");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ vpx_codec_err_t res =
+ vpx_svc_set_options(&svc_, "spatial-layers=2 scale-factors=1/3,2/3");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ InitializeEncoder();
EXPECT_EQ(2, svc_.spatial_layers);
- EXPECT_EQ(INTER_LAYER_PREDICTION_IP, svc_.encoding_mode);
}
TEST_F(SvcTest, SetScaleFactorsOption) {
@@ -159,314 +380,418 @@
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- res = vpx_svc_set_options(&svc_, "scale-factors=1/3,2/3");
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3, 3*3");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "scale-factors=1/3,2/3");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ InitializeEncoder();
}
TEST_F(SvcTest, SetQuantizersOption) {
svc_.spatial_layers = 2;
- vpx_codec_err_t res = vpx_svc_set_options(&svc_, "quantizers=not-quantizers");
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "max-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- vpx_svc_set_options(&svc_, "quantizers=40,45");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ res = vpx_svc_set_options(&svc_, "min-quantizers=nothing");
EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "min-quantizers=40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=30,30 min-quantizers=40,40");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
+
+ res = vpx_svc_set_options(&svc_, "max-quantizers=40,40 min-quantizers=30,30");
+ InitializeEncoder();
}
-TEST_F(SvcTest, SetKeyFrameQuantizersOption) {
- svc_.spatial_layers = 2;
- vpx_codec_err_t res = vpx_svc_set_options(&svc_,
- "quantizers-keyframe=not-quantizers");
+TEST_F(SvcTest, SetAutoAltRefOption) {
+ svc_.spatial_layers = 5;
+ vpx_codec_err_t res = vpx_svc_set_options(&svc_, "auto-alt-refs=none");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- vpx_svc_set_options(&svc_, "quantizers-keyframe=40,45");
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetQuantizers) {
- vpx_codec_err_t res = vpx_svc_set_quantizers(NULL, "40,30", 0);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, NULL, 0);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- svc_.spatial_layers = 2;
- res = vpx_svc_set_quantizers(&svc_, "40", 0);
+ res = vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1,1,0");
EXPECT_EQ(VPX_CODEC_OK, res);
res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- res = vpx_svc_set_quantizers(&svc_, "40,30", 0);
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetKeyFrameQuantizers) {
- vpx_codec_err_t res = vpx_svc_set_quantizers(NULL, "40,31", 1);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, NULL, 1);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_quantizers(&svc_, "40,30", 1);
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-}
-
-TEST_F(SvcTest, SetScaleFactors) {
- vpx_codec_err_t res = vpx_svc_set_scale_factors(NULL, "4/16,16/16");
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_scale_factors(&svc_, NULL);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- svc_.spatial_layers = 2;
- res = vpx_svc_set_scale_factors(&svc_, "4/16");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- EXPECT_EQ(VPX_CODEC_OK, res);
- res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
+ InitializeEncoder();
}
// Test that decoder can handle an SVC frame as the first frame in a sequence.
-TEST_F(SvcTest, FirstFrameHasLayers) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- video.Begin();
-
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- EXPECT_EQ(VPX_CODEC_OK, res);
-
- const vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
-
- // this test fails with a decoder error
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, OnePassEncodeOneFrame) {
+ codec_enc_.g_pass = VPX_RC_ONE_PASS;
+ vpx_fixed_buf output = {0};
+ Pass2EncodeNFrames(NULL, 1, 2, &output);
+ DecodeNFrames(&output, 1);
+ FreeBitstreamBuffers(&output, 1);
}
-TEST_F(SvcTest, EncodeThreeFrames) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- // This frame is a keyframe.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
-
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
-
- // FRAME 1
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
-
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
-
- // FRAME 2
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
-
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, OnePassEncodeThreeFrames) {
+ codec_enc_.g_pass = VPX_RC_ONE_PASS;
+ codec_enc_.g_lag_in_frames = 0;
+ vpx_fixed_buf outputs[3];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(NULL, 3, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 3);
+ FreeBitstreamBuffers(&outputs[0], 3);
}
-TEST_F(SvcTest, GetLayerResolution) {
- svc_.spatial_layers = 2;
- vpx_svc_set_scale_factors(&svc_, "4/16,8/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
+TEST_F(SvcTest, TwoPassEncode10Frames) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- EXPECT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- // ensure that requested layer is a valid layer
- uint32_t layer_width, layer_height;
- res = vpx_svc_get_layer_resolution(&svc_, svc_.spatial_layers,
- &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(NULL, 0, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, NULL, &layer_height);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, &layer_width, NULL);
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
-
- res = vpx_svc_get_layer_resolution(&svc_, 0, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(kWidth * 4 / 16, layer_width);
- EXPECT_EQ(kHeight * 4 / 16, layer_height);
-
- res = vpx_svc_get_layer_resolution(&svc_, 1, &layer_width, &layer_height);
- EXPECT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(kWidth * 8 / 16, layer_width);
- EXPECT_EQ(kHeight * 8 / 16, layer_height);
-}
-
-TEST_F(SvcTest, FirstPassEncode) {
- svc_.spatial_layers = 2;
- codec_enc_.g_pass = VPX_RC_FIRST_PASS;
- vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
- vpx_svc_set_quantizers(&svc_, "40,30", 0);
-
- vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
-
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-
- // FRAME 1
- video.Next();
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-
- // Flush encoder and test EOS packet
- res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_GT(vpx_svc_get_rc_stats_buffer_size(&svc_), 0U);
-}
-
-TEST_F(SvcTest, SecondPassEncode) {
- svc_.spatial_layers = 2;
+ // Second pass encode
codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- FILE *const stats_file = libvpx_test::OpenTestDataFile(stats_file_name_);
- ASSERT_TRUE(stats_file != NULL) << "Stats file open failed. Filename: "
- << stats_file;
+TEST_F(SvcTest, TwoPassEncode20FramesWithAltRef) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(20, 2, &stats_buf);
- struct vpx_fixed_buf stats_buf;
- fseek(stats_file, 0, SEEK_END);
- stats_buf.sz = static_cast<size_t>(ftell(stats_file));
- fseek(stats_file, 0, SEEK_SET);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
- stats_buf.buf = malloc(stats_buf.sz);
- ASSERT_TRUE(stats_buf.buf != NULL);
- const size_t bytes_read = fread(stats_buf.buf, 1, stats_buf.sz, stats_file);
- ASSERT_EQ(bytes_read, stats_buf.sz);
- fclose(stats_file);
- codec_enc_.rc_twopass_stats_in = stats_buf;
+TEST_F(SvcTest, TwoPassEncode2SpatialLayersDecodeBaseLayerOnly) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode5SpatialLayersDecode54321Layers) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 5, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=0,1,1,1,0");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 5, &outputs[0]);
+
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 4);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 3);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 2);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2SNRLayers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
+ Pass1EncodeNFrames(20, 2, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_,
+ "auto-alt-refs=1,1 scale-factors=1/1,1/1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
+
+TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
+ Pass1EncodeNFrames(20, 3, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ vpx_svc_set_options(&svc_,
+ "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1");
+ vpx_fixed_buf outputs[20];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 20, 3, &outputs[0]);
+ DecodeNFrames(&outputs[0], 20);
+ DropEnhancementLayers(&outputs[0], 20, 2);
+ DecodeNFrames(&outputs[0], 20);
+ DropEnhancementLayers(&outputs[0], 20, 1);
+ DecodeNFrames(&outputs[0], 20);
+
+ FreeBitstreamBuffers(&outputs[0], 20);
+}
+
+TEST_F(SvcTest, SetMultipleFrameContextsOption) {
+ svc_.spatial_layers = 5;
vpx_codec_err_t res =
- vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
- ASSERT_EQ(VPX_CODEC_OK, res);
- codec_initialized_ = true;
+ vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res);
- libvpx_test::I420VideoSource video(test_file_name_, kWidth, kHeight,
- codec_enc_.g_timebase.den,
- codec_enc_.g_timebase.num, 0, 30);
- // FRAME 0
- video.Begin();
- // This frame is a keyframe.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
+ svc_.spatial_layers = 2;
+ res = vpx_svc_set_options(&svc_, "multi-frame-contexts=1");
+ InitializeEncoder();
+}
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, TwoPassEncode2SpatialLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- // FRAME 1
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest,
+ TwoPassEncode2SpatialLayersWithMultipleFrameContextsDecodeBaselayer) {
+ // First pass encode
+ std::string stats_buf;
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- // FRAME 2
- video.Next();
- // This is a P-frame.
- res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
- video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+TEST_F(SvcTest, TwoPassEncode2SNRLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1");
+ Pass1EncodeNFrames(10, 2, &stats_buf);
- free(stats_buf.buf);
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 2, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode3SNRLayersWithMultipleFrameContextsDecode321Layer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1,1/1,1/1");
+ Pass1EncodeNFrames(10, 3, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 3, &outputs[0]);
+
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 2);
+ DecodeNFrames(&outputs[0], 10);
+ DropEnhancementLayers(&outputs[0], 10, 1);
+ DecodeNFrames(&outputs[0], 10);
+
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayers) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContexts) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersDecodeBaseLayer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+
+ vpx_fixed_buf base_layer[5];
+ for (int i = 0; i < 5; ++i)
+ base_layer[i] = outputs[i * 2];
+
+ DecodeNFrames(&base_layer[0], 5);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode2TemporalLayersWithMultipleFrameContextsDecodeBaseLayer) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+
+ vpx_fixed_buf base_layer[5];
+ for (int i = 0; i < 5; ++i)
+ base_layer[i] = outputs[i * 2];
+
+ DecodeNFrames(&base_layer[0], 5);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithTiles) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1");
+ codec_enc_.g_w = 704;
+ codec_enc_.g_h = 144;
+ tile_columns_ = 1;
+ tile_rows_ = 1;
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
+}
+
+TEST_F(SvcTest,
+ TwoPassEncode2TemporalLayersWithMultipleFrameContextsAndTiles) {
+ // First pass encode
+ std::string stats_buf;
+ vpx_svc_set_options(&svc_, "scale-factors=1/1");
+ svc_.temporal_layers = 2;
+ Pass1EncodeNFrames(10, 1, &stats_buf);
+
+ // Second pass encode
+ codec_enc_.g_pass = VPX_RC_LAST_PASS;
+ svc_.temporal_layers = 2;
+ codec_enc_.g_error_resilient = 0;
+ codec_enc_.g_w = 704;
+ codec_enc_.g_h = 144;
+ tile_columns_ = 1;
+ tile_rows_ = 1;
+ vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 "
+ "multi-frame-contexts=1");
+ vpx_fixed_buf outputs[10];
+ memset(&outputs[0], 0, sizeof(outputs));
+ Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]);
+ DecodeNFrames(&outputs[0], 10);
+ FreeBitstreamBuffers(&outputs[0], 10);
}
} // namespace
|
CWE-119
|
vpx_svc_release(&svc_);
|
ReleaseEncoder();
}
void InitializeEncoder() {
const vpx_codec_err_t res =
vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
EXPECT_EQ(VPX_CODEC_OK, res);
vpx_codec_control(&codec_, VP8E_SET_CPUUSED, 4); // Make the test faster
vpx_codec_control(&codec_, VP9E_SET_TILE_COLUMNS, tile_columns_);
vpx_codec_control(&codec_, VP9E_SET_TILE_ROWS, tile_rows_);
codec_initialized_ = true;
}
void ReleaseEncoder() {
vpx_svc_release(&svc_);
codec_initialized_ = false;
}
void GetStatsData(std::string *const stats_buf) {
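    // Drain the encoder's output queue, appending every first-pass stats
    // packet to stats_buf for use in the second pass.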
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_STATS_PKT) {
EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U);
ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL);
stats_buf->append(static_cast<char*>(cx_pkt->data.twopass_stats.buf),
cx_pkt->data.twopass_stats.sz);
}
}
}
void Pass1EncodeNFrames(const int n, const int layers,
std::string *const stats_buf) {
vpx_codec_err_t res;
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.g_pass = VPX_RC_FIRST_PASS;
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_,
codec_enc_.g_w, codec_enc_.g_h,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
video.Next();
}
// Flush encoder and test EOS packet.
res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
GetStatsData(stats_buf);
ReleaseEncoder();
}
void StoreFrames(const size_t max_frame_received,
struct vpx_fixed_buf *const outputs,
size_t *const frame_received) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *cx_pkt;
while ((cx_pkt = vpx_codec_get_cx_data(&codec_, &iter)) != NULL) {
if (cx_pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const size_t frame_size = cx_pkt->data.frame.sz;
EXPECT_GT(frame_size, 0U);
ASSERT_TRUE(cx_pkt->data.frame.buf != NULL);
ASSERT_LT(*frame_received, max_frame_received);
if (*frame_received == 0)
EXPECT_EQ(1, !!(cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY));
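        // The extra 16 bytes are presumably slack for rewriting the
        // superframe index later in DropEnhancementLayers().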
outputs[*frame_received].buf = malloc(frame_size + 16);
ASSERT_TRUE(outputs[*frame_received].buf != NULL);
memcpy(outputs[*frame_received].buf, cx_pkt->data.frame.buf,
frame_size);
outputs[*frame_received].sz = frame_size;
++(*frame_received);
}
}
}
void Pass2EncodeNFrames(std::string *const stats_buf,
const int n, const int layers,
struct vpx_fixed_buf *const outputs) {
vpx_codec_err_t res;
size_t frame_received = 0;
ASSERT_TRUE(outputs != NULL);
ASSERT_GT(n, 0);
ASSERT_GT(layers, 0);
svc_.spatial_layers = layers;
codec_enc_.rc_target_bitrate = 500;
if (codec_enc_.g_pass == VPX_RC_LAST_PASS) {
ASSERT_TRUE(stats_buf != NULL);
ASSERT_GT(stats_buf->size(), 0U);
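      // Hand the encoder a pointer into the string's own storage; stats_buf
      // must outlive the encode and stay unmodified meanwhile.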
codec_enc_.rc_twopass_stats_in.buf = &(*stats_buf)[0];
codec_enc_.rc_twopass_stats_in.sz = stats_buf->size();
}
InitializeEncoder();
libvpx_test::I420VideoSource video(test_file_name_,
codec_enc_.g_w, codec_enc_.g_h,
codec_enc_.g_timebase.den,
codec_enc_.g_timebase.num, 0, 30);
video.Begin();
for (int i = 0; i < n; ++i) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
video.Next();
}
// Flush encoder.
res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
video.duration(), VPX_DL_GOOD_QUALITY);
EXPECT_EQ(VPX_CODEC_OK, res);
StoreFrames(n, outputs, &frame_received);
EXPECT_EQ(frame_received, static_cast<size_t>(n));
ReleaseEncoder();
}
void DecodeNFrames(const struct vpx_fixed_buf *const inputs, const int n) {
int decoded_frames = 0;
int received_frames = 0;
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
const vpx_codec_err_t res_dec =
decoder_->DecodeFrame(static_cast<const uint8_t *>(inputs[i].buf),
inputs[i].sz);
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
++decoded_frames;
DxDataIterator dec_iter = decoder_->GetDxData();
while (dec_iter.Next() != NULL) {
++received_frames;
}
}
EXPECT_EQ(decoded_frames, n);
EXPECT_EQ(received_frames, n);
}
void DropEnhancementLayers(struct vpx_fixed_buf *const inputs,
const int num_super_frames,
const int remained_spatial_layers) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(num_super_frames, 0);
ASSERT_GT(remained_spatial_layers, 0);
for (int i = 0; i < num_super_frames; ++i) {
uint32_t frame_sizes[8] = {0};
int frame_count = 0;
int frames_found = 0;
int frame;
ASSERT_TRUE(inputs[i].buf != NULL);
ASSERT_GT(inputs[i].sz, 0U);
vpx_codec_err_t res =
vp9_parse_superframe_index(static_cast<const uint8_t*>(inputs[i].buf),
inputs[i].sz, frame_sizes, &frame_count,
NULL, NULL);
ASSERT_EQ(VPX_CODEC_OK, res);
if (frame_count == 0) {
      // There's no superframe index; the buffer holds a single frame.
ASSERT_EQ(1, remained_spatial_layers);
} else {
// Found a super frame.
uint8_t *frame_data = static_cast<uint8_t*>(inputs[i].buf);
uint8_t *frame_start = frame_data;
for (frame = 0; frame < frame_count; ++frame) {
// Looking for a visible frame.
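          // In a profile 0 VP9 frame, bit 1 of the first byte is the
          // show_frame flag.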
if (frame_data[0] & 0x02) {
++frames_found;
if (frames_found == remained_spatial_layers)
break;
}
frame_data += frame_sizes[frame];
}
ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. "
<< "remained_spatial_layers: " << remained_spatial_layers
<< " super_frame: " << i;
if (frame == frame_count - 1)
continue;
frame_data += frame_sizes[frame];
// We need to add one more frame for multiple frame contexts.
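        // Superframe index layout: a marker byte (0b110 in bits 7-5,
        // bytes-per-size minus one in bits 4-3, frame count minus one in
        // bits 2-0), then mag bytes per frame size, then the marker again.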
uint8_t marker =
static_cast<const uint8_t*>(inputs[i].buf)[inputs[i].sz - 1];
const uint32_t mag = ((marker >> 3) & 0x3) + 1;
const size_t index_sz = 2 + mag * frame_count;
const size_t new_index_sz = 2 + mag * (frame + 1);
marker &= 0x0f8;
marker |= frame;
// Copy existing frame sizes.
memmove(frame_data + 1, frame_start + inputs[i].sz - index_sz + 1,
new_index_sz - 2);
// New marker.
frame_data[0] = marker;
frame_data += (mag * (frame + 1) + 1);
*frame_data++ = marker;
inputs[i].sz = frame_data - frame_start;
}
}
}
void FreeBitstreamBuffers(struct vpx_fixed_buf *const inputs, const int n) {
ASSERT_TRUE(inputs != NULL);
ASSERT_GT(n, 0);
for (int i = 0; i < n; ++i) {
free(inputs[i].buf);
inputs[i].buf = NULL;
inputs[i].sz = 0;
}
|
150,882 |
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
#if ARCH_X86 || ARCH_X86_64
const int simd_caps = x86_simd_caps();
if (!(simd_caps & HAS_MMX))
append_negative_gtest_filter(":MMX/*");
if (!(simd_caps & HAS_SSE))
append_negative_gtest_filter(":SSE/*");
if (!(simd_caps & HAS_SSE2))
append_negative_gtest_filter(":SSE2/*");
if (!(simd_caps & HAS_SSE3))
append_negative_gtest_filter(":SSE3/*");
if (!(simd_caps & HAS_SSSE3))
append_negative_gtest_filter(":SSSE3/*");
if (!(simd_caps & HAS_SSE4_1))
append_negative_gtest_filter(":SSE4_1/*");
if (!(simd_caps & HAS_AVX))
append_negative_gtest_filter(":AVX/*");
if (!(simd_caps & HAS_AVX2))
append_negative_gtest_filter(":AVX2/*");
#endif
#if !CONFIG_SHARED
#if CONFIG_VP8
vp8_rtcd();
#endif
#if CONFIG_VP9
vp9_rtcd();
#endif
#endif
return RUN_ALL_TESTS();
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
#if ARCH_X86 || ARCH_X86_64
const int simd_caps = x86_simd_caps();
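  // Each pattern pair disables both plain/typed tests ("MMX.*") and
  // value-parameterized instantiations ("MMX/*") when the CPU lacks the
  // corresponding instruction set.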
if (!(simd_caps & HAS_MMX))
append_negative_gtest_filter(":MMX.*:MMX/*");
if (!(simd_caps & HAS_SSE))
append_negative_gtest_filter(":SSE.*:SSE/*");
if (!(simd_caps & HAS_SSE2))
append_negative_gtest_filter(":SSE2.*:SSE2/*");
if (!(simd_caps & HAS_SSE3))
append_negative_gtest_filter(":SSE3.*:SSE3/*");
if (!(simd_caps & HAS_SSSE3))
append_negative_gtest_filter(":SSSE3.*:SSSE3/*");
if (!(simd_caps & HAS_SSE4_1))
append_negative_gtest_filter(":SSE4_1.*:SSE4_1/*");
if (!(simd_caps & HAS_AVX))
append_negative_gtest_filter(":AVX.*:AVX/*");
if (!(simd_caps & HAS_AVX2))
append_negative_gtest_filter(":AVX2.*:AVX2/*");
#endif
#if !CONFIG_SHARED
#if CONFIG_VP8
vp8_rtcd();
#endif // CONFIG_VP8
#if CONFIG_VP9
vp9_rtcd();
#endif // CONFIG_VP9
vpx_dsp_rtcd();
vpx_scale_rtcd();
#endif // !CONFIG_SHARED
return RUN_ALL_TESTS();
}
|
@@ -8,6 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
#include "./vpx_config.h"
#if ARCH_X86 || ARCH_X86_64
#include "vpx_ports/x86.h"
@@ -15,12 +18,13 @@
extern "C" {
#if CONFIG_VP8
extern void vp8_rtcd();
-#endif
+#endif // CONFIG_VP8
#if CONFIG_VP9
extern void vp9_rtcd();
-#endif
+#endif // CONFIG_VP9
+extern void vpx_dsp_rtcd();
+extern void vpx_scale_rtcd();
}
-#include "third_party/googletest/src/include/gtest/gtest.h"
static void append_negative_gtest_filter(const char *str) {
std::string filter = ::testing::FLAGS_gtest_filter;
@@ -36,21 +40,21 @@
#if ARCH_X86 || ARCH_X86_64
const int simd_caps = x86_simd_caps();
if (!(simd_caps & HAS_MMX))
- append_negative_gtest_filter(":MMX/*");
+ append_negative_gtest_filter(":MMX.*:MMX/*");
if (!(simd_caps & HAS_SSE))
- append_negative_gtest_filter(":SSE/*");
+ append_negative_gtest_filter(":SSE.*:SSE/*");
if (!(simd_caps & HAS_SSE2))
- append_negative_gtest_filter(":SSE2/*");
+ append_negative_gtest_filter(":SSE2.*:SSE2/*");
if (!(simd_caps & HAS_SSE3))
- append_negative_gtest_filter(":SSE3/*");
+ append_negative_gtest_filter(":SSE3.*:SSE3/*");
if (!(simd_caps & HAS_SSSE3))
- append_negative_gtest_filter(":SSSE3/*");
+ append_negative_gtest_filter(":SSSE3.*:SSSE3/*");
if (!(simd_caps & HAS_SSE4_1))
- append_negative_gtest_filter(":SSE4_1/*");
+ append_negative_gtest_filter(":SSE4_1.*:SSE4_1/*");
if (!(simd_caps & HAS_AVX))
- append_negative_gtest_filter(":AVX/*");
+ append_negative_gtest_filter(":AVX.*:AVX/*");
if (!(simd_caps & HAS_AVX2))
- append_negative_gtest_filter(":AVX2/*");
+ append_negative_gtest_filter(":AVX2.*:AVX2/*");
#endif
#if !CONFIG_SHARED
@@ -59,11 +63,13 @@
#if CONFIG_VP8
vp8_rtcd();
-#endif
+#endif // CONFIG_VP8
#if CONFIG_VP9
vp9_rtcd();
-#endif
-#endif
+#endif // CONFIG_VP9
+ vpx_dsp_rtcd();
+ vpx_scale_rtcd();
+#endif // !CONFIG_SHARED
return RUN_ALL_TESTS();
}
|
CWE-119
|
append_negative_gtest_filter(":MMX/*");
append_negative_gtest_filter(":SSE/*");
append_negative_gtest_filter(":SSE2/*");
append_negative_gtest_filter(":SSE3/*");
append_negative_gtest_filter(":SSSE3/*");
append_negative_gtest_filter(":SSE4_1/*");
append_negative_gtest_filter(":AVX/*");
append_negative_gtest_filter(":AVX2/*");
#endif
#endif
#endif
|
append_negative_gtest_filter(":MMX.*:MMX/*");
append_negative_gtest_filter(":SSE.*:SSE/*");
append_negative_gtest_filter(":SSE2.*:SSE2/*");
append_negative_gtest_filter(":SSE3.*:SSE3/*");
append_negative_gtest_filter(":SSSE3.*:SSSE3/*");
append_negative_gtest_filter(":SSE4_1.*:SSE4_1/*");
append_negative_gtest_filter(":AVX.*:AVX/*");
append_negative_gtest_filter(":AVX2.*:AVX2/*");
#endif // CONFIG_VP8
#endif // CONFIG_VP9
vpx_dsp_rtcd();
vpx_scale_rtcd();
#endif // !CONFIG_SHARED
|
150,883 |
TileIndependenceTest()
: EncoderTest(GET_PARAM(0)),
md5_fw_order_(),
md5_inv_order_(),
n_tiles_(GET_PARAM(1)) {
init_flags_ = VPX_CODEC_USE_PSNR;
vpx_codec_dec_cfg_t cfg;
cfg.w = 704;
cfg.h = 144;
cfg.threads = 1;
fw_dec_ = codec_->CreateDecoder(cfg, 0);
inv_dec_ = codec_->CreateDecoder(cfg, 0);
inv_dec_->Control(VP9_INVERT_TILE_DECODE_ORDER, 1);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
TileIndependenceTest()
: EncoderTest(GET_PARAM(0)),
md5_fw_order_(),
md5_inv_order_(),
n_tiles_(GET_PARAM(1)) {
init_flags_ = VPX_CODEC_USE_PSNR;
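    // Value-initialize the decoder config so any fields not set below are
    // zeroed rather than left uninitialized.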
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.w = 704;
cfg.h = 144;
cfg.threads = 1;
fw_dec_ = codec_->CreateDecoder(cfg, 0);
inv_dec_ = codec_->CreateDecoder(cfg, 0);
inv_dec_->Control(VP9_INVERT_TILE_DECODE_ORDER, 1);
}
|
@@ -29,7 +29,7 @@
md5_inv_order_(),
n_tiles_(GET_PARAM(1)) {
init_flags_ = VPX_CODEC_USE_PSNR;
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.w = 704;
cfg.h = 144;
cfg.threads = 1;
@@ -104,4 +104,5 @@
VP9_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1));
+VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1));
} // namespace
|
CWE-119
|
vpx_codec_dec_cfg_t cfg;
|
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
|
150,884 |
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
memset(src_, 255, block_size_);
const int half = block_size_ / 2;
memset(ref_, 255, half);
memset(ref_ + half, 0, half);
unsigned int sse;
unsigned int var;
REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
const int half = block_size_ / 2;
if (!use_high_bit_depth_) {
memset(src_, 255, block_size_);
memset(ref_, 255, half);
memset(ref_ + half, 0, half);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
block_size_);
vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse;
unsigned int var;
ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
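  // Half the pixels differ by 255, so se = 255 * n/2 and sse = 255^2 * n/2,
  // giving variance = sse - se^2 / n = n * 255^2 / 4.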
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = (vpx_bit_depth_t) get<3>(params);
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_)-1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        vpx_memset16(CONVERT_TO_SHORTPTR(src_), 0, half);
+        vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, mask_, half);
+        vpx_memset16(CONVERT_TO_SHORTPTR(ref_), mask_, half);
+        vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0,
+                     half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
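// Editor's note -- minimal, self-contained illustration (not from the patch)
// of the mechanism used throughout this file: the one-line TEST_P bodies
// above simply forward to fixture methods, and each INSTANTIATE_TEST_CASE_P
// below fans a body out over a list of parameter tuples.
#include <gtest/gtest.h>
class TinyParamTest : public ::testing::TestWithParam<int> {};
TEST_P(TinyParamTest, IsNonNegative) { EXPECT_GE(GetParam(), 0); }
INSTANTIATE_TEST_CASE_P(Example, TinyParamTest, ::testing::Values(0, 1, 2));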
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
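// Editor's note -- worked example of the parameter convention inferred from
// the fixtures (not stated in this hunk): make_tuple(log2width, log2height,
// function, bit-depth tag), where tag 0 selects the 8-bit path.
#include <cstdio>
int main() {
  const int log2w = 6, log2h = 5, depth_tag = 0;  // the variance64x32_c row
  std::printf("block %dx%d (%d pixels), %s\n", 1 << log2w, 1 << log2h,
              (1 << log2w) * (1 << log2h),
              depth_tag == 0 ? "8-bit path" : "high-bit-depth path");
  return 0;
}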
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+    C, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+                      make_tuple(4, 3, highbd_12_mse16x8_c),
+                      make_tuple(3, 4, highbd_12_mse8x16_c),
+                      make_tuple(3, 3, highbd_12_mse8x8_c),
+                      make_tuple(4, 4, highbd_10_mse16x16_c),
+                      make_tuple(4, 3, highbd_10_mse16x8_c),
+                      make_tuple(3, 4, highbd_10_mse8x16_c),
+                      make_tuple(3, 3, highbd_10_mse8x8_c),
+                      make_tuple(4, 4, highbd_8_mse16x16_c),
+                      make_tuple(4, 3, highbd_8_mse16x8_c),
+                      make_tuple(3, 4, highbd_8_mse8x16_c),
+                      make_tuple(3, 3, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
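// Editor's note -- sketch of the buffer convention the high-bit-depth cases
// above appear to rely on (the mask_ semantics are assumed, not shown in
// this hunk): samples are stored as uint16_t, CONVERT_TO_SHORTPTR recovers
// that view from the fixture's byte pointer, and random input is kept legal
// for the configured depth by masking.
#include <cstdint>
static inline uint16_t clamp_to_depth(uint16_t v, int bit_depth) {
  const uint16_t mask = static_cast<uint16_t>((1 << bit_depth) - 1);
  return v & mask;  // bit_depth 10 -> [0, 1023]; bit_depth 12 -> [0, 4095]
}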
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
memset(src_, 255, block_size_);
memset(ref_, 255, half);
memset(ref_ + half, 0, half);
REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
|
if (!use_high_bit_depth_) {
memset(src_, 255, block_size_);
memset(ref_, 255, half);
memset(ref_ + half, 0, half);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
block_size_);
vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
|
150,885 |
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd.Rand8();
ref_[j] = rnd.Rand8();
}
unsigned int sse1, sse2;
unsigned int var1;
REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
log2height_, &sse2);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
if (!use_high_bit_depth_) {
src_[j] = rnd_.Rand8();
ref_[j] = rnd_.Rand8();
#if CONFIG_VP9_HIGHBITDEPTH
} else {
      CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
      CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
unsigned int sse1, sse2;
unsigned int var1;
const int stride_coeff = 1;
ASM_REGISTER_STATE_CHECK(
var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
log2height_, stride_coeff,
stride_coeff, &sse2,
use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
}
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::RefStrideTest() {
for (int i = 0; i < 10; ++i) {
int ref_stride_coeff = i % 2;
int src_stride_coeff = (i >> 1) % 2;
for (int j = 0; j < block_size_; j++) {
int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
if (!use_high_bit_depth_) {
src_[src_ind] = rnd_.Rand8();
ref_[ref_ind] = rnd_.Rand8();
#if CONFIG_VP9_HIGHBITDEPTH
} else {
      CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
      CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
unsigned int sse1, sse2;
unsigned int var1;
ASM_REGISTER_STATE_CHECK(
var1 = variance_(src_, width_ * src_stride_coeff,
ref_, width_ * ref_stride_coeff, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
log2height_, src_stride_coeff,
ref_stride_coeff, &sse2,
use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
}
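// A short note on the index math above (a reading of the code, not part of
// the original test): ref_ind/src_ind map the j-th pixel of a width_ x
// height_ block into a buffer whose row stride is stride_coeff * width_; a
// coefficient of 0 collapses every row onto row 0, while 1 keeps the rows
// contiguous, so the optimized function is exercised with both stride shapes.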
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
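// A minimal worked example of the rounding above, with hypothetical values:
// for VPX_BITS_12, sse is downshifted by 2 * (12 - 8) = 8 bits and se by
// (12 - 8) = 4 bits, each with a half-LSB added for round-to-nearest.
//   int64_t se = 100;     // (100 + 8) >> 4 == 6
//   uint64_t sse = 1000;  // (1000 + 128) >> 8 == 4
//   RoundHighBitDepth(VPX_BITS_12, &se, &sse);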
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
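// variance_ref() computes Var = SSE - SE^2 / N with N = 2^(l2w + l2h). As a
// hedged sketch on a hypothetical 4x4 block (l2w = l2h = 2, N = 16) where
// every ref/src diff is 2: se = 32 and sse = 64, so the call below returns
// 64 - ((32 * 32) >> 4) = 0, i.e. a constant offset contributes no variance.
//   uint32_t sse;  // src and ref are hypothetical 16-byte buffers
//   const uint32_t var = variance_ref(src, ref, 2, 2, 1, 1, &sse,
//                                     false, VPX_BITS_8);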
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
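// A small sketch of the bilinear step, assuming xoff = yoff = 4 on entry
// (doubled to 8 above, i.e. a half-pel offset in 16th-pel units): with
// hypothetical neighbours a1 = 0, a2 = 16, b1 = 0, b2 = 16, the horizontal
// taps give a = 0 + ((16 * 8 + 8) >> 4) = 8 and b = 8, and the vertical tap
// leaves r = 8 + ((0 * 8 + 8) >> 4) = 8, the expected half-pel average.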
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
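// With all 256 entries equal to v, the sum of squares is simply 256 * v^2,
// which stays well below 2^32 even at v = 255 (256 * 65025 = 16646400), so
// the unsigned comparison above cannot wrap.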
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+      CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+      CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+      CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+      CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
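// The expected value follows from Var = SSE - SE^2 / N: half of the block
// differs by 255, so SSE = N * 255^2 / 2 and SE = N * 255 / 2, giving
// N * 255^2 / 2 - N * 255^2 / 4 = N * 255^2 / 4, one quarter of the
// maximum, hence the test name.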
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
     rnd.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
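// In both max tests every pixel diff is 255, so the reference result is
// exactly block_size_ * 255 * 255; a smaller value from the function under
// test would point at an accumulator overflow in the optimized code.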
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
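// subpel_avg_variance_ref() differs from subpel_variance_ref() only in the
// compound prediction: the interpolated value r is averaged with second_pred
// using round-to-nearest, e.g. hypothetical r = 10 and a prediction of 13
// give (10 + 13 + 1) >> 1 == 12 before the diff against src is taken.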
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = (vpx_bit_depth_t) get<3>(params);
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+    mask_ = (1 << bit_depth_) - 1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+    C, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+                      make_tuple(4, 3, highbd_12_mse16x8_c),
+                      make_tuple(3, 4, highbd_12_mse8x16_c),
+                      make_tuple(3, 3, highbd_12_mse8x8_c),
+                      make_tuple(4, 4, highbd_10_mse16x16_c),
+                      make_tuple(4, 3, highbd_10_mse16x8_c),
+                      make_tuple(3, 4, highbd_10_mse8x16_c),
+                      make_tuple(3, 3, highbd_10_mse8x8_c),
+                      make_tuple(4, 4, highbd_8_mse16x16_c),
+                      make_tuple(4, 3, highbd_8_mse16x8_c),
+                      make_tuple(3, 4, highbd_8_mse8x16_c),
+                      make_tuple(3, 3, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
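A note on the instantiation tables above: each make_tuple packs
(log2 width, log2 height, function pointer, bit depth), with bit depth 0
selecting the plain 8-bit path. A minimal sketch of how a fixture would
unpack such a tuple (UnpackExample and its body are illustrative, not taken
from the patch; SubpixVarMxNFunc is the typedef defined in the patch):

// Sketch only; assumes gtest's tr1 tuple and the SubpixVarMxNFunc typedef.
void UnpackExample(
    const ::std::tr1::tuple<int, int, SubpixVarMxNFunc, int>& params) {
  const int width = 1 << ::std::tr1::get<0>(params);   // e.g. 6 -> 64
  const int height = 1 << ::std::tr1::get<1>(params);  // e.g. 5 -> 32
  SubpixVarMxNFunc fn = ::std::tr1::get<2>(params);    // function under test
  const int bit_depth = ::std::tr1::get<3>(params);    // 0 => 8-bit build
  (void)width; (void)height; (void)fn; (void)bit_depth;
}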
|
CWE-119
|
src_[j] = rnd.Rand8();
ref_[j] = rnd.Rand8();
REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
log2height_, &sse2);
|
if (!use_high_bit_depth_) {
src_[j] = rnd_.Rand8();
ref_[j] = rnd_.Rand8();
#if CONFIG_VP9_HIGHBITDEPTH
} else {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
const int stride_coeff = 1;
ASM_REGISTER_STATE_CHECK(
var1 = variance_(src_, width_, ref_, width_, &sse1));
log2height_, stride_coeff,
stride_coeff, &sse2,
use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
}
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::RefStrideTest() {
for (int i = 0; i < 10; ++i) {
int ref_stride_coeff = i % 2;
int src_stride_coeff = (i >> 1) % 2;
for (int j = 0; j < block_size_; j++) {
int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
if (!use_high_bit_depth_) {
src_[src_ind] = rnd_.Rand8();
ref_[ref_ind] = rnd_.Rand8();
#if CONFIG_VP9_HIGHBITDEPTH
} else {
CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
unsigned int sse1, sse2;
unsigned int var1;
ASM_REGISTER_STATE_CHECK(
var1 = variance_(src_, width_ * src_stride_coeff,
ref_, width_ * ref_stride_coeff, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
log2height_, src_stride_coeff,
ref_stride_coeff, &sse2,
use_high_bit_depth_, bit_depth_);
|
150,886 |
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
for (int x = 0; x < 16; ++x) {
for (int y = 0; y < 16; ++y) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd.Rand8();
}
unsigned int sse1, sse2;
unsigned int var1;
REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1));
const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
log2height_, x, y, &sse2);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
for (int x = 0; x < 8; ++x) {
for (int y = 0; y < 8; ++y) {
if (!use_high_bit_depth_) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd_.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd_.Rand8();
}
#if CONFIG_VP9_HIGHBITDEPTH
} else {
for (int j = 0; j < block_size_; j++) {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1));
const unsigned int var2 = subpel_variance_ref(ref_, src_,
log2width_, log2height_,
x, y, &sse2,
use_high_bit_depth_,
bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
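// Note on the fix above: x and y now sweep 0..7 (eighth-pel offsets) rather
// than 0..15, matching the shrunken filter table described in the patch
// below; subpel_variance_ref() upshifts the offsets back to 16th-pel units.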
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
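// Editorial sketch (not part of the original patch): a worked example of the
// rounding above for VPX_BITS_12, where sse drops 2 * (12 - 8) = 8 bits and
// se drops (12 - 8) = 4 bits, both with round-to-nearest:
//   int64_t se = 100;      // raw 12-bit sum of differences
//   uint64_t sse = 70000;  // raw 12-bit sum of squared differences
//   RoundHighBitDepth(12, &se, &sse);
//   // sse == (70000 + 128) >> 8 == 273, se == (100 + 8) >> 4 == 6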
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
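// Editorial note: the return statement above implements the identity
// N * Var = SSE - SE^2 / N over N = 2^(l2w + l2h) pixels. Worked example for
// a 4x4 block (l2w == l2h == 2) where every diff equals 3:
//   se = 16 * 3 = 48, sse = 16 * 9 = 144
//   variance_ref(...) == 144 - ((48 * 48) >> 4) == 144 - 144 == 0
// as expected: a constant offset between src and ref has zero variance.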
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
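// Editorial sketch (values illustrative): the bilinear filter above at
// xoff == yoff == 4 before the upshift (8 in 16th-pel units, i.e. a
// half-pel offset), with taps a1 = 10, a2 = 20, b1 = 30, b2 = 40:
//   a = 10 + (((20 - 10) * 8 + 8) >> 4) == 15
//   b = 30 + (((40 - 30) * 8 + 8) >> 4) == 35
//   r = 15 + (((35 - 15) * 8 + 8) >> 4) == 25
// i.e. the plain average of the four taps, as expected at half-pel.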
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
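// Editorial note: the expected value above follows from the variance
// identity. Half of the N = block_size_ pixels differ by 255 and half by 0,
// so se = 255 * N / 2 and sse = 255^2 * N / 2; the functions under test
// report N * Var = sse - se^2 / N
//               = 255^2 * N / 2 - 255^2 * N / 4 = block_size_ * 255 * 255 / 4.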
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
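// Editorial sketch: relative to subpel_variance_ref(), the only change above
// is the compound-prediction step, which rounds the average of the filtered
// reference r and second_pred before differencing. E.g. with r == 25 and
// second_pred[w * y + x] == 30:
//   diff = ((25 + 30 + 1) >> 1) - src[w * y + x] == 28 - src[w * y + x]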
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = (vpx_bit_depth_t) get<3>(params);
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_)-1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
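In these instantiations each tuple reads (log2 block width, log2 block height,
function, bit depth), with a bit depth of 0 selecting the ordinary 8-bit path.
A sketch of how the fixture is assumed to decode the new fourth member (member
names illustrative):

    // Inside VarianceTest<>::SetUp(), roughly:
    log2width_ = get<0>(this->GetParam());
    width_ = 1 << log2width_;
    log2height_ = get<1>(this->GetParam());
    height_ = 1 << log2height_;
    variance_ = get<2>(this->GetParam());
    if (get<3>(this->GetParam()) == 0) {
      bit_depth_ = VPX_BITS_8;
      use_high_bit_depth_ = false;
    } else {
      // Nonzero values (8, 10, 12) switch the buffers to 16-bit samples.
      bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(this->GetParam()));
      use_high_bit_depth_ = true;
    }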
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
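The sub-pixel reference these are checked against is assumed to bilinearly
interpolate the four neighbouring reference samples at the requested sub-pel
offset before differencing, along these lines (x and y walk the block, xoff
and yoff are the sub-pel offsets, and ref has stride w + 1):

    const int a1 = ref[(w + 1) * (y + 0) + (x + 0)];
    const int a2 = ref[(w + 1) * (y + 0) + (x + 1)];
    const int b1 = ref[(w + 1) * (y + 1) + (x + 0)];
    const int b2 = ref[(w + 1) * (y + 1) + (x + 1)];
    const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);  // horizontal filter
    const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
    const int r = a + (((b - a) * yoff + 8) >> 4);     // vertical filter
    const int diff = r - src[w * y + x];               // feeds the se/sse sums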
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
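The averaging variants differ only in that the interpolated prediction is
first combined with the second predictor by a rounding average before the
variance is taken, i.e. (continuing the interpolation sketch above, with sec
at stride w):

    const int avg = ROUND_POWER_OF_TWO(r + sec[w * y + x], 1);  // (v + 1) >> 1
    const int diff = avg - src[w * y + x];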
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+    C, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+                      make_tuple(4, 3, highbd_12_mse16x8_c),
+                      make_tuple(3, 4, highbd_12_mse8x16_c),
+                      make_tuple(3, 3, highbd_12_mse8x8_c),
+                      make_tuple(4, 4, highbd_10_mse16x16_c),
+                      make_tuple(4, 3, highbd_10_mse16x8_c),
+                      make_tuple(3, 4, highbd_10_mse8x16_c),
+                      make_tuple(3, 3, highbd_10_mse8x8_c),
+                      make_tuple(4, 4, highbd_8_mse16x16_c),
+                      make_tuple(4, 3, highbd_8_mse16x8_c),
+                      make_tuple(3, 4, highbd_8_mse8x16_c),
+                      make_tuple(3, 3, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
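For the high-bit-depth instantiations above, the test buffers are assumed to
be allocated as 16-bit sample arrays and handed to the vpx_highbd_* kernels
through libvpx's tagged-pointer convention (block_size and rnd here are
illustrative stand-ins for the fixture's members):

    uint16_t *const src16 = new uint16_t[block_size];
    uint8_t *const src = CONVERT_TO_BYTEPTR(src16);  // form vpx_highbd_* expects
    for (int i = 0; i < block_size; ++i) {
      // Keep samples within the configured bit depth.
      CONVERT_TO_SHORTPTR(src)[i] = rnd.Rand16() & ((1 << bit_depth) - 1);
    }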
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
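For context on the CWE-119 classification above: the subpel reference loops in the patch below index ref[(w + 1) * (y + 1) + x + 1] with y < h and x < w, so the test's ref_ allocation of block_size_ + width_ + height_ + 1 elements is exactly the (w + 1) * (h + 1) footprint they touch. A minimal standalone sketch of that sizing arithmetic (names are illustrative, not taken from the record):

// Why block_size_ + width_ + height_ + 1 == (w + 1) * (h + 1), the exact
// element count read by the (w + 1)-stride bilinear loops in the patch.
#include <cassert>

int main() {
  for (int l2w = 2; l2w <= 6; ++l2w) {
    for (int l2h = 2; l2h <= 6; ++l2h) {
      const int w = 1 << l2w, h = 1 << l2h;
      const int block_size = w * h;
      // Largest index touched: ref[(w + 1) * (y + 1) + x + 1] at
      // y == h - 1, x == w - 1.
      const int max_index = (w + 1) * h + w;
      assert(block_size + w + h + 1 == (w + 1) * (h + 1));
      assert(max_index == (w + 1) * (h + 1) - 1);  // last valid element
    }
  }
  return 0;
}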
for (int x = 0; x < 16; ++x) {
for (int y = 0; y < 16; ++y) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd.Rand8();
REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1));
const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
log2height_, x, y, &sse2);
|
for (int x = 0; x < 8; ++x) {
for (int y = 0; y < 8; ++y) {
if (!use_high_bit_depth_) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd_.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd_.Rand8();
}
#if CONFIG_VP9_HIGHBITDEPTH
} else {
for (int j = 0; j < block_size_; j++) {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1));
const unsigned int var2 = subpel_variance_ref(ref_, src_,
log2width_, log2height_,
x, y, &sse2,
use_high_bit_depth_,
bit_depth_);
|
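The lines_before/lines_after pair above shrinks the offset loops from x, y < 16 to x, y < 8. Per the comment in the patch below, the vpx_dsp functions take eighth-pel offsets and the reference upshifts them by 1 before the 16th-pel bilinear math. A small sketch of that mapping (helper name is hypothetical):

// Mirrors the reference's factor computation after the xoff <<= 1 upshift.
#include <cstdio>

static int Bilinear(int a1, int a2, int xoff_eighth) {
  const int xoff = xoff_eighth << 1;          // eighth-pel -> 16th-pel step
  return a1 + (((a2 - a1) * xoff + 8) >> 4);  // rounded interpolation
}

int main() {
  // xoff_eighth == 4 is the half-pel position, giving the midpoint.
  printf("%d\n", Bilinear(10, 20, 4));  // prints 15
  return 0;
}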
150,887 |
void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
for (int x = 0; x < 16; ++x) {
for (int y = 0; y < 16; ++y) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd.Rand8();
sec_[j] = rnd.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd.Rand8();
}
unsigned int sse1, sse2;
unsigned int var1;
REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
x, y, &sse2);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
|
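One plausible reading of the overflow behind func_before above, assuming the vpx_dsp C implementation that indexes an 8-entry bilinear filter table by xoffset/yoffset (an assumption about the failure mode, not data from the record): offsets 8..15 from the old 16-iteration loop would read past that table.

// Hedged illustration; table values follow vpx_dsp/variance.c, but treat
// the failure-mode claim as an assumption rather than record data.
#include <cstdint>
#include <cstdio>

static const uint8_t kBilinearFilters[8][2] = {
  { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
  { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 },
};

// kBilinearFilters[xoffset] is only defined for xoffset in [0, 8); the old
// test loop could pass up to 15, an out-of-bounds read (CWE-119).
static int FirstTap(int xoffset) { return kBilinearFilters[xoffset][0]; }

int main() {
  printf("%d\n", FirstTap(7));  // in-bounds use prints 16
  return 0;
}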
DoS Exec Code Overflow Mem. Corr.
| 1 |
void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
for (int x = 0; x < 8; ++x) {
for (int y = 0; y < 8; ++y) {
if (!use_high_bit_depth_) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd_.Rand8();
sec_[j] = rnd_.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd_.Rand8();
}
#if CONFIG_VP9_HIGHBITDEPTH
} else {
for (int j = 0; j < block_size_; j++) {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
ASM_REGISTER_STATE_CHECK(
var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
x, y, &sse2,
use_high_bit_depth_,
bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
|
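The fixed RefTest above exercises two roundings worth spelling out: the compound prediction averages the filtered value with second_pred via ((r + sec + 1) >> 1) (see subpel_avg_variance_ref in the patch below), and high-bit-depth results are renormalized by RoundHighBitDepth before comparison. A worked sketch with concrete numbers (values chosen for illustration):

// Both roundings, checked with small constants.
#include <cassert>
#include <cstdint>

int main() {
  // 1. Compound prediction: average with the second predictor, ties round up.
  const int r = 101, sec = 100;
  assert(((r + sec + 1) >> 1) == 101);

  // 2. 12-bit renormalization: sse downshifted by 8, se by 4, with rounding,
  //    matching the VPX_BITS_12 case of RoundHighBitDepth.
  uint64_t sse = 1000;
  int64_t se = 100;
  sse = (sse + 128) >> 8;  // (1000 + 128) / 256 -> 4
  se = (se + 8) >> 4;      // (100 + 8) / 16    -> 6
  assert(sse == 4 && se == 6);
  return 0;
}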
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+      bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+    mask_ = (1 << bit_depth_) - 1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
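+// The high-bit-depth runs reuse the same harnesses; the fourth tuple element
+// now carries the actual bit depth (8, 10 or 12) used to generate test data.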
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+    C, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+                      make_tuple(4, 3, highbd_12_mse16x8_c),
+                      make_tuple(3, 4, highbd_12_mse8x16_c),
+                      make_tuple(3, 3, highbd_12_mse8x8_c),
+                      make_tuple(4, 4, highbd_10_mse16x16_c),
+                      make_tuple(4, 3, highbd_10_mse16x8_c),
+                      make_tuple(3, 4, highbd_10_mse8x16_c),
+                      make_tuple(3, 3, highbd_10_mse8x8_c),
+                      make_tuple(4, 4, highbd_8_mse16x16_c),
+                      make_tuple(4, 3, highbd_8_mse16x8_c),
+                      make_tuple(3, 4, highbd_8_mse8x16_c),
+                      make_tuple(3, 3, highbd_8_mse8x8_c)));
+*/
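+// (The MSE harness presumably only fills 8-bit buffers, which is why the
+// high-bit-depth MSE instantiations above remain disabled.)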
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
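+// Only the five block sizes carried over from the VP8 code have MMX versions.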
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
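+// The 4-pixel-wide kernels carry an _sse suffix, indicating they require only
+// SSE rather than SSE2.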
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
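+// Unlike the C list above, the SSE2 high-bit-depth variance set stops at 8x8;
+// the 8x4, 4x8 and 4x4 sizes are only instantiated for C.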
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
for (int x = 0; x < 16; ++x) {
for (int y = 0; y < 16; ++y) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd.Rand8();
sec_[j] = rnd.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd.Rand8();
REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1, sec_));
x, y, &sse2);
|
void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
for (int x = 0; x < 8; ++x) {
for (int y = 0; y < 8; ++y) {
if (!use_high_bit_depth_) {
for (int j = 0; j < block_size_; j++) {
src_[j] = rnd_.Rand8();
sec_[j] = rnd_.Rand8();
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
ref_[j] = rnd_.Rand8();
}
#if CONFIG_VP9_HIGHBITDEPTH
} else {
for (int j = 0; j < block_size_; j++) {
CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
}
for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
ASM_REGISTER_STATE_CHECK(
var1 = subpel_variance_(ref_, width_ + 1, x, y,
src_, width_, &sse1, sec_));
x, y, &sse2,
use_high_bit_depth_,
bit_depth_);
|
150,888 |
virtual void SetUp() {
const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = new uint8_t[block_size_];
ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
if (get<3>(params)) {
bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
use_high_bit_depth_ = true;
} else {
bit_depth_ = VPX_BITS_8;
use_high_bit_depth_ = false;
}
mask_ = (1 << bit_depth_) - 1;
rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
if (!use_high_bit_depth_) {
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
ref_ = new uint8_t[block_size_ * 2];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
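+// E.g. at 12 bits, sse = 1000 rounds to (1000 + 128) >> 8 = 4 and se = 100
+// rounds to (100 + 8) >> 4 = 6.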
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
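+// Reference sum of squares over a fixed 256-sample (16x16) int16_t block.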
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
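+  // variance = sse - se^2 / (w * h); w * h is a power of two, so the
+  // division reduces to a right shift by (l2w + l2h).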
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
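+// The tests sweep xoff and yoff over [0, 8); doubling maps them onto the even
+// 16th-pel positions 0, 2, ..., 14 used by the bilinear interpolation below.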
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
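+    // All 256 samples equal v, so the sum of squares is exactly 256 * v^2.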
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
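+      // Shift the 8-bit test value into the high bit depth range.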
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
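+    // The coefficients cycle through {0, 1}, covering both the degenerate
+    // zero-stride case and the normal width-sized stride.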
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
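+  // Half the pixels differ by 255: sse = N/2 * 255^2 and |se| = N/2 * 255,
+  // so var = sse - se^2 / N = N * 255^2 / 4.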
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
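+// Get4x4SseFunc returns the sum of squared errors directly instead of
+// writing it through an out parameter, so the return value is compared.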
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
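+  // Every pixel differs by 255, so the accumulated sse is block_size_ * 255^2.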
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
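+      // The interpolated reference value is averaged (with rounding) against
+      // second_pred before the difference is taken.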
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+      bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+    mask_ = (1 << bit_depth_) - 1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+              vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+              vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
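+// The high-bit-depth tests reuse the same templated fixtures as the 8-bit
+// tests above. The fourth tuple element carries the bit depth (8, 10 or 12)
+// to match the vpx_highbd_{8,10,12}_* kernel under test, presumably so the
+// fixture can generate samples in the right value range.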
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+    C, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+                      make_tuple(4, 3, highbd_12_mse16x8_c),
+                      make_tuple(3, 4, highbd_12_mse8x16_c),
+                      make_tuple(3, 3, highbd_12_mse8x8_c),
+                      make_tuple(4, 4, highbd_10_mse16x16_c),
+                      make_tuple(4, 3, highbd_10_mse16x8_c),
+                      make_tuple(3, 4, highbd_10_mse8x16_c),
+                      make_tuple(3, 3, highbd_10_mse8x8_c),
+                      make_tuple(4, 4, highbd_8_mse16x16_c),
+                      make_tuple(4, 3, highbd_8_mse16x8_c),
+                      make_tuple(3, 4, highbd_8_mse8x16_c),
+                      make_tuple(3, 3, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
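+// Note the 4-wide kernels carry an _sse rather than _sse2 suffix; the
+// suffix appears to name the minimum instruction set each kernel needs,
+// and the 4xN paths get by with plain SSE, so they sit under the same
+// HAVE_SSE2 / CONFIG_USE_X86INC guard as the wider SSE2 kernels.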
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
rnd(ACMRandom::DeterministicSeed());
src_ = new uint8_t[block_size_];
ref_ = new uint8_t[block_size_];
|
const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
if (get<3>(params)) {
bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
use_high_bit_depth_ = true;
} else {
bit_depth_ = VPX_BITS_8;
use_high_bit_depth_ = false;
}
mask_ = (1 << bit_depth_) - 1;
rnd_.Reset(ACMRandom::DeterministicSeed());
if (!use_high_bit_depth_) {
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
ref_ = new uint8_t[block_size_ * 2];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
|
150,889 |
virtual void SetUp() {
const tuple<int, int, SubpelVarianceFunctionType>& params =
this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
subpel_variance_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void SetUp() {
const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
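// Editorial note (not part of the patch): these typedefs follow the
// vpx_dsp RTCD prototypes. A VarianceMxNFunc, for example, is invoked in
// the tests below as
//   unsigned int sse;
//   unsigned int var = variance_(src_, width_, ref_, width_, &sse);
// returning the variance and storing the sum of squared errors in *sse.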
+
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
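// Editorial sketch (not part of the patch): a hand check of the rounding
// above with arbitrary values. For VPX_BITS_12, sse is shifted down by
// 2 * (12 - 8) = 8 bits and se by (12 - 8) = 4 bits, each with rounding:
//   sse = 1000  ->  (1000 + 128) >> 8 == 4
//   se  = 100   ->  (100 + 8) >> 4   == 6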
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
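// Editorial note (not part of the patch): the return value above is the
// un-normalized population variance of the differences d over the
// N = 2^(l2w + l2h) pixels: sum(d^2) - (sum(d))^2 / N, i.e. N * Var(d).
// Hand check for a 2x2 block (N = 4) with diffs {1, 1, 1, 3}:
//   sse = 12, se = 6, so 12 - ((6 * 6) >> 2) == 12 - 9 == 3,
// which equals the sum of squared deviations from the mean (1.5).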
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
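// Editorial sketch (not part of the patch): after the upshift above, an
// input offset of 4 (half pel on the codec's 8-step grid) becomes 8 on the
// 16th-pel grid used here. For neighbors a1 = 10 and a2 = 20:
//   a = a1 + (((a2 - a1) * 8 + 8) >> 4) == 10 + (88 >> 4) == 15,
// the expected midpoint.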
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
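// Editorial note (not part of the patch): with src all 255 and ref split
// 255/0, half of the N = block_size_ diffs are 0 and half are +/-255, so
//   sse = N / 2 * 255^2,  se = +/-(N / 2 * 255),
//   var = sse - se^2 / N = N * 255^2 / 2 - N * 255^2 / 4 = N * 255^2 / 4,
// matching the `expected` value checked above.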
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+      // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
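// Editorial note (not part of the patch): this differs from
// subpel_variance_ref() only in averaging the filtered value r with the
// second prediction before differencing, rounding half up:
//   (r + second_pred + 1) >> 1, e.g. (10 + 13 + 1) >> 1 == 12.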
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+      bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+    mask_ = (1 << bit_depth_) - 1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+  // Compare against reference.
+  // Src: Set the first half of values to 0, the second half to the maximum.
+  // Ref: Set the first half of values to the maximum, the second half to 0.
+  // (The high bit depth branch below swaps the src and ref patterns, which
+  // leaves the variance of the differences unchanged.)
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
+ VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+    C, VpxHBDMseTest, ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+                                        make_tuple(4, 3, highbd_12_mse16x8_c),
+                                        make_tuple(3, 4, highbd_12_mse8x16_c),
+                                        make_tuple(3, 3, highbd_12_mse8x8_c),
+                                        make_tuple(4, 4, highbd_10_mse16x16_c),
+                                        make_tuple(4, 3, highbd_10_mse16x8_c),
+                                        make_tuple(3, 4, highbd_10_mse8x16_c),
+                                        make_tuple(3, 3, highbd_10_mse8x8_c),
+                                        make_tuple(4, 4, highbd_8_mse16x16_c),
+                                        make_tuple(4, 3, highbd_8_mse16x8_c),
+                                        make_tuple(3, 4, highbd_8_mse8x16_c),
+                                        make_tuple(3, 3, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
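In the high-bit-depth cases above, pixels are 16-bit words holding 8-, 10-, or 12-bit values, and the trailing tuple element carries the bit depth so inputs can be generated in range. The same variance model over uint16_t samples looks like this (a sketch; the real tests additionally route buffers through libvpx's highbd pointer-conversion macros, which is not reproduced here):

#include <cstdint>
#include <cstdio>

// Variance of a w x h block of high-bit-depth samples; assumes inputs are
// already within [0, (1 << bit_depth) - 1].
static uint64_t highbd_variance_ref(const uint16_t *src, int src_stride,
                                    const uint16_t *ref, int ref_stride,
                                    int w, int h) {
  int64_t sum = 0;
  uint64_t sse = 0;
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      const int diff = src[y * src_stride + x] - ref[y * ref_stride + x];
      sum += diff;
      sse += (uint64_t)((int64_t)diff * diff);
    }
  }
  return sse - (uint64_t)((sum * sum) / (w * h));
}

int main() {
  uint16_t src[4 * 4], ref[4 * 4];
  for (int i = 0; i < 16; ++i) { src[i] = (uint16_t)(i << 4); ref[i] = 0; }
  printf("variance = %llu\n",
         (unsigned long long)highbd_variance_ref(src, 4, ref, 4, 4, 4));
  return 0;
}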
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
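The sub-pixel variants above first build a bilinear prediction at an eighth-pel (xoffset, yoffset) phase and then take the variance of that prediction against the reference block. A sketch of the interpolation step (single-pass form; the C reference filters horizontally then vertically with per-pass rounding, so the exact rounding below is illustrative):

#include <cstdint>

// First-order bilinear sample at integer position (x, y) plus an eighth-pel
// phase (xoff, yoff) in [0, 7]. Reads row y + 1 and column x + 1, matching
// the (w + 1) x (h + 1) source region the sub-pixel functions consume.
static uint8_t bilinear_sample(const uint8_t *src, int stride, int x, int y,
                               int xoff, int yoff) {
  const int a = src[y * stride + x];
  const int b = src[y * stride + x + 1];
  const int c = src[(y + 1) * stride + x];
  const int d = src[(y + 1) * stride + x + 1];
  const int top = a * (8 - xoff) + b * xoff;  // horizontal blend
  const int bot = c * (8 - xoff) + d * xoff;
  return (uint8_t)((top * (8 - yoff) + bot * yoff + 32) >> 6);  // / 64, rounded
}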
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
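The avg flavors in this block add one step to the plain sub-pixel path: the bilinear prediction is averaged with a second predictor before the variance is taken, mirroring compound prediction in the encoder. A sketch of that step, assuming the usual rounding-up convention:

#include <cstdint>

// Combine a prediction with a second predictor: comp = (pred + second + 1) >> 1.
// The variance of comp against the reference block is then computed as usual.
static void avg_pred(const uint8_t *pred, const uint8_t *second_pred,
                     uint8_t *comp, int w, int h) {
  for (int i = 0; i < w * h; ++i)
    comp[i] = (uint8_t)((pred[i] + second_pred[i] + 1) >> 1);
}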
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
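On the commented-out VpxHBDMseTest blocks above: MSE differs from variance only in that the mean is not removed, so the function's return value is the raw sum of squared differences. A reference sketch (8-bit shown; the disabled tests would cover the 16-bit variants):

#include <cstdint>

// MSE reference: sum of squared differences with no mean subtraction.
static unsigned int mse_ref(const uint8_t *src, int src_stride,
                            const uint8_t *ref, int ref_stride,
                            int w, int h) {
  uint64_t sse = 0;
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      const int diff = src[y * src_stride + x] - ref[y * ref_stride + x];
      sse += (uint64_t)(diff * diff);
    }
  }
  return (unsigned int)sse;
}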
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
const tuple<int, int, SubpelVarianceFunctionType>& params =
this->GetParam();
subpel_variance_ = get<2>(params);
sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
ASSERT_TRUE(sec_ != NULL);
|
const tuple<int, int, MseFunctionType>& params = this->GetParam();
mse_ = get<2>(params);
ref_ = new uint8_t[block_size_];
|
150,890 |
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
memset(src_, i, block_size_);
for (int j = 0; j <= 255; ++j) {
memset(ref_, j, block_size_);
unsigned int sse;
unsigned int var;
REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
}
}
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
if (!use_high_bit_depth_) {
memset(src_, i, block_size_);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
block_size_);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
for (int j = 0; j <= 255; ++j) {
if (!use_high_bit_depth_) {
memset(ref_, j, block_size_);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
block_size_);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse;
unsigned int var;
ASM_REGISTER_STATE_CHECK(
var = variance_(src_, width_, ref_, width_, &sse));
EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
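// A short worked example, not part of the patch, of the rounding above on
// 12-bit results (numbers are illustrative):
//   int64_t se = 40;      // error sum measured on 12-bit samples
//   uint64_t sse = 4000;  // the corresponding sum of squared errors
//   RoundHighBitDepth(12, &se, &sse);
//   // se  == (40 + 8) >> 4     == 3   (round-half-up shift by bit_depth - 8)
//   // sse == (4000 + 128) >> 8 == 16  (shift by 2 * (bit_depth - 8))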
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
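// The return value uses the one-pass variance identity: with
// N = 2^(l2w + l2h) pixels, N * Var = sum(d^2) - (sum(d))^2 / N. A quick
// hand check with illustrative diffs {1, 1, 3, 3}: sse = 20, se = 8, and
// 20 - ((8 * 8) >> 2) = 4, matching N * variance for mean 2, variance 1.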
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
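// A minimal standalone sketch, not part of the patch, of the two-tap view of
// the interpolation above: at a 16th-pel offset f (even values only, since
// xoff/yoff are doubled to match the codec's half-size lookup table),
// a1 + (((a2 - a1) * f + 8) >> 4) is the same two-tap filter as
// (a1 * (16 - f) + a2 * f + 8) >> 4, assuming arithmetic right shift.
#include <cassert>
int main() {
  for (int a1 = 0; a1 < 256; ++a1)
    for (int a2 = 0; a2 < 256; ++a2)
      for (int f = 0; f <= 14; f += 2)
        assert(a1 + (((a2 - a1) * f + 8) >> 4) ==
               ((a1 * (16 - f) + a2 * f + 8) >> 4));
  return 0;
}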
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+      CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+      CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+      CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+      CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
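// Note on RefStrideTest above: src_stride_coeff/ref_stride_coeff select
// between a packed layout (coeff 1, stride = width_) and a degenerate
// zero-stride layout (coeff 0) in which every row aliases row 0; src_ind
// and ref_ind fold index j onto the chosen layout so the reference and the
// function under test read identical samples.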
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
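// Why block_size_ * 255 * 255 / 4: half of the N diffs are 0 and half are
// 255, so sse = (N / 2) * 255^2 and se = (N / 2) * 255; the variance
// identity then gives (N / 2) * 255^2 - ((N / 2) * 255)^2 / N
// = N * 255^2 * (1/2 - 1/4) = N * 255^2 / 4.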
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
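// Both Max tests above drive every diff to the extreme 255 - 0 = 255, so
// the expected result is simply block_size_ * 255 * 255; this mainly
// stresses accumulator width (for 16x16, 256 * 65025 still fits in 32
// bits) rather than the averaging math.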
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+        const int diff =
+            ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
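// The only change from subpel_variance_ref: the interpolated sample r is
// first combined with the compound predictor by a round-half-up average,
// avg = (r + second_pred + 1) >> 1, before differencing against src. For
// example, r = 10 and second_pred = 13 average to (10 + 13 + 1) >> 1 = 12.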
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+      bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+    mask_ = (1 << bit_depth_) - 1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+      src_ = CONVERT_TO_BYTEPTR(
+          reinterpret_cast<uint16_t *>(
+              vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+      sec_ = CONVERT_TO_BYTEPTR(
+          reinterpret_cast<uint16_t *>(
+              vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+  // Compare against the reference on extreme input.
+  // Src and ref are filled with complementary half-extreme patterns: one
+  // half of the values 0 and the other half the maximum (the two bit depth
+  // branches below mirror which half gets which extreme).
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
+ VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDMseTest, ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+ make_tuple(4, 4, highbd_12_mse16x8_c),
+ make_tuple(4, 4, highbd_12_mse8x16_c),
+ make_tuple(4, 4, highbd_12_mse8x8_c),
+ make_tuple(4, 4, highbd_10_mse16x16_c),
+ make_tuple(4, 4, highbd_10_mse16x8_c),
+ make_tuple(4, 4, highbd_10_mse8x16_c),
+ make_tuple(4, 4, highbd_10_mse8x8_c),
+ make_tuple(4, 4, highbd_8_mse16x16_c),
+ make_tuple(4, 4, highbd_8_mse16x8_c),
+ make_tuple(4, 4, highbd_8_mse8x16_c),
+ make_tuple(4, 4, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
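+// For the 8-bit tests the trailing tuple element (the bit-depth slot) is 0.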
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not yet support the highbd versions.
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
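+// Note that the SSE2 high-bit-depth sub-pixel tables below stop at 8x4; no
+// 4-wide SSE2 variants are registered here.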
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
memset(src_, i, block_size_);
memset(ref_, j, block_size_);
REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
|
if (!use_high_bit_depth_) {
memset(src_, i, block_size_);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
block_size_);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
if (!use_high_bit_depth_) {
memset(ref_, j, block_size_);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
block_size_);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
ASM_REGISTER_STATE_CHECK(
var = variance_(src_, width_, ref_, width_, &sse));
EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
|
150,891 |
unsigned int subpel_avg_variance_ref(const uint8_t *ref,
const uint8_t *src,
const uint8_t *second_pred,
int l2w, int l2h,
int xoff, int yoff,
unsigned int *sse_ptr) {
int se = 0;
unsigned int sse = 0;
const int w = 1 << l2w, h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
const int a1 = ref[(w + 1) * (y + 0) + x + 0];
const int a2 = ref[(w + 1) * (y + 0) + x + 1];
const int b1 = ref[(w + 1) * (y + 1) + x + 0];
const int b2 = ref[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
se += diff;
sse += diff * diff;
}
}
*sse_ptr = sse;
return sse - (((int64_t) se * se) >> (l2w + l2h));
}
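// A minimal sketch (illustrative, assuming only the function above): the
// return statement uses the identity sum((d - mean)^2) = sum(d^2) - (sum d)^2 / N
// for the N = 2^(l2w + l2h) differences d, so it yields N times the per-pixel
// variance. E.g. d = {1, 3}: se = 4, sse = 10, N = 2 -> 10 - (16 >> 1) = 2.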
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
unsigned int subpel_avg_variance_ref(const uint8_t *ref,
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
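+// Worked numbers for the rounding above (illustrative only):
+//   VPX_BITS_12: sse 1000 -> (1000 + 128) >> 8 = 4,  se 100 -> (100 + 8) >> 4 = 6
+//   VPX_BITS_10: sse 1000 -> (1000 + 8) >> 4 = 63,   se 100 -> (100 + 2) >> 2 = 25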
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
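+
+// A small sketch of the mapping described in the comment above (the helper
+// name is illustrative, not part of the test): offsets arrive on an eighth-pel
+// grid in [0, 8) and are doubled so the bilinear weights work in 16ths.
+static int bilinear_16th_pel_sketch(int p0, int p1, int off_8th) {
+  const int off = off_8th << 1;              // eighth pel -> 16th pel
+  return p0 + (((p1 - p0) * off + 8) >> 4);  // rounded blend by off/16
+}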
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
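+// Range note for the sum-of-squares tests above (illustrative): the largest
+// constant input is v = 255, giving 256 * 255 * 255 = 16646400, well within
+// the 32-bit unsigned result.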
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
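+// Illustrative note on the high bit depth fill above: the 8-bit value is
+// scaled into range, e.g. for bit_depth_ == 10, i == 255 becomes
+// 255 << 2 = 1020 (the 10-bit maximum being 1023).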
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+        CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+        CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
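+// Worked example of the indexing above (illustrative): the stride coefficients
+// take only the values 0 or 1, so ref_stride_coeff == 0 collapses every row
+// onto the same width_ pixels (an effective stride of 0), while 1 gives the
+// dense layout with stride width_.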
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
    rnd.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+      // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
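+// The "avg" variant above differs from subpel_variance_ref only in taking the
+// rounded average of the interpolated reference and the second predictor
+// before differencing: e.g. r = 100, second_pred = 103 gives
+// (100 + 103 + 1) >> 1 = 102 (assumed values, for illustration).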
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+      bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+    mask_ = (1 << bit_depth_) - 1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+      src_ = CONVERT_TO_BYTEPTR(
+          reinterpret_cast<uint16_t *>(
+              vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+      sec_ = CONVERT_TO_BYTEPTR(
+          reinterpret_cast<uint16_t *>(
+              vpx_memalign(16, block_size_ * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
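+// Hedged rationale: with one half at zero and the other at the maximum, every
+// difference is close to +/-mask_, pushing se and sse toward their extremes
+// and exercising the widest intermediate values in the optimized kernels.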
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
+ VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDMseTest, ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+ make_tuple(4, 4, highbd_12_mse16x8_c),
+ make_tuple(4, 4, highbd_12_mse8x16_c),
+ make_tuple(4, 4, highbd_12_mse8x8_c),
+ make_tuple(4, 4, highbd_10_mse16x16_c),
+ make_tuple(4, 4, highbd_10_mse16x8_c),
+ make_tuple(4, 4, highbd_10_mse8x16_c),
+ make_tuple(4, 4, highbd_10_mse8x8_c),
+ make_tuple(4, 4, highbd_8_mse16x16_c),
+ make_tuple(4, 4, highbd_8_mse16x8_c),
+ make_tuple(4, 4, highbd_8_mse8x16_c),
+ make_tuple(4, 4, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
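+
+// The first two tuple parameters are log2(width) and log2(height) of the
+// block under test: make_tuple(5, 4, fn, 12), for example, covers a 32x16
+// block, since (1 << 5) == 32 and (1 << 4) == 16.  The trailing integer is
+// the bit depth exercised by the high-bit-depth functions; the non-highbd
+// instantiations below pass 0 instead, which presumably selects the plain
+// 8-bit code path.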
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
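+// Together the 8-, 10- and 12-bit rows above cover every depth a
+// CONFIG_VP9_HIGHBITDEPTH build handles: the 8-bit variants route 8-bit
+// content through the 16-bit highbd buffers, while 10 and 12 bits
+// presumably correspond to VP9's high-bit-depth profiles.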
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
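+// Note that the 4x8 and 4x4 entries above bind to _sse rather than _sse2
+// functions, apparently because the four-pixel-wide kernels need nothing
+// beyond SSE instructions; every wider block size uses the SSE2
+// implementation.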
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
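+// The constants and instantiation above are parked inside the block
+// comment so they can be re-enabled once VpxHBDMseTest gains support for
+// the high-bit-depth functions, per the TODO.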
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
unsigned int subpel_avg_variance_ref(const uint8_t *ref,
                                     const uint8_t *src,
const uint8_t *second_pred,
int l2w, int l2h,
int xoff, int yoff,
unsigned int *sse_ptr) {
int se = 0;
unsigned int sse = 0;
const int w = 1 << l2w, h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
const int a1 = ref[(w + 1) * (y + 0) + x + 0];
const int a2 = ref[(w + 1) * (y + 0) + x + 1];
const int b1 = ref[(w + 1) * (y + 1) + x + 0];
const int b2 = ref[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
se += diff;
sse += diff * diff;
}
}
*sse_ptr = sse;
return sse - (((int64_t) se * se) >> (l2w + l2h));
}
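// A minimal sketch (editorial addition, not part of the original record):
// the return statement above is the integer form of the variance identity
// var = sse - se * se / N with N = 2^(l2w + l2h), the division replaced by a
// right shift. The helper below repeats that arithmetic in isolation; the
// name variance_from_sums is hypothetical, and int64_t comes from
// vpx/vpx_integer.h in the surrounding test.
static unsigned int variance_from_sums(unsigned int sse, int se,
                                       int l2w, int l2h) {
  // Same arithmetic as the reference functions in this record.
  return sse - static_cast<unsigned int>(
                   (static_cast<int64_t>(se) * se) >> (l2w + l2h));
}
// Worked check: a 2x2 block (l2w = l2h = 1) whose diffs are all 1 gives
// se = 4 and sse = 4, so variance_from_sums(4, 4, 1, 1) == 4 - (16 >> 2) == 0,
// as expected for a constant offset between src and ref.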
| null |
150,892 |
static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
int l2w, int l2h, int xoff, int yoff,
unsigned int *sse_ptr) {
int se = 0;
unsigned int sse = 0;
const int w = 1 << l2w, h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
const int a1 = ref[(w + 1) * (y + 0) + x + 0];
const int a2 = ref[(w + 1) * (y + 0) + x + 1];
const int b1 = ref[(w + 1) * (y + 1) + x + 0];
const int b2 = ref[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
int diff = r - src[w * y + x];
se += diff;
sse += diff * diff;
}
}
*sse_ptr = sse;
return sse - (((int64_t) se * se) >> (l2w + l2h));
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
static unsigned int mb_ss_ref(const int16_t *src) {
unsigned int res = 0;
for (int i = 0; i < 256; ++i) {
res += src[i] * src[i];
}
return res;
}
static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
int l2w, int l2h, int src_stride_coeff,
int ref_stride_coeff, uint32_t *sse_ptr,
bool use_high_bit_depth_,
vpx_bit_depth_t bit_depth) {
int64_t se = 0;
uint64_t sse = 0;
const int w = 1 << l2w;
const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int diff;
if (!use_high_bit_depth_) {
diff = ref[w * y * ref_stride_coeff + x] -
src[w * y * src_stride_coeff + x];
se += diff;
sse += diff * diff;
#if CONFIG_VP9_HIGHBITDEPTH
} else {
diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
se += diff;
sse += diff * diff;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
}
RoundHighBitDepth(bit_depth, &se, &sse);
*sse_ptr = static_cast<uint32_t>(sse);
return static_cast<uint32_t>(sse -
((static_cast<int64_t>(se) * se) >>
(l2w + l2h)));
}
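// Editorial note with a worked example (not part of the original record):
// the *_stride_coeff parameters above scale the row stride, so element (x, y)
// is read at index w * y * stride_coeff + x. With w = 4:
//   stride_coeff = 1: (x = 2, y = 3) -> 4 * 3 * 1 + 2 = 14 (packed layout)
//   stride_coeff = 0: (x = 2, y = 3) -> 4 * 3 * 0 + 2 = 2  (every row aliases
//                                                           row 0)
// RefStrideTest, shown later in this record, fills the buffers with the same
// addressing, so both layouts are exercised against the optimized functions.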
/* The subpel reference functions differ from the codec version in one aspect:
* they calculate the bilinear factors directly instead of using a lookup table
* and therefore upshift xoff and yoff by 1. Only every other calculated value
* is used so the codec version shrinks the table to save space and maintain
* compatibility with vp8.
*/
static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
int l2w, int l2h, int xoff, int yoff,
uint32_t *sse_ptr,
bool use_high_bit_depth_,
vpx_bit_depth_t bit_depth) {
int64_t se = 0;
uint64_t sse = 0;
const int w = 1 << l2w;
const int h = 1 << l2h;
xoff <<= 1;
yoff <<= 1;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
// Bilinear interpolation at a 16th pel step.
if (!use_high_bit_depth_) {
const int a1 = ref[(w + 1) * (y + 0) + x + 0];
const int a2 = ref[(w + 1) * (y + 0) + x + 1];
const int b1 = ref[(w + 1) * (y + 1) + x + 0];
const int b2 = ref[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
const int diff = r - src[w * y + x];
se += diff;
sse += diff * diff;
#if CONFIG_VP9_HIGHBITDEPTH
} else {
uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
const int diff = r - src16[w * y + x];
se += diff;
sse += diff * diff;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
}
RoundHighBitDepth(bit_depth, &se, &sse);
*sse_ptr = static_cast<uint32_t>(sse);
return static_cast<uint32_t>(sse -
((static_cast<int64_t>(se) * se) >>
(l2w + l2h)));
}
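// Editorial sketch working one sample of the bilinear filter above (not part
// of the original record). The tests pass xoff/yoff in the codec's eighth-pel
// domain (0..7); the reference doubles them to a 16th-pel step, as the block
// comment preceding subpel_variance_ref explains. For xoff = 4 (half pel),
// doubled to 8, with neighbouring pixels a1 = 10 and a2 = 20:
//   a = a1 + (((a2 - a1) * xoff + 8) >> 4)
//     = 10 + (((20 - 10) * 8 + 8) >> 4)
//     = 10 + (88 >> 4) = 10 + 5 = 15,
// the exact midpoint, as expected for a half-pel offset. The same filter is
// then applied vertically with yoff.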
class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
public:
SumOfSquaresTest() : func_(GetParam()) {}
virtual ~SumOfSquaresTest() {
libvpx_test::ClearSystemState();
}
protected:
void ConstTest();
void RefTest();
SumOfSquaresFunction func_;
ACMRandom rnd_;
};
void SumOfSquaresTest::ConstTest() {
int16_t mem[256];
unsigned int res;
for (int v = 0; v < 256; ++v) {
for (int i = 0; i < 256; ++i) {
mem[i] = v;
}
ASM_REGISTER_STATE_CHECK(res = func_(mem));
EXPECT_EQ(256u * (v * v), res);
}
}
void SumOfSquaresTest::RefTest() {
int16_t mem[256];
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 256; ++j) {
mem[j] = rnd_.Rand8() - rnd_.Rand8();
}
const unsigned int expected = mb_ss_ref(mem);
unsigned int res;
ASM_REGISTER_STATE_CHECK(res = func_(mem));
EXPECT_EQ(expected, res);
}
}
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = (vpx_bit_depth_t) get<3>(params);
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_)-1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
+ VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDMseTest, ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+ make_tuple(4, 4, highbd_12_mse16x8_c),
+ make_tuple(4, 4, highbd_12_mse8x16_c),
+ make_tuple(4, 4, highbd_12_mse8x8_c),
+ make_tuple(4, 4, highbd_10_mse16x16_c),
+ make_tuple(4, 4, highbd_10_mse16x8_c),
+ make_tuple(4, 4, highbd_10_mse8x16_c),
+ make_tuple(4, 4, highbd_10_mse8x8_c),
+ make_tuple(4, 4, highbd_8_mse16x16_c),
+ make_tuple(4, 4, highbd_8_mse16x8_c),
+ make_tuple(4, 4, highbd_8_mse8x16_c),
+ make_tuple(4, 4, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
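+// Tuple layout used throughout these instantiations: (log2_width,
+// log2_height, function, bit_depth). For example, make_tuple(6, 5,
+// highbd_12_variance64x32_c, 12) exercises a (1 << 6) x (1 << 5) = 64x32
+// block at 12-bit depth; the non-high-bit-depth instantiations elsewhere in
+// this file pass 0 in the final field.
+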
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
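+// The sub-pixel variance functions interpolate the source with a two-tap
+// bilinear filter at an eighth-pel (xoffset, yoffset) position before the
+// variance computation; the "avg" variants below additionally average that
+// filtered prediction with a second predictor first.
+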
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
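+// Note: despite the name, the vpx_mse functions return the raw sum of
+// squared differences, with no mean subtraction and no normalization by
+// the block size.
+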
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
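+// Each HAVE_* section registers its ISA-specific implementations with the
+// same parameterized test fixtures, so every optimized function is checked
+// against the reference computation inside the test harness.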
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
CWE-119
|
int l2w, int l2h, int xoff, int yoff,
unsigned int *sse_ptr) {
int se = 0;
unsigned int sse = 0;
const int w = 1 << l2w, h = 1 << l2h;
const int a1 = ref[(w + 1) * (y + 0) + x + 0];
const int a2 = ref[(w + 1) * (y + 0) + x + 1];
const int b1 = ref[(w + 1) * (y + 1) + x + 0];
const int b2 = ref[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
int diff = r - src[w * y + x];
se += diff;
sse += diff * diff;
*sse_ptr = sse;
return sse - (((int64_t) se * se) >> (l2w + l2h));
|
static unsigned int mb_ss_ref(const int16_t *src) {
unsigned int res = 0;
for (int i = 0; i < 256; ++i) {
res += src[i] * src[i];
}
return res;
}
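// Illustrative sketch, not part of the original test file: mb_ss_ref is the
// plain sum of squares over a 16x16 macroblock of int16 residuals,
// i.e. the sum of src[i] * src[i] for i in [0, 256). A minimal sanity check
// (assumes <cassert> and the definition above):
static void mb_ss_ref_example() {
  int16_t mem[256] = {0};        // all-zero block
  mem[0] = 3;                    // a single residual of 3
  assert(mb_ss_ref(mem) == 9u);  // 3 * 3; every other term contributes 0
}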
static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
int l2w, int l2h, int src_stride_coeff,
int ref_stride_coeff, uint32_t *sse_ptr,
bool use_high_bit_depth_,
vpx_bit_depth_t bit_depth) {
int64_t se = 0;
uint64_t sse = 0;
const int w = 1 << l2w;
const int h = 1 << l2h;
int diff;
if (!use_high_bit_depth_) {
diff = ref[w * y * ref_stride_coeff + x] -
src[w * y * src_stride_coeff + x];
se += diff;
sse += diff * diff;
#if CONFIG_VP9_HIGHBITDEPTH
} else {
diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
se += diff;
sse += diff * diff;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
RoundHighBitDepth(bit_depth, &se, &sse);
*sse_ptr = static_cast<uint32_t>(sse);
return static_cast<uint32_t>(sse -
((static_cast<int64_t>(se) * se) >>
(l2w + l2h)));
}
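// The return expression above is the standard shortcut variance identity,
// restated here rather than new behavior: for N = 2^(l2w + l2h) pixels,
//   Var = SSE - SE * SE / N,
// computed in integer form as sse - ((se * se) >> (l2w + l2h)). Carrying se
// and sse in 64-bit intermediates is what keeps this exact for 12-bit input.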
/* The subpel reference functions differ from the codec version in one aspect:
* they calculate the bilinear factors directly instead of using a lookup table
* and therefore upshift xoff and yoff by 1. Only every other calculated value
* is used so the codec version shrinks the table to save space and maintain
* compatibility with vp8.
*/
static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
int l2w, int l2h, int xoff, int yoff,
uint32_t *sse_ptr,
bool use_high_bit_depth_,
vpx_bit_depth_t bit_depth) {
int64_t se = 0;
uint64_t sse = 0;
const int w = 1 << l2w;
const int h = 1 << l2h;
xoff <<= 1;
yoff <<= 1;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
// Bilinear interpolation at a 16th pel step.
if (!use_high_bit_depth_) {
const int a1 = ref[(w + 1) * (y + 0) + x + 0];
const int a2 = ref[(w + 1) * (y + 0) + x + 1];
const int b1 = ref[(w + 1) * (y + 1) + x + 0];
const int b2 = ref[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
const int diff = r - src[w * y + x];
se += diff;
sse += diff * diff;
#if CONFIG_VP9_HIGHBITDEPTH
} else {
uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
const int r = a + (((b - a) * yoff + 8) >> 4);
const int diff = r - src16[w * y + x];
se += diff;
sse += diff * diff;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
}
RoundHighBitDepth(bit_depth, &se, &sse);
*sse_ptr = static_cast<uint32_t>(sse);
return static_cast<uint32_t>(sse -
((static_cast<int64_t>(se) * se) >>
(l2w + l2h)));
}
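// Worked example of the 16th-pel bilinear step above (illustrative numbers
// only): after the upshift, xoff = 8 is the half-pel position, so for
// a1 = 10 and a2 = 26,
//   a = a1 + (((a2 - a1) * 8 + 8) >> 4) = 10 + ((128 + 8) >> 4) = 18,
// i.e. the exact midpoint; yoff then blends rows a and b the same way.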
class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
public:
SumOfSquaresTest() : func_(GetParam()) {}
virtual ~SumOfSquaresTest() {
libvpx_test::ClearSystemState();
}
protected:
void ConstTest();
void RefTest();
SumOfSquaresFunction func_;
ACMRandom rnd_;
};
void SumOfSquaresTest::ConstTest() {
int16_t mem[256];
unsigned int res;
for (int v = 0; v < 256; ++v) {
for (int i = 0; i < 256; ++i) {
mem[i] = v;
}
ASM_REGISTER_STATE_CHECK(res = func_(mem));
EXPECT_EQ(256u * (v * v), res);
}
}
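// Headroom note, restating the arithmetic above: the largest constant case
// is v = 255, giving 256 * 255 * 255 = 16,646,400, which fits comfortably
// in an unsigned int, so ConstTest needs no wider accumulator.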
void SumOfSquaresTest::RefTest() {
int16_t mem[256];
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 256; ++j) {
mem[j] = rnd_.Rand8() - rnd_.Rand8();
}
const unsigned int expected = mb_ss_ref(mem);
unsigned int res;
ASM_REGISTER_STATE_CHECK(res = func_(mem));
EXPECT_EQ(expected, res);
}
|
150,893 |
static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
int l2w, int l2h, unsigned int *sse_ptr) {
int se = 0;
unsigned int sse = 0;
const int w = 1 << l2w, h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int diff = ref[w * y + x] - src[w * y + x];
se += diff;
sse += diff * diff;
}
}
*sse_ptr = sse;
return sse - (((int64_t) se * se) >> (l2w + l2h));
}
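// Why this row is classified as an overflow fix (an inference from the diff
// below, not text from the dataset): se and sse here are only int and
// unsigned int. For a 64x64 block of 12-bit samples, diff can reach 4095,
// so sse can grow to roughly 4096 * 4095 * 4095 ~= 6.9e10, far past
// UINT_MAX (~4.3e9); the patched reference widens them to int64_t and
// uint64_t before rounding with RoundHighBitDepth.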
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
// Truncate high bit depth results by downshifting (with rounding) by:
// 2 * (bit_depth - 8) for sse
// (bit_depth - 8) for se
static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
switch (bit_depth) {
case VPX_BITS_12:
*sse = (*sse + 128) >> 8;
*se = (*se + 8) >> 4;
break;
case VPX_BITS_10:
*sse = (*sse + 8) >> 4;
*se = (*se + 2) >> 2;
break;
case VPX_BITS_8:
default:
break;
}
}
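// Worked example for the 12-bit branch above (illustrative values): sse is
// downshifted by 2 * (12 - 8) = 8 bits and se by (12 - 8) = 4 bits, with
// rounding, so
//   sse = 1000 -> (1000 + 128) >> 8 = 4
//   se  =  100 -> (100 + 8) >> 4   = 6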
|
@@ -7,111 +7,271 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdlib.h>
+
+#include <cstdlib>
#include <new>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
-#if CONFIG_VP8_ENCODER
-# include "./vp8_rtcd.h"
-# include "vp8/common/variance.h"
-#endif
-#if CONFIG_VP9_ENCODER
-# include "./vp9_rtcd.h"
-# include "vp9/encoder/vp9_variance.h"
-#endif
-#include "test/acm_random.h"
+#include "vpx_ports/mem.h"
namespace {
+typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse);
+typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
+ int xoffset, int yoffset,
+ const uint8_t *b, int b_stride,
+ uint32_t *sse,
+ const uint8_t *second_pred);
+typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride);
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
using ::std::tr1::get;
using ::std::tr1::make_tuple;
using ::std::tr1::tuple;
using libvpx_test::ACMRandom;
-static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- int diff = ref[w * y + x] - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
+// Truncate high bit depth results by downshifting (with rounding) by:
+// 2 * (bit_depth - 8) for sse
+// (bit_depth - 8) for se
+static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
+ switch (bit_depth) {
+ case VPX_BITS_12:
+ *sse = (*sse + 128) >> 8;
+ *se = (*se + 8) >> 4;
+ break;
+ case VPX_BITS_10:
+ *sse = (*sse + 8) >> 4;
+ *se = (*se + 2) >> 2;
+ break;
+ case VPX_BITS_8:
+ default:
+ break;
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
}
-static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
- int l2w, int l2h, int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
+static unsigned int mb_ss_ref(const int16_t *src) {
+ unsigned int res = 0;
+ for (int i = 0; i < 256; ++i) {
+ res += src[i] * src[i];
+ }
+ return res;
+}
+
+static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
+ int l2w, int l2h, int src_stride_coeff,
+ int ref_stride_coeff, uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = r - src[w * y + x];
- se += diff;
- sse += diff * diff;
+ int diff;
+ if (!use_high_bit_depth_) {
+ diff = ref[w * y * ref_stride_coeff + x] -
+ src[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
+ CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
}
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+/* The subpel reference functions differ from the codec version in one aspect:
+ * they calculate the bilinear factors directly instead of using a lookup table
+ * and therefore upshift xoff and yoff by 1. Only every other calculated value
+ * is used so the codec version shrinks the table to save space and maintain
+ * compatibility with vp8.
+ */
+static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth_,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // Bilinear interpolation at a 16th pel step.
+ if (!use_high_bit_depth_) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = r - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+ SumOfSquaresTest() : func_(GetParam()) {}
+
+ virtual ~SumOfSquaresTest() {
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ConstTest();
+ void RefTest();
+
+ SumOfSquaresFunction func_;
+ ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+ int16_t mem[256];
+ unsigned int res;
+ for (int v = 0; v < 256; ++v) {
+ for (int i = 0; i < 256; ++i) {
+ mem[i] = v;
+ }
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(256u * (v * v), res);
+ }
+}
+
+void SumOfSquaresTest::RefTest() {
+ int16_t mem[256];
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 256; ++j) {
+ mem[j] = rnd_.Rand8() - rnd_.Rand8();
+ }
+
+ const unsigned int expected = mb_ss_ref(mem);
+ unsigned int res;
+ ASM_REGISTER_STATE_CHECK(res = func_(mem));
+ EXPECT_EQ(expected, res);
+ }
}
template<typename VarianceFunctionType>
class VarianceTest
- : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ : public ::testing::TestWithParam<tuple<int, int,
+ VarianceFunctionType, int> > {
public:
virtual void SetUp() {
- const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_) - 1;
- rnd(ACMRandom::DeterministicSeed());
+ rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
- src_ = new uint8_t[block_size_];
- ref_ = new uint8_t[block_size_];
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
+ ref_ = new uint8_t[block_size_ * 2];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
- delete[] src_;
- delete[] ref_;
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void ZeroTest();
void RefTest();
+ void RefStrideTest();
void OneQuarterTest();
- ACMRandom rnd;
- uint8_t* src_;
- uint8_t* ref_;
+ ACMRandom rnd_;
+ uint8_t *src_;
+ uint8_t *ref_;
int width_, log2width_;
int height_, log2height_;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
+ bool use_high_bit_depth_;
int block_size_;
VarianceFunctionType variance_;
};
@@ -119,13 +279,28 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::ZeroTest() {
for (int i = 0; i <= 255; ++i) {
- memset(src_, i, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(src_, i, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
for (int j = 0; j <= 255; ++j) {
- memset(ref_, j, block_size_);
+ if (!use_high_bit_depth_) {
+ memset(ref_, j, block_size_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
+ block_size_);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
- EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
+ ASM_REGISTER_STATE_CHECK(
+ var = variance_(src_, width_, ref_, width_, &sse));
+ EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
}
}
}
@@ -134,14 +309,58 @@
void VarianceTest<VarianceFunctionType>::RefTest() {
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- ref_[j] = rnd.Rand8();
+ if (!use_high_bit_depth_) {
+ src_[j] = rnd_.Rand8();
+ ref_[j] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() && mask_;
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() && mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_, ref_, width_, &sse1));
const unsigned int var2 = variance_ref(src_, ref_, log2width_,
- log2height_, &sse2);
+ log2height_, stride_coeff,
+ stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefStrideTest() {
+ for (int i = 0; i < 10; ++i) {
+ int ref_stride_coeff = i % 2;
+ int src_stride_coeff = (i >> 1) % 2;
+ for (int j = 0; j < block_size_; j++) {
+ int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
+ int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
+ if (!use_high_bit_depth_) {
+ src_[src_ind] = rnd_.Rand8();
+ ref_[ref_ind] = rnd_.Rand8();
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() && mask_;
+ CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() && mask_;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+
+ ASM_REGISTER_STATE_CHECK(
+ var1 = variance_(src_, width_ * src_stride_coeff,
+ ref_, width_ * ref_stride_coeff, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, src_stride_coeff,
+ ref_stride_coeff, &sse2,
+ use_high_bit_depth_, bit_depth_);
EXPECT_EQ(sse1, sse2);
EXPECT_EQ(var1, var2);
}
@@ -149,561 +368,1673 @@
template<typename VarianceFunctionType>
void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
- memset(src_, 255, block_size_);
const int half = block_size_ / 2;
- memset(ref_, 255, half);
- memset(ref_ + half, 0, half);
+ if (!use_high_bit_depth_) {
+ memset(src_, 255, block_size_);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
+ block_size_);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
unsigned int sse;
unsigned int var;
- REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+ ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
-#if CONFIG_VP9_ENCODER
-
-unsigned int subpel_avg_variance_ref(const uint8_t *ref,
- const uint8_t *src,
- const uint8_t *second_pred,
- int l2w, int l2h,
- int xoff, int yoff,
- unsigned int *sse_ptr) {
- int se = 0;
- unsigned int sse = 0;
- const int w = 1 << l2w, h = 1 << l2h;
- for (int y = 0; y < h; y++) {
- for (int x = 0; x < w; x++) {
- // bilinear interpolation at a 16th pel step
- const int a1 = ref[(w + 1) * (y + 0) + x + 0];
- const int a2 = ref[(w + 1) * (y + 0) + x + 1];
- const int b1 = ref[(w + 1) * (y + 1) + x + 0];
- const int b2 = ref[(w + 1) * (y + 1) + x + 1];
- const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
- const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
- const int r = a + (((b - a) * yoff + 8) >> 4);
- int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
- se += diff;
- sse += diff * diff;
- }
- }
- *sse_ptr = sse;
- return sse - (((int64_t) se * se) >> (l2w + l2h));
-}
-
-template<typename SubpelVarianceFunctionType>
-class SubpelVarianceTest
- : public ::testing::TestWithParam<tuple<int, int,
- SubpelVarianceFunctionType> > {
+template<typename MseFunctionType>
+class MseTest
+ : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
public:
virtual void SetUp() {
- const tuple<int, int, SubpelVarianceFunctionType>& params =
- this->GetParam();
+ const tuple<int, int, MseFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
- subpel_variance_ = get<2>(params);
+ mse_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
- ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
- ASSERT_TRUE(sec_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
virtual void TearDown() {
vpx_free(src_);
delete[] ref_;
- vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest_mse();
+ void RefTest_sse();
+ void MaxTest_mse();
+ void MaxTest_sse();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ MseFunctionType mse_;
+};
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_mse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(sse1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::RefTest_sse() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse2;
+ unsigned int var1;
+ const int stride_coeff = 1;
+ ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
+ variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
+ stride_coeff, &sse2, false, VPX_BITS_8);
+ EXPECT_EQ(var1, sse2);
+ }
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_mse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int sse;
+ ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, sse);
+}
+
+template<typename MseFunctionType>
+void MseTest<MseFunctionType>::MaxTest_sse() {
+ memset(src_, 255, block_size_);
+ memset(ref_, 0, block_size_);
+ unsigned int var;
+ ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
+ const unsigned int expected = block_size_ * 255 * 255;
+ EXPECT_EQ(expected, var);
+}
+
+static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ uint32_t *sse_ptr,
+ bool use_high_bit_depth,
+ vpx_bit_depth_t bit_depth) {
+ int64_t se = 0;
+ uint64_t sse = 0;
+ const int w = 1 << l2w;
+ const int h = 1 << l2h;
+
+ xoff <<= 1;
+ yoff <<= 1;
+
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
+ if (!use_high_bit_depth) {
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
+ const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
+ se += diff;
+ sse += diff * diff;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+ }
+ RoundHighBitDepth(bit_depth, &se, &sse);
+ *sse_ptr = static_cast<uint32_t>(sse);
+ return static_cast<uint32_t>(sse -
+ ((static_cast<int64_t>(se) * se) >>
+ (l2w + l2h)));
+}
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType, int> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType, int>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+ if (get<3>(params)) {
+ bit_depth_ = (vpx_bit_depth_t) get<3>(params);
+ use_high_bit_depth_ = true;
+ } else {
+ bit_depth_ = VPX_BITS_8;
+ use_high_bit_depth_ = false;
+ }
+ mask_ = (1 << bit_depth_)-1;
+
+ rnd_.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ if (!use_high_bit_depth_) {
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ sec_ = CONVERT_TO_BYTEPTR(
+ reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, block_size_*sizeof(uint16_t))));
+ ref_ = CONVERT_TO_BYTEPTR(
+ new uint16_t[block_size_ + width_ + height_ + 1]);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ if (!use_high_bit_depth_) {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_free(CONVERT_TO_SHORTPTR(src_));
+ delete[] CONVERT_TO_SHORTPTR(ref_);
+ vpx_free(CONVERT_TO_SHORTPTR(sec_));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
libvpx_test::ClearSystemState();
}
protected:
void RefTest();
+ void ExtremeRefTest();
- ACMRandom rnd;
+ ACMRandom rnd_;
uint8_t *src_;
uint8_t *ref_;
uint8_t *sec_;
+ bool use_high_bit_depth_;
+ vpx_bit_depth_t bit_depth_;
int width_, log2width_;
int height_, log2height_;
- int block_size_;
+ int block_size_, mask_;
SubpelVarianceFunctionType subpel_variance_;
};
template<typename SubpelVarianceFunctionType>
void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1));
- const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
- log2height_, x, y, &sse2);
+ ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_,
+ log2width_, log2height_,
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
+ // Compare against reference.
+ // Src: Set the first half of values to 0, the second half to the maximum.
+ // Ref: Set the first half of values to the maximum, the second half to 0.
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ const int half = block_size_ / 2;
+ if (!use_high_bit_depth_) {
+ memset(src_, 0, half);
+ memset(src_ + half, 255, half);
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half + width_ + height_ + 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
+ vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
+ half + width_ + height_ + 1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
+ const unsigned int var2 =
+ subpel_variance_ref(ref_, src_, log2width_, log2height_,
+ x, y, &sse2, use_high_bit_depth_, bit_depth_);
+ EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
+ EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
+ }
+ }
+}
+
template<>
-void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
- for (int x = 0; x < 16; ++x) {
- for (int y = 0; y < 16; ++y) {
- for (int j = 0; j < block_size_; j++) {
- src_[j] = rnd.Rand8();
- sec_[j] = rnd.Rand8();
- }
- for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
- ref_[j] = rnd.Rand8();
+void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
+ for (int x = 0; x < 8; ++x) {
+ for (int y = 0; y < 8; ++y) {
+ if (!use_high_bit_depth_) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd_.Rand8();
+ sec_[j] = rnd_.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd_.Rand8();
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ for (int j = 0; j < block_size_; j++) {
+ CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
+ CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse1, sse2;
unsigned int var1;
- REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
- src_, width_, &sse1, sec_));
+ ASM_REGISTER_STATE_CHECK(
+ var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
log2width_, log2height_,
- x, y, &sse2);
+ x, y, &sse2,
+ use_high_bit_depth_,
+ bit_depth_);
EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
}
}
}
-#endif // CONFIG_VP9_ENCODER
+typedef MseTest<Get4x4SseFunc> VpxSseTest;
+typedef MseTest<VarianceMxNFunc> VpxMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
-// -----------------------------------------------------------------------------
-// VP8 test cases.
+TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
+TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
+TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
-namespace vp8 {
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_c));
-#if CONFIG_VP8_ENCODER
-typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
+INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
-TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP8VarianceTest, Ref) { RefTest(); }
-TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
+const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
+const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
+const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_c),
+ make_tuple(4, 3, mse16x8_c),
+ make_tuple(3, 4, mse8x16_c),
+ make_tuple(3, 3, mse8x8_c)));
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
+const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
+const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
+const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
+const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
+const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
+const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
+const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
+const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
+const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
+const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
+const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
+const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ C, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
+ make_tuple(6, 5, variance64x32_c, 0),
+ make_tuple(5, 6, variance32x64_c, 0),
+ make_tuple(5, 5, variance32x32_c, 0),
+ make_tuple(5, 4, variance32x16_c, 0),
+ make_tuple(4, 5, variance16x32_c, 0),
+ make_tuple(4, 4, variance16x16_c, 0),
+ make_tuple(4, 3, variance16x8_c, 0),
+ make_tuple(3, 4, variance8x16_c, 0),
+ make_tuple(3, 3, variance8x8_c, 0),
+ make_tuple(3, 2, variance8x4_c, 0),
+ make_tuple(2, 3, variance4x8_c, 0),
+ make_tuple(2, 2, variance4x4_c, 0)));
-#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
+const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
INSTANTIATE_TEST_CASE_P(
- NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
-#endif
+ C, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
+ make_tuple(6, 5, subpel_var64x32_c, 0),
+ make_tuple(5, 6, subpel_var32x64_c, 0),
+ make_tuple(5, 5, subpel_var32x32_c, 0),
+ make_tuple(5, 4, subpel_var32x16_c, 0),
+ make_tuple(4, 5, subpel_var16x32_c, 0),
+ make_tuple(4, 4, subpel_var16x16_c, 0),
+ make_tuple(4, 3, subpel_var16x8_c, 0),
+ make_tuple(3, 4, subpel_var8x16_c, 0),
+ make_tuple(3, 3, subpel_var8x8_c, 0),
+ make_tuple(3, 2, subpel_var8x4_c, 0),
+ make_tuple(2, 3, subpel_var4x8_c, 0),
+ make_tuple(2, 2, subpel_var4x4_c, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
+ vpx_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
+ vpx_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
+ vpx_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
+ vpx_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
+ vpx_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
+ vpx_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
+ vpx_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
+ vpx_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
+ vpx_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
+ make_tuple(6, 5, subpel_avg_var64x32_c, 0),
+ make_tuple(5, 6, subpel_avg_var32x64_c, 0),
+ make_tuple(5, 5, subpel_avg_var32x32_c, 0),
+ make_tuple(5, 4, subpel_avg_var32x16_c, 0),
+ make_tuple(4, 5, subpel_avg_var16x32_c, 0),
+ make_tuple(4, 4, subpel_avg_var16x16_c, 0),
+ make_tuple(4, 3, subpel_avg_var16x8_c, 0),
+ make_tuple(3, 4, subpel_avg_var8x16_c, 0),
+ make_tuple(3, 3, subpel_avg_var8x8_c, 0),
+ make_tuple(3, 2, subpel_avg_var8x4_c, 0),
+ make_tuple(2, 3, subpel_avg_var4x8_c, 0),
+ make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
+typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
+typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
+typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
+ VpxHBDSubpelAvgVarianceTest;
+
+TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
+TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
+TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
+TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
+TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
+TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
+
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
+const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
+const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
+const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
+
+const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
+const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
+const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
+const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
+
+const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
+const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
+const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
+const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDMseTest, ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
+ make_tuple(4, 4, highbd_12_mse16x8_c),
+ make_tuple(4, 4, highbd_12_mse8x16_c),
+ make_tuple(4, 4, highbd_12_mse8x8_c),
+ make_tuple(4, 4, highbd_10_mse16x16_c),
+ make_tuple(4, 4, highbd_10_mse16x8_c),
+ make_tuple(4, 4, highbd_10_mse8x16_c),
+ make_tuple(4, 4, highbd_10_mse8x8_c),
+ make_tuple(4, 4, highbd_8_mse16x16_c),
+ make_tuple(4, 4, highbd_8_mse16x8_c),
+ make_tuple(4, 4, highbd_8_mse8x16_c),
+ make_tuple(4, 4, highbd_8_mse8x8_c)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
+const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
+const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
+const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
+const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
+const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
+const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
+const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
+const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
+const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
+const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
+const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
+const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
+const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
+const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
+const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
+const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
+const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
+const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
+const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
+const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
+const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
+const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
+const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
+const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
+const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
+const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
+const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
+const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
+const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
+const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
+const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
+const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
+const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
+const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
+const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
+const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
+const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
+const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_c, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_c, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_c, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_c, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_c, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_c, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_c, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_c, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_c, 12),
+ make_tuple(3, 2, highbd_12_variance8x4_c, 12),
+ make_tuple(2, 3, highbd_12_variance4x8_c, 12),
+ make_tuple(2, 2, highbd_12_variance4x4_c, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_c, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_c, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_c, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_c, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_c, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_c, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_c, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_c, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_c, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_c, 10),
+ make_tuple(3, 2, highbd_10_variance8x4_c, 10),
+ make_tuple(2, 3, highbd_10_variance4x8_c, 10),
+ make_tuple(2, 2, highbd_10_variance4x4_c, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_c, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_c, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_c, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_c, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_c, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_c, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_c, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_c, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_c, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_c, 8),
+ make_tuple(3, 2, highbd_8_variance8x4_c, 8),
+ make_tuple(2, 3, highbd_8_variance4x8_c, 8),
+ make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+
+const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
+ vpx_highbd_8_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
+ vpx_highbd_8_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
+ vpx_highbd_8_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
+ vpx_highbd_8_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
+ vpx_highbd_8_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
+ vpx_highbd_8_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
+ vpx_highbd_8_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
+ vpx_highbd_8_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
+ vpx_highbd_8_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
+ vpx_highbd_8_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
+ vpx_highbd_8_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
+ vpx_highbd_8_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
+ vpx_highbd_8_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
+ vpx_highbd_10_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
+ vpx_highbd_10_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
+ vpx_highbd_10_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
+ vpx_highbd_10_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
+ vpx_highbd_10_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
+ vpx_highbd_10_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
+ vpx_highbd_10_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
+ vpx_highbd_10_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
+ vpx_highbd_10_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
+ vpx_highbd_10_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
+ vpx_highbd_10_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
+ vpx_highbd_10_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
+ vpx_highbd_10_sub_pixel_variance4x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
+ vpx_highbd_12_sub_pixel_variance64x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
+ vpx_highbd_12_sub_pixel_variance64x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
+ vpx_highbd_12_sub_pixel_variance32x64_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
+ vpx_highbd_12_sub_pixel_variance32x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
+ vpx_highbd_12_sub_pixel_variance32x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
+ vpx_highbd_12_sub_pixel_variance16x32_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
+ vpx_highbd_12_sub_pixel_variance16x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
+ vpx_highbd_12_sub_pixel_variance16x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
+ vpx_highbd_12_sub_pixel_variance8x16_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
+ vpx_highbd_12_sub_pixel_variance8x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
+ vpx_highbd_12_sub_pixel_variance8x4_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
+ vpx_highbd_12_sub_pixel_variance4x8_c;
+const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
+ vpx_highbd_12_sub_pixel_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
+ vpx_highbd_8_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
+ vpx_highbd_10_sub_pixel_avg_variance4x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x8_c;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
+ vpx_highbd_12_sub_pixel_avg_variance4x4_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
+ make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
+ make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
+ make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
+ make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
+ make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
+ make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
+ make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
+ make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
+INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+
+INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_mmx));
+
+const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
+const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
+const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
+const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
+const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
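+// For the non-high-bit-depth tests the final tuple element is 0, i.e. the
+// plain 8-bit code path.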
INSTANTIATE_TEST_CASE_P(
- MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
+ MMX, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
+ make_tuple(4, 3, variance16x8_mmx, 0),
+ make_tuple(3, 4, variance8x16_mmx, 0),
+ make_tuple(3, 3, variance8x8_mmx, 0),
+ make_tuple(2, 2, variance4x4_mmx, 0)));
+
+const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
+const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
+const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
+const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
+const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
+ make_tuple(4, 3, subpel_var16x8_mmx, 0),
+ make_tuple(3, 4, subpel_var8x16_mmx, 0),
+ make_tuple(3, 3, subpel_var8x8_mmx, 0),
+ make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+#endif // HAVE_MMX
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_sse2));
+
+const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
+const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
+const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
+const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
+ make_tuple(4, 3, mse16x8_sse2),
+ make_tuple(3, 4, mse8x16_sse2),
+ make_tuple(3, 3, mse8x8_sse2)));
+
+const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
+const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
+const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
+const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
+const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
+const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
+const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
+const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
+const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
+const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
+const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
+const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
+const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
-#endif
-#endif // CONFIG_VP8_ENCODER
+ SSE2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
+ make_tuple(6, 5, variance64x32_sse2, 0),
+ make_tuple(5, 6, variance32x64_sse2, 0),
+ make_tuple(5, 5, variance32x32_sse2, 0),
+ make_tuple(5, 4, variance32x16_sse2, 0),
+ make_tuple(4, 5, variance16x32_sse2, 0),
+ make_tuple(4, 4, variance16x16_sse2, 0),
+ make_tuple(4, 3, variance16x8_sse2, 0),
+ make_tuple(3, 4, variance8x16_sse2, 0),
+ make_tuple(3, 3, variance8x8_sse2, 0),
+ make_tuple(3, 2, variance8x4_sse2, 0),
+ make_tuple(2, 3, variance4x8_sse2, 0),
+ make_tuple(2, 2, variance4x4_sse2, 0)));
-} // namespace vp8
-
-// -----------------------------------------------------------------------------
-// VP9 test cases.
-
-namespace vp9 {
-
-#if CONFIG_VP9_ENCODER
-typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
-typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
-typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
-
-TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
-TEST_P(VP9VarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
-TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
-INSTANTIATE_TEST_CASE_P(
- C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
-
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
- MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
-#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x64_sse2 =
+ vpx_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc subpel_variance64x32_sse2 =
+ vpx_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x64_sse2 =
+ vpx_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc subpel_variance32x32_sse2 =
+ vpx_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc subpel_variance32x16_sse2 =
+ vpx_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x32_sse2 =
+ vpx_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc subpel_variance16x16_sse2 =
+ vpx_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc subpel_variance16x8_sse2 =
+ vpx_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x16_sse2 =
+ vpx_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
+const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
+ SSE2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_variance4x4_sse, 0)));
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
+ vpx_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
+ vpx_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
+ vpx_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
+ vpx_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
+ vpx_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
+ vpx_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
+ vpx_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
+ vpx_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
+ vpx_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
+ vpx_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
+ vpx_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
+ vpx_sub_pixel_avg_variance4x8_sse;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
+ vpx_sub_pixel_avg_variance4x4_sse;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
+ SSE2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+#endif // CONFIG_USE_X86INC
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+#if CONFIG_VP9_HIGHBITDEPTH
+/* TODO(debargha): This test does not support the highbd version
+const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
+const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
+const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
+const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
+const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
+const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
+const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
+
+const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
+const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
+const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
+const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
INSTANTIATE_TEST_CASE_P(
- SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
-#endif
-#endif
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+*/
+
+const VarianceMxNFunc highbd_12_variance64x64_sse2 =
+ vpx_highbd_12_variance64x64_sse2;
+const VarianceMxNFunc highbd_12_variance64x32_sse2 =
+ vpx_highbd_12_variance64x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x64_sse2 =
+ vpx_highbd_12_variance32x64_sse2;
+const VarianceMxNFunc highbd_12_variance32x32_sse2 =
+ vpx_highbd_12_variance32x32_sse2;
+const VarianceMxNFunc highbd_12_variance32x16_sse2 =
+ vpx_highbd_12_variance32x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x32_sse2 =
+ vpx_highbd_12_variance16x32_sse2;
+const VarianceMxNFunc highbd_12_variance16x16_sse2 =
+ vpx_highbd_12_variance16x16_sse2;
+const VarianceMxNFunc highbd_12_variance16x8_sse2 =
+ vpx_highbd_12_variance16x8_sse2;
+const VarianceMxNFunc highbd_12_variance8x16_sse2 =
+ vpx_highbd_12_variance8x16_sse2;
+const VarianceMxNFunc highbd_12_variance8x8_sse2 =
+ vpx_highbd_12_variance8x8_sse2;
+const VarianceMxNFunc highbd_10_variance64x64_sse2 =
+ vpx_highbd_10_variance64x64_sse2;
+const VarianceMxNFunc highbd_10_variance64x32_sse2 =
+ vpx_highbd_10_variance64x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x64_sse2 =
+ vpx_highbd_10_variance32x64_sse2;
+const VarianceMxNFunc highbd_10_variance32x32_sse2 =
+ vpx_highbd_10_variance32x32_sse2;
+const VarianceMxNFunc highbd_10_variance32x16_sse2 =
+ vpx_highbd_10_variance32x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x32_sse2 =
+ vpx_highbd_10_variance16x32_sse2;
+const VarianceMxNFunc highbd_10_variance16x16_sse2 =
+ vpx_highbd_10_variance16x16_sse2;
+const VarianceMxNFunc highbd_10_variance16x8_sse2 =
+ vpx_highbd_10_variance16x8_sse2;
+const VarianceMxNFunc highbd_10_variance8x16_sse2 =
+ vpx_highbd_10_variance8x16_sse2;
+const VarianceMxNFunc highbd_10_variance8x8_sse2 =
+ vpx_highbd_10_variance8x8_sse2;
+const VarianceMxNFunc highbd_8_variance64x64_sse2 =
+ vpx_highbd_8_variance64x64_sse2;
+const VarianceMxNFunc highbd_8_variance64x32_sse2 =
+ vpx_highbd_8_variance64x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x64_sse2 =
+ vpx_highbd_8_variance32x64_sse2;
+const VarianceMxNFunc highbd_8_variance32x32_sse2 =
+ vpx_highbd_8_variance32x32_sse2;
+const VarianceMxNFunc highbd_8_variance32x16_sse2 =
+ vpx_highbd_8_variance32x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x32_sse2 =
+ vpx_highbd_8_variance16x32_sse2;
+const VarianceMxNFunc highbd_8_variance16x16_sse2 =
+ vpx_highbd_8_variance16x16_sse2;
+const VarianceMxNFunc highbd_8_variance16x8_sse2 =
+ vpx_highbd_8_variance16x8_sse2;
+const VarianceMxNFunc highbd_8_variance8x16_sse2 =
+ vpx_highbd_8_variance8x16_sse2;
+const VarianceMxNFunc highbd_8_variance8x8_sse2 =
+ vpx_highbd_8_variance8x8_sse2;
+
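+// Note: the 8x4, 4x8 and 4x4 high-bit-depth sizes are not covered by the
+// SSE2 run below, unlike the C instantiation above.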
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
+ make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
+ make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+
+#if CONFIG_USE_X86INC
+const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+ vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VpxHBDSubpelAvgVarianceTest,
+ ::testing::Values(
+ make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+ make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+ make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+ make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+ make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+ make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+ make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+ make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+ make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+ make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+ make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+ make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+ make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+ make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+ make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+ make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+ make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+ make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+ make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+ make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+ make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+ make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+ make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+ make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+ make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+ make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+ make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+ make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+ make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+ make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+ make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+ make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+ make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
+ vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+ vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+ vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+ vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+ vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+ vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+ vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+ vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+ vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+ vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+ vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+ vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+ vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
+ SSSE3, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+ vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+ vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+ vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+ vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+ vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+ vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+ vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+ vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+ vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+ vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+ vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+ vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+ vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
- SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
-#endif
-#endif
-#endif // CONFIG_VP9_ENCODER
+ SSSE3, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+#endif // CONFIG_USE_X86INC
+#endif // HAVE_SSSE3
-} // namespace vp9
+#if HAVE_AVX2
+const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
+INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
+const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
+const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
+const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
+const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
+ make_tuple(6, 5, variance64x32_avx2, 0),
+ make_tuple(5, 5, variance32x32_avx2, 0),
+ make_tuple(5, 4, variance32x16_avx2, 0),
+ make_tuple(4, 4, variance16x16_avx2, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_avx2 =
+ vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+ vpx_sub_pixel_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+ vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+ vpx_sub_pixel_avg_variance32x32_avx2;
+INSTANTIATE_TEST_CASE_P(
+ AVX2, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+#endif // HAVE_AVX2
+
+#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxVarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+ make_tuple(3, 3, variance8x8_media, 0)));
+
+const SubpixVarMxNFunc subpel_variance16x16_media =
+ vpx_sub_pixel_variance16x16_media;
+const SubpixVarMxNFunc subpel_variance8x8_media =
+ vpx_sub_pixel_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+ MEDIA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+ make_tuple(3, 3, subpel_variance8x8_media, 0)));
+#endif // HAVE_MEDIA
+
+#if HAVE_NEON
+const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+
+const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
+INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+
+const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
+const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
+const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
+const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
+const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
+const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
+const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
+const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(4, 4, variance16x16_neon, 0),
+ make_tuple(4, 3, variance16x8_neon, 0),
+ make_tuple(3, 4, variance8x16_neon, 0),
+ make_tuple(3, 3, variance8x8_neon, 0)));
+
+const SubpixVarMxNFunc subpel_variance64x64_neon =
+ vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+ vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+ vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
+INSTANTIATE_TEST_CASE_P(
+ NEON, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(4, 4, subpel_variance16x16_neon, 0),
+ make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+#endif // HAVE_NEON
+
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
+ ::testing::Values(vpx_get_mb_ss_msa));
+
+const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
+ ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+
+const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
+const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
+const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
+const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
+INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
+ ::testing::Values(make_tuple(4, 4, mse16x16_msa),
+ make_tuple(4, 3, mse16x8_msa),
+ make_tuple(3, 4, mse8x16_msa),
+ make_tuple(3, 3, mse8x8_msa)));
+
+const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
+const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
+const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
+const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
+const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
+const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
+const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
+const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
+const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
+const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
+const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
+const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
+const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxVarianceTest,
+ ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
+ make_tuple(6, 5, variance64x32_msa, 0),
+ make_tuple(5, 6, variance32x64_msa, 0),
+ make_tuple(5, 5, variance32x32_msa, 0),
+ make_tuple(5, 4, variance32x16_msa, 0),
+ make_tuple(4, 5, variance16x32_msa, 0),
+ make_tuple(4, 4, variance16x16_msa, 0),
+ make_tuple(4, 3, variance16x8_msa, 0),
+ make_tuple(3, 4, variance8x16_msa, 0),
+ make_tuple(3, 3, variance8x8_msa, 0),
+ make_tuple(3, 2, variance8x4_msa, 0),
+ make_tuple(2, 3, variance4x8_msa, 0),
+ make_tuple(2, 2, variance4x4_msa, 0)));
+
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance16x16_msa =
+ vpx_sub_pixel_variance16x16_msa;
+const SubpixVarMxNFunc subpel_variance16x32_msa =
+ vpx_sub_pixel_variance16x32_msa;
+const SubpixVarMxNFunc subpel_variance32x16_msa =
+ vpx_sub_pixel_variance32x16_msa;
+const SubpixVarMxNFunc subpel_variance32x32_msa =
+ vpx_sub_pixel_variance32x32_msa;
+const SubpixVarMxNFunc subpel_variance32x64_msa =
+ vpx_sub_pixel_variance32x64_msa;
+const SubpixVarMxNFunc subpel_variance64x32_msa =
+ vpx_sub_pixel_variance64x32_msa;
+const SubpixVarMxNFunc subpel_variance64x64_msa =
+ vpx_sub_pixel_variance64x64_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
+ make_tuple(2, 3, subpel_variance4x8_msa, 0),
+ make_tuple(3, 2, subpel_variance8x4_msa, 0),
+ make_tuple(3, 3, subpel_variance8x8_msa, 0),
+ make_tuple(3, 4, subpel_variance8x16_msa, 0),
+ make_tuple(4, 3, subpel_variance16x8_msa, 0),
+ make_tuple(4, 4, subpel_variance16x16_msa, 0),
+ make_tuple(4, 5, subpel_variance16x32_msa, 0),
+ make_tuple(5, 4, subpel_variance32x16_msa, 0),
+ make_tuple(5, 5, subpel_variance32x32_msa, 0),
+ make_tuple(5, 6, subpel_variance32x64_msa, 0),
+ make_tuple(6, 5, subpel_variance64x32_msa, 0),
+ make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
+ vpx_sub_pixel_avg_variance64x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
+ vpx_sub_pixel_avg_variance64x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
+ vpx_sub_pixel_avg_variance32x64_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
+ vpx_sub_pixel_avg_variance32x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
+ vpx_sub_pixel_avg_variance32x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
+ vpx_sub_pixel_avg_variance16x32_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
+ vpx_sub_pixel_avg_variance16x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
+ vpx_sub_pixel_avg_variance16x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
+ vpx_sub_pixel_avg_variance8x16_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
+ vpx_sub_pixel_avg_variance8x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
+ vpx_sub_pixel_avg_variance8x4_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
+ vpx_sub_pixel_avg_variance4x8_msa;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
+ vpx_sub_pixel_avg_variance4x4_msa;
+INSTANTIATE_TEST_CASE_P(
+ MSA, VpxSubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
+ make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
+ make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
+ make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
+ make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
+ make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
+ make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
+ make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
+ make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
+ make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
+ make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
+ make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
+ make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+#endif // HAVE_MSA
} // namespace
|
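One systematic change running through the patch above: the tests move from the vp9_-prefixed variance functions to the vpx_dsp versions, and every make_tuple call gains a trailing fourth element. A sketch of the new parameter shape (the tuple alias below is hypothetical, and reading the fourth element as a bit-depth selector with 0 meaning the plain 8-bit path is an assumption, not stated in the diff):

// Hypothetical alias for the params consumed by VpxSubpelVarianceTest:
typedef std::tr1::tuple<int, int, SubpixVarMxNFunc, int> SubpelVarianceParams;
// log2(width) = 6, log2(height) = 6 -> a 64x64 block on the 8-bit path:
make_tuple(6, 6, subpel_variance64x64_ssse3, 0);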
CWE-119
|
int l2w, int l2h, unsigned int *sse_ptr) {
int se = 0;
unsigned int sse = 0;
const int w = 1 << l2w, h = 1 << l2h;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int diff = ref[w * y + x] - src[w * y + x];
se += diff;
sse += diff * diff;
}
*sse_ptr = sse;
return sse - (((int64_t) se * se) >> (l2w + l2h));
|
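The truncated lines_before field above is the test's reference variance helper with its loop-closing braces elided. A self-contained sketch of the full computation (the name reference_variance and the uint8_t pointer types are assumptions; the formula variance = SSE - (sum of errors)^2 / N follows directly from the snippet, with N = 2^(l2w + l2h)):

#include <stdint.h>

static uint32_t reference_variance(const uint8_t *ref, const uint8_t *src,
                                   int l2w, int l2h, unsigned int *sse_ptr) {
  int64_t se = 0;    /* running sum of signed errors */
  uint64_t sse = 0;  /* running sum of squared errors */
  const int w = 1 << l2w, h = 1 << l2h;
  for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
      const int diff = ref[w * y + x] - src[w * y + x];
      se += diff;
      sse += diff * diff;
    }
  }
  *sse_ptr = (unsigned int)sse;
  /* N is a power of two, so the division se*se/N becomes a right shift */
  return (uint32_t)(sse - (((int64_t)se * se) >> (l2w + l2h)));
}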
static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
switch (bit_depth) {
case VPX_BITS_12:
*sse = (*sse + 128) >> 8;
*se = (*se + 8) >> 4;
break;
case VPX_BITS_10:
*sse = (*sse + 8) >> 4;
*se = (*se + 2) >> 2;
break;
case VPX_BITS_8:
default:
break;
|
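RoundHighBitDepth in the lines_after field rescales high-bit-depth error sums back to the 8-bit domain: 12-bit samples carry 2^4 = 16 times the amplitude of 8-bit samples, so the linear sum is divided by 16 (>> 4) and the squared sum by 16^2 = 256 (>> 8), and adding half the divisor first rounds to nearest rather than truncating. Worked numbers as a sketch:

/* 12-bit domain example: se = 100, sse = 1000           */
/* se:  (100 + 8) >> 4 == 6, i.e. round(100 / 16.0)      */
/* sse: (1000 + 128) >> 8 == 4, i.e. round(1000 / 256.0) */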
150,894 |
LosslessTestLarge()
: EncoderTest(GET_PARAM(0)),
psnr_(kMaxPsnr),
nframes_(0),
encoding_mode_(GET_PARAM(1)) {
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
LosslessTest()
: EncoderTest(GET_PARAM(0)),
psnr_(kMaxPsnr),
nframes_(0),
encoding_mode_(GET_PARAM(1)) {
}
|
@@ -7,8 +7,10 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
+
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
@@ -19,23 +21,34 @@
const int kMaxPsnr = 100;
-class LosslessTestLarge : public ::libvpx_test::EncoderTest,
+class LosslessTest : public ::libvpx_test::EncoderTest,
public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
- LosslessTestLarge()
+ LosslessTest()
: EncoderTest(GET_PARAM(0)),
psnr_(kMaxPsnr),
nframes_(0),
encoding_mode_(GET_PARAM(1)) {
}
- virtual ~LosslessTestLarge() {}
+ virtual ~LosslessTest() {}
virtual void SetUp() {
InitializeConfig();
SetMode(encoding_mode_);
}
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ // Only call Control if quantizer > 0 to verify that using quantizer
+ // alone will activate lossless
+ if (cfg_.rc_max_quantizer > 0 || cfg_.rc_min_quantizer > 0) {
+ encoder->Control(VP9E_SET_LOSSLESS, 1);
+ }
+ }
+ }
+
virtual void BeginPassHook(unsigned int /*pass*/) {
psnr_ = kMaxPsnr;
nframes_ = 0;
@@ -56,7 +69,7 @@
libvpx_test::TestMode encoding_mode_;
};
-TEST_P(LosslessTestLarge, TestLossLessEncoding) {
+TEST_P(LosslessTest, TestLossLessEncoding) {
const vpx_rational timebase = { 33333333, 1000000000 };
cfg_.g_timebase = timebase;
cfg_.rc_target_bitrate = 2000;
@@ -74,7 +87,7 @@
EXPECT_GE(psnr_lossless, kMaxPsnr);
}
-TEST_P(LosslessTestLarge, TestLossLessEncoding444) {
+TEST_P(LosslessTest, TestLossLessEncoding444) {
libvpx_test::Y4mVideoSource video("rush_hour_444.y4m", 0, 10);
cfg_.g_profile = 1;
@@ -91,5 +104,31 @@
EXPECT_GE(psnr_lossless, kMaxPsnr);
}
-VP9_INSTANTIATE_TEST_CASE(LosslessTestLarge, ALL_TEST_MODES);
+TEST_P(LosslessTest, TestLossLessEncodingCtrl) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 2000;
+ cfg_.g_lag_in_frames = 25;
+ // Intentionally set Q > 0, to make sure control can be used to activate
+ // lossless
+ cfg_.rc_min_quantizer = 10;
+ cfg_.rc_max_quantizer = 20;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 10);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ const double psnr_lossless = GetMinPsnr();
+ EXPECT_GE(psnr_lossless, kMaxPsnr);
+}
+
+VP9_INSTANTIATE_TEST_CASE(LosslessTest,
+ ::testing::Values(::libvpx_test::kRealTime,
+ ::libvpx_test::kOnePassGood,
+ ::libvpx_test::kTwoPassGood));
+
+VP10_INSTANTIATE_TEST_CASE(LosslessTest,
+ ::testing::Values(::libvpx_test::kOnePassGood,
+ ::libvpx_test::kTwoPassGood));
} // namespace
|
CWE-119
| null |
LosslessTest()
|
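The patch in this record renames LosslessTestLarge to LosslessTest and adds a PreEncodeFrameHook so the suite also covers enabling lossless mode through the codec control rather than through a zero quantizer range. A condensed sketch of that second route using the standard libvpx encoder API (error handling omitted):

#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"

static void init_lossless_via_control(vpx_codec_ctx_t *codec,
                                      vpx_codec_enc_cfg_t *cfg) {
  vpx_codec_enc_config_default(vpx_codec_vp9_cx(), cfg, 0);
  /* Keep Q > 0 (the test uses 10..20) so only the control can enable it. */
  cfg->rc_min_quantizer = 10;
  cfg->rc_max_quantizer = 20;
  vpx_codec_enc_init(codec, vpx_codec_vp9_cx(), cfg, 0);
  vpx_codec_control(codec, VP9E_SET_LOSSLESS, 1);
}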
150,895 |
string DecodeFile(const string& filename, int num_threads) {
libvpx_test::WebMVideoSource video(filename);
video.Init();
vpx_codec_dec_cfg_t cfg = {0};
cfg.threads = num_threads;
libvpx_test::VP9Decoder decoder(cfg, 0);
libvpx_test::MD5 md5;
for (video.Begin(); video.cxdata(); video.Next()) {
const vpx_codec_err_t res =
decoder.DecodeFrame(video.cxdata(), video.frame_size());
if (res != VPX_CODEC_OK) {
EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
break;
}
libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
const vpx_image_t *img = NULL;
while ((img = dec_iter.Next())) {
md5.Add(img);
}
}
return string(md5.Get());
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
string DecodeFile(const string& filename, int num_threads) {
libvpx_test::WebMVideoSource video(filename);
video.Init();
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.threads = num_threads;
libvpx_test::VP9Decoder decoder(cfg, 0);
libvpx_test::MD5 md5;
for (video.Begin(); video.cxdata(); video.Next()) {
const vpx_codec_err_t res =
decoder.DecodeFrame(video.cxdata(), video.frame_size());
if (res != VPX_CODEC_OK) {
EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
break;
}
libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
const vpx_image_t *img = NULL;
while ((img = dec_iter.Next())) {
md5.Add(img);
}
}
return string(md5.Get());
}
|
@@ -11,28 +11,40 @@
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/md5_helper.h"
+#if CONFIG_WEBM_IO
#include "test/webm_video_source.h"
-#include "vp9/decoder/vp9_thread.h"
+#endif
+#include "vpx_util/vpx_thread.h"
namespace {
using std::string;
-class VP9WorkerThreadTest : public ::testing::TestWithParam<bool> {
+class VPxWorkerThreadTest : public ::testing::TestWithParam<bool> {
protected:
- virtual ~VP9WorkerThreadTest() {}
+ virtual ~VPxWorkerThreadTest() {}
virtual void SetUp() {
- vp9_worker_init(&worker_);
+ vpx_get_worker_interface()->init(&worker_);
}
virtual void TearDown() {
- vp9_worker_end(&worker_);
+ vpx_get_worker_interface()->end(&worker_);
}
- VP9Worker worker_;
+ void Run(VPxWorker* worker) {
+ const bool synchronous = GetParam();
+ if (synchronous) {
+ vpx_get_worker_interface()->execute(worker);
+ } else {
+ vpx_get_worker_interface()->launch(worker);
+ }
+ }
+
+ VPxWorker worker_;
};
int ThreadHook(void* data, void* return_value) {
@@ -41,11 +53,12 @@
return *reinterpret_cast<int*>(return_value);
}
-TEST_P(VP9WorkerThreadTest, HookSuccess) {
- EXPECT_NE(vp9_worker_sync(&worker_), 0); // should be a no-op.
+TEST_P(VPxWorkerThreadTest, HookSuccess) {
+ // should be a no-op.
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
for (int i = 0; i < 2; ++i) {
- EXPECT_NE(vp9_worker_reset(&worker_), 0);
+ EXPECT_NE(vpx_get_worker_interface()->reset(&worker_), 0);
int hook_data = 0;
int return_value = 1; // return successfully from the hook
@@ -53,22 +66,18 @@
worker_.data1 = &hook_data;
worker_.data2 = &return_value;
- const bool synchronous = GetParam();
- if (synchronous) {
- vp9_worker_execute(&worker_);
- } else {
- vp9_worker_launch(&worker_);
- }
- EXPECT_NE(vp9_worker_sync(&worker_), 0);
+ Run(&worker_);
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
EXPECT_FALSE(worker_.had_error);
EXPECT_EQ(5, hook_data);
- EXPECT_NE(vp9_worker_sync(&worker_), 0); // should be a no-op.
+ // should be a no-op.
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
}
}
-TEST_P(VP9WorkerThreadTest, HookFailure) {
- EXPECT_NE(vp9_worker_reset(&worker_), 0);
+TEST_P(VPxWorkerThreadTest, HookFailure) {
+ EXPECT_NE(vpx_get_worker_interface()->reset(&worker_), 0);
int hook_data = 0;
int return_value = 0; // return failure from the hook
@@ -76,33 +85,85 @@
worker_.data1 = &hook_data;
worker_.data2 = &return_value;
- const bool synchronous = GetParam();
- if (synchronous) {
- vp9_worker_execute(&worker_);
- } else {
- vp9_worker_launch(&worker_);
- }
- EXPECT_FALSE(vp9_worker_sync(&worker_));
+ Run(&worker_);
+ EXPECT_FALSE(vpx_get_worker_interface()->sync(&worker_));
EXPECT_EQ(1, worker_.had_error);
// Ensure _reset() clears the error and _launch() can be called again.
return_value = 1;
- EXPECT_NE(vp9_worker_reset(&worker_), 0);
+ EXPECT_NE(vpx_get_worker_interface()->reset(&worker_), 0);
EXPECT_FALSE(worker_.had_error);
- vp9_worker_launch(&worker_);
- EXPECT_NE(vp9_worker_sync(&worker_), 0);
+ vpx_get_worker_interface()->launch(&worker_);
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
EXPECT_FALSE(worker_.had_error);
}
+TEST_P(VPxWorkerThreadTest, EndWithoutSync) {
+ // Create a large number of threads to increase the chances of detecting a
+ // race. Doing more work in the hook is no guarantee as any race would occur
+ // post hook execution in the main thread loop driver.
+ static const int kNumWorkers = 64;
+ VPxWorker workers[kNumWorkers];
+ int hook_data[kNumWorkers];
+ int return_value[kNumWorkers];
+
+ for (int n = 0; n < kNumWorkers; ++n) {
+ vpx_get_worker_interface()->init(&workers[n]);
+ return_value[n] = 1; // return successfully from the hook
+ workers[n].hook = ThreadHook;
+ workers[n].data1 = &hook_data[n];
+ workers[n].data2 = &return_value[n];
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int n = 0; n < kNumWorkers; ++n) {
+ EXPECT_NE(vpx_get_worker_interface()->reset(&workers[n]), 0);
+ hook_data[n] = 0;
+ }
+
+ for (int n = 0; n < kNumWorkers; ++n) {
+ Run(&workers[n]);
+ }
+
+ for (int n = kNumWorkers - 1; n >= 0; --n) {
+ vpx_get_worker_interface()->end(&workers[n]);
+ }
+ }
+}
+
+TEST(VPxWorkerThreadTest, TestInterfaceAPI) {
+ EXPECT_EQ(0, vpx_set_worker_interface(NULL));
+ EXPECT_TRUE(vpx_get_worker_interface() != NULL);
+ for (int i = 0; i < 6; ++i) {
+ VPxWorkerInterface winterface = *vpx_get_worker_interface();
+ switch (i) {
+ default:
+ case 0: winterface.init = NULL; break;
+ case 1: winterface.reset = NULL; break;
+ case 2: winterface.sync = NULL; break;
+ case 3: winterface.launch = NULL; break;
+ case 4: winterface.execute = NULL; break;
+ case 5: winterface.end = NULL; break;
+ }
+ EXPECT_EQ(0, vpx_set_worker_interface(&winterface));
+ }
+}
+
// -----------------------------------------------------------------------------
// Multi-threaded decode tests
+#if CONFIG_WEBM_IO
+struct FileList {
+ const char *name;
+ const char *expected_md5;
+};
+
// Decodes |filename| with |num_threads|. Returns the md5 of the decoded frames.
string DecodeFile(const string& filename, int num_threads) {
libvpx_test::WebMVideoSource video(filename);
video.Init();
- vpx_codec_dec_cfg_t cfg = {0};
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.threads = num_threads;
libvpx_test::VP9Decoder decoder(cfg, 0);
@@ -126,39 +187,77 @@
return string(md5.Get());
}
-TEST(VP9DecodeMTTest, MTDecode) {
- // no tiles or frame parallel; this exercises loop filter threading.
- EXPECT_STREQ("b35a1b707b28e82be025d960aba039bc",
- DecodeFile("vp90-2-03-size-226x226.webm", 2).c_str());
+void DecodeFiles(const FileList files[]) {
+ for (const FileList *iter = files; iter->name != NULL; ++iter) {
+ SCOPED_TRACE(iter->name);
+ for (int t = 2; t <= 8; ++t) {
+ EXPECT_EQ(iter->expected_md5, DecodeFile(iter->name, t))
+ << "threads = " << t;
+ }
+ }
}
-TEST(VP9DecodeMTTest, MTDecode2) {
- static const struct {
- const char *name;
- const char *expected_md5;
- } files[] = {
+// Trivial serialized thread worker interface implementation.
+// Note any worker that requires synchronization between other workers will
+// hang.
+namespace impl {
+
+void Init(VPxWorker *const worker) { memset(worker, 0, sizeof(*worker)); }
+int Reset(VPxWorker *const /*worker*/) { return 1; }
+int Sync(VPxWorker *const worker) { return !worker->had_error; }
+
+void Execute(VPxWorker *const worker) {
+ worker->had_error |= !worker->hook(worker->data1, worker->data2);
+}
+
+void Launch(VPxWorker *const worker) { Execute(worker); }
+void End(VPxWorker *const /*worker*/) {}
+
+} // namespace impl
+
+TEST(VPxWorkerThreadTest, TestSerialInterface) {
+ static const VPxWorkerInterface serial_interface = {
+ impl::Init, impl::Reset, impl::Sync, impl::Launch, impl::Execute, impl::End
+ };
+ // TODO(jzern): Avoid using a file that will use the row-based thread
+ // loopfilter, with the simple serialized implementation it will hang. This is
+ // due to its expectation that rows will be run in parallel as they wait on
+ // progress in the row above before proceeding.
+ static const char expected_md5[] = "b35a1b707b28e82be025d960aba039bc";
+ static const char filename[] = "vp90-2-03-size-226x226.webm";
+ VPxWorkerInterface default_interface = *vpx_get_worker_interface();
+
+ EXPECT_NE(vpx_set_worker_interface(&serial_interface), 0);
+ EXPECT_EQ(expected_md5, DecodeFile(filename, 2));
+
+ // Reset the interface.
+ EXPECT_NE(vpx_set_worker_interface(&default_interface), 0);
+ EXPECT_EQ(expected_md5, DecodeFile(filename, 2));
+}
+
+TEST(VP9DecodeMultiThreadedTest, Decode) {
+ // no tiles or frame parallel; this exercises loop filter threading.
+ EXPECT_EQ("b35a1b707b28e82be025d960aba039bc",
+ DecodeFile("vp90-2-03-size-226x226.webm", 2));
+}
+
+TEST(VP9DecodeMultiThreadedTest, Decode2) {
+ static const FileList files[] = {
{ "vp90-2-08-tile_1x2_frame_parallel.webm",
"68ede6abd66bae0a2edf2eb9232241b6" },
{ "vp90-2-08-tile_1x4_frame_parallel.webm",
"368ebc6ebf3a5e478d85b2c3149b2848" },
{ "vp90-2-08-tile_1x8_frame_parallel.webm",
"17e439da2388aff3a0f69cb22579c6c1" },
+ { NULL, NULL }
};
- for (int i = 0; i < static_cast<int>(sizeof(files) / sizeof(files[0])); ++i) {
- for (int t = 2; t <= 8; ++t) {
- EXPECT_STREQ(files[i].expected_md5, DecodeFile(files[i].name, t).c_str())
- << "threads = " << t;
- }
- }
+ DecodeFiles(files);
}
// Test tile quantity changes within one file.
-TEST(VP9DecodeMTTest, MTDecode3) {
- static const struct {
- const char *name;
- const char *expected_md5;
- } files[] = {
+TEST(VP9DecodeMultiThreadedTest, Decode3) {
+ static const FileList files[] = {
{ "vp90-2-14-resize-fp-tiles-1-16.webm",
"0cd5e632c326297e975f38949c31ea94" },
{ "vp90-2-14-resize-fp-tiles-1-2-4-8-16.webm",
@@ -203,16 +302,13 @@
"ae96f21f21b6370cc0125621b441fc52" },
{ "vp90-2-14-resize-fp-tiles-8-4.webm",
"3eb4f24f10640d42218f7fd7b9fd30d4" },
+ { NULL, NULL }
};
- for (int i = 0; i < static_cast<int>(sizeof(files) / sizeof(files[0])); ++i) {
- for (int t = 2; t <= 8; ++t) {
- EXPECT_STREQ(files[i].expected_md5, DecodeFile(files[i].name, t).c_str())
- << "threads = " << t;
- }
- }
+ DecodeFiles(files);
}
+#endif // CONFIG_WEBM_IO
-INSTANTIATE_TEST_CASE_P(Synchronous, VP9WorkerThreadTest, ::testing::Bool());
+INSTANTIATE_TEST_CASE_P(Synchronous, VPxWorkerThreadTest, ::testing::Bool());
} // namespace
|
CWE-119
|
vpx_codec_dec_cfg_t cfg = {0};
|
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
|
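The one-line fix captured by this record's lines_before/lines_after pair swaps C-style aggregate initialization for C++ value-initialization. Both zero every member of vpx_codec_dec_cfg_t; the value-initialized spelling stays correct and warning-clean (no missing-field-initializer diagnostics) as members are added. Illustrated with a hypothetical stand-in struct, not the real libvpx definition:

struct DecCfg { unsigned int threads, w, h; };  /* hypothetical stand-in */
DecCfg a = {0};       /* aggregate init: first member 0, the rest zeroed */
DecCfg b = DecCfg();  /* value init: every member zero-initialized */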
150,896 |
virtual void TearDown() {
vp9_worker_end(&worker_);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
virtual void TearDown() {
vpx_get_worker_interface()->end(&worker_);
}
|
@@ -11,28 +11,40 @@
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/md5_helper.h"
+#if CONFIG_WEBM_IO
#include "test/webm_video_source.h"
-#include "vp9/decoder/vp9_thread.h"
+#endif
+#include "vpx_util/vpx_thread.h"
namespace {
using std::string;
-class VP9WorkerThreadTest : public ::testing::TestWithParam<bool> {
+class VPxWorkerThreadTest : public ::testing::TestWithParam<bool> {
protected:
- virtual ~VP9WorkerThreadTest() {}
+ virtual ~VPxWorkerThreadTest() {}
virtual void SetUp() {
- vp9_worker_init(&worker_);
+ vpx_get_worker_interface()->init(&worker_);
}
virtual void TearDown() {
- vp9_worker_end(&worker_);
+ vpx_get_worker_interface()->end(&worker_);
}
- VP9Worker worker_;
+ void Run(VPxWorker* worker) {
+ const bool synchronous = GetParam();
+ if (synchronous) {
+ vpx_get_worker_interface()->execute(worker);
+ } else {
+ vpx_get_worker_interface()->launch(worker);
+ }
+ }
+
+ VPxWorker worker_;
};
int ThreadHook(void* data, void* return_value) {
@@ -41,11 +53,12 @@
return *reinterpret_cast<int*>(return_value);
}
-TEST_P(VP9WorkerThreadTest, HookSuccess) {
- EXPECT_NE(vp9_worker_sync(&worker_), 0); // should be a no-op.
+TEST_P(VPxWorkerThreadTest, HookSuccess) {
+ // should be a no-op.
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
for (int i = 0; i < 2; ++i) {
- EXPECT_NE(vp9_worker_reset(&worker_), 0);
+ EXPECT_NE(vpx_get_worker_interface()->reset(&worker_), 0);
int hook_data = 0;
int return_value = 1; // return successfully from the hook
@@ -53,22 +66,18 @@
worker_.data1 = &hook_data;
worker_.data2 = &return_value;
- const bool synchronous = GetParam();
- if (synchronous) {
- vp9_worker_execute(&worker_);
- } else {
- vp9_worker_launch(&worker_);
- }
- EXPECT_NE(vp9_worker_sync(&worker_), 0);
+ Run(&worker_);
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
EXPECT_FALSE(worker_.had_error);
EXPECT_EQ(5, hook_data);
- EXPECT_NE(vp9_worker_sync(&worker_), 0); // should be a no-op.
+ // should be a no-op.
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
}
}
-TEST_P(VP9WorkerThreadTest, HookFailure) {
- EXPECT_NE(vp9_worker_reset(&worker_), 0);
+TEST_P(VPxWorkerThreadTest, HookFailure) {
+ EXPECT_NE(vpx_get_worker_interface()->reset(&worker_), 0);
int hook_data = 0;
int return_value = 0; // return failure from the hook
@@ -76,33 +85,85 @@
worker_.data1 = &hook_data;
worker_.data2 = &return_value;
- const bool synchronous = GetParam();
- if (synchronous) {
- vp9_worker_execute(&worker_);
- } else {
- vp9_worker_launch(&worker_);
- }
- EXPECT_FALSE(vp9_worker_sync(&worker_));
+ Run(&worker_);
+ EXPECT_FALSE(vpx_get_worker_interface()->sync(&worker_));
EXPECT_EQ(1, worker_.had_error);
// Ensure _reset() clears the error and _launch() can be called again.
return_value = 1;
- EXPECT_NE(vp9_worker_reset(&worker_), 0);
+ EXPECT_NE(vpx_get_worker_interface()->reset(&worker_), 0);
EXPECT_FALSE(worker_.had_error);
- vp9_worker_launch(&worker_);
- EXPECT_NE(vp9_worker_sync(&worker_), 0);
+ vpx_get_worker_interface()->launch(&worker_);
+ EXPECT_NE(vpx_get_worker_interface()->sync(&worker_), 0);
EXPECT_FALSE(worker_.had_error);
}
+TEST_P(VPxWorkerThreadTest, EndWithoutSync) {
+ // Create a large number of threads to increase the chances of detecting a
+ // race. Doing more work in the hook is no guarantee as any race would occur
+ // post hook execution in the main thread loop driver.
+ static const int kNumWorkers = 64;
+ VPxWorker workers[kNumWorkers];
+ int hook_data[kNumWorkers];
+ int return_value[kNumWorkers];
+
+ for (int n = 0; n < kNumWorkers; ++n) {
+ vpx_get_worker_interface()->init(&workers[n]);
+ return_value[n] = 1; // return successfully from the hook
+ workers[n].hook = ThreadHook;
+ workers[n].data1 = &hook_data[n];
+ workers[n].data2 = &return_value[n];
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int n = 0; n < kNumWorkers; ++n) {
+ EXPECT_NE(vpx_get_worker_interface()->reset(&workers[n]), 0);
+ hook_data[n] = 0;
+ }
+
+ for (int n = 0; n < kNumWorkers; ++n) {
+ Run(&workers[n]);
+ }
+
+ for (int n = kNumWorkers - 1; n >= 0; --n) {
+ vpx_get_worker_interface()->end(&workers[n]);
+ }
+ }
+}
+
+TEST(VPxWorkerThreadTest, TestInterfaceAPI) {
+ EXPECT_EQ(0, vpx_set_worker_interface(NULL));
+ EXPECT_TRUE(vpx_get_worker_interface() != NULL);
+ for (int i = 0; i < 6; ++i) {
+ VPxWorkerInterface winterface = *vpx_get_worker_interface();
+ switch (i) {
+ default:
+ case 0: winterface.init = NULL; break;
+ case 1: winterface.reset = NULL; break;
+ case 2: winterface.sync = NULL; break;
+ case 3: winterface.launch = NULL; break;
+ case 4: winterface.execute = NULL; break;
+ case 5: winterface.end = NULL; break;
+ }
+ EXPECT_EQ(0, vpx_set_worker_interface(&winterface));
+ }
+}
+
// -----------------------------------------------------------------------------
// Multi-threaded decode tests
+#if CONFIG_WEBM_IO
+struct FileList {
+ const char *name;
+ const char *expected_md5;
+};
+
// Decodes |filename| with |num_threads|. Returns the md5 of the decoded frames.
string DecodeFile(const string& filename, int num_threads) {
libvpx_test::WebMVideoSource video(filename);
video.Init();
- vpx_codec_dec_cfg_t cfg = {0};
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.threads = num_threads;
libvpx_test::VP9Decoder decoder(cfg, 0);
@@ -126,39 +187,77 @@
return string(md5.Get());
}
-TEST(VP9DecodeMTTest, MTDecode) {
- // no tiles or frame parallel; this exercises loop filter threading.
- EXPECT_STREQ("b35a1b707b28e82be025d960aba039bc",
- DecodeFile("vp90-2-03-size-226x226.webm", 2).c_str());
+void DecodeFiles(const FileList files[]) {
+ for (const FileList *iter = files; iter->name != NULL; ++iter) {
+ SCOPED_TRACE(iter->name);
+ for (int t = 2; t <= 8; ++t) {
+ EXPECT_EQ(iter->expected_md5, DecodeFile(iter->name, t))
+ << "threads = " << t;
+ }
+ }
}
-TEST(VP9DecodeMTTest, MTDecode2) {
- static const struct {
- const char *name;
- const char *expected_md5;
- } files[] = {
+// Trivial serialized thread worker interface implementation.
+// Note any worker that requires synchronization between other workers will
+// hang.
+namespace impl {
+
+void Init(VPxWorker *const worker) { memset(worker, 0, sizeof(*worker)); }
+int Reset(VPxWorker *const /*worker*/) { return 1; }
+int Sync(VPxWorker *const worker) { return !worker->had_error; }
+
+void Execute(VPxWorker *const worker) {
+ worker->had_error |= !worker->hook(worker->data1, worker->data2);
+}
+
+void Launch(VPxWorker *const worker) { Execute(worker); }
+void End(VPxWorker *const /*worker*/) {}
+
+} // namespace impl
+
+TEST(VPxWorkerThreadTest, TestSerialInterface) {
+ static const VPxWorkerInterface serial_interface = {
+ impl::Init, impl::Reset, impl::Sync, impl::Launch, impl::Execute, impl::End
+ };
+ // TODO(jzern): Avoid using a file that will use the row-based thread
+ // loopfilter, with the simple serialized implementation it will hang. This is
+ // due to its expectation that rows will be run in parallel as they wait on
+ // progress in the row above before proceeding.
+ static const char expected_md5[] = "b35a1b707b28e82be025d960aba039bc";
+ static const char filename[] = "vp90-2-03-size-226x226.webm";
+ VPxWorkerInterface default_interface = *vpx_get_worker_interface();
+
+ EXPECT_NE(vpx_set_worker_interface(&serial_interface), 0);
+ EXPECT_EQ(expected_md5, DecodeFile(filename, 2));
+
+ // Reset the interface.
+ EXPECT_NE(vpx_set_worker_interface(&default_interface), 0);
+ EXPECT_EQ(expected_md5, DecodeFile(filename, 2));
+}
+
+TEST(VP9DecodeMultiThreadedTest, Decode) {
+ // no tiles or frame parallel; this exercises loop filter threading.
+ EXPECT_EQ("b35a1b707b28e82be025d960aba039bc",
+ DecodeFile("vp90-2-03-size-226x226.webm", 2));
+}
+
+TEST(VP9DecodeMultiThreadedTest, Decode2) {
+ static const FileList files[] = {
{ "vp90-2-08-tile_1x2_frame_parallel.webm",
"68ede6abd66bae0a2edf2eb9232241b6" },
{ "vp90-2-08-tile_1x4_frame_parallel.webm",
"368ebc6ebf3a5e478d85b2c3149b2848" },
{ "vp90-2-08-tile_1x8_frame_parallel.webm",
"17e439da2388aff3a0f69cb22579c6c1" },
+ { NULL, NULL }
};
- for (int i = 0; i < static_cast<int>(sizeof(files) / sizeof(files[0])); ++i) {
- for (int t = 2; t <= 8; ++t) {
- EXPECT_STREQ(files[i].expected_md5, DecodeFile(files[i].name, t).c_str())
- << "threads = " << t;
- }
- }
+ DecodeFiles(files);
}
// Test tile quantity changes within one file.
-TEST(VP9DecodeMTTest, MTDecode3) {
- static const struct {
- const char *name;
- const char *expected_md5;
- } files[] = {
+TEST(VP9DecodeMultiThreadedTest, Decode3) {
+ static const FileList files[] = {
{ "vp90-2-14-resize-fp-tiles-1-16.webm",
"0cd5e632c326297e975f38949c31ea94" },
{ "vp90-2-14-resize-fp-tiles-1-2-4-8-16.webm",
@@ -203,16 +302,13 @@
"ae96f21f21b6370cc0125621b441fc52" },
{ "vp90-2-14-resize-fp-tiles-8-4.webm",
"3eb4f24f10640d42218f7fd7b9fd30d4" },
+ { NULL, NULL }
};
- for (int i = 0; i < static_cast<int>(sizeof(files) / sizeof(files[0])); ++i) {
- for (int t = 2; t <= 8; ++t) {
- EXPECT_STREQ(files[i].expected_md5, DecodeFile(files[i].name, t).c_str())
- << "threads = " << t;
- }
- }
+ DecodeFiles(files);
}
+#endif // CONFIG_WEBM_IO
-INSTANTIATE_TEST_CASE_P(Synchronous, VP9WorkerThreadTest, ::testing::Bool());
+INSTANTIATE_TEST_CASE_P(Synchronous, VPxWorkerThreadTest, ::testing::Bool());
} // namespace
|
CWE-119
|
vp9_worker_end(&worker_);
|
vpx_get_worker_interface()->end(&worker_);
|
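Records 150,895 and 150,896 migrate the tests from the vp9_worker_* entry points to the pluggable interface in vpx_util/vpx_thread.h, where every operation is reached through the table returned by vpx_get_worker_interface(). The call pattern, sketched (caching the interface pointer is only for readability; ThreadHook is the hook defined in the test above):

static void run_one_worker(int *hook_data, int *return_value) {
  const VPxWorkerInterface *winterface = vpx_get_worker_interface();
  VPxWorker worker;
  winterface->init(&worker);
  worker.hook = ThreadHook;
  worker.data1 = hook_data;
  worker.data2 = return_value;
  winterface->launch(&worker);  /* ->execute() is the synchronous variant */
  winterface->sync(&worker);    /* returns nonzero when the hook succeeded */
  winterface->end(&worker);
}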
150,897 |
void WT_Interpolate (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame)
{
EAS_PCM *pOutputBuffer;
EAS_I32 phaseInc;
EAS_I32 phaseFrac;
EAS_I32 acc0;
const EAS_SAMPLE *pSamples;
const EAS_SAMPLE *loopEnd;
EAS_I32 samp1;
EAS_I32 samp2;
EAS_I32 numSamples;
/* initialize some local variables */
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
loopEnd = (const EAS_SAMPLE*) pWTVoice->loopEnd + 1;
pSamples = (const EAS_SAMPLE*) pWTVoice->phaseAccum;
/*lint -e{713} truncation is OK */
phaseFrac = pWTVoice->phaseFrac;
phaseInc = pWTIntFrame->frame.phaseIncrement;
/* fetch adjacent samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
while (numSamples--) {
/* linear interpolation */
acc0 = samp2 - samp1;
acc0 = acc0 * phaseFrac;
/*lint -e{704} <avoid divide>*/
acc0 = samp1 + (acc0 >> NUM_PHASE_FRAC_BITS);
/* save new output sample in buffer */
/*lint -e{704} <avoid divide>*/
*pOutputBuffer++ = (EAS_I16)(acc0 >> 2);
/* increment phase */
phaseFrac += phaseInc;
/*lint -e{704} <avoid divide>*/
acc0 = phaseFrac >> NUM_PHASE_FRAC_BITS;
/* next sample */
if (acc0 > 0) {
/* advance sample pointer */
pSamples += acc0;
phaseFrac = (EAS_I32)((EAS_U32)phaseFrac & PHASE_FRAC_MASK);
/* check for loop end */
acc0 = (EAS_I32) (pSamples - loopEnd);
if (acc0 >= 0)
pSamples = (const EAS_SAMPLE*) pWTVoice->loopStart + acc0;
/* fetch new samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
}
}
/* save pointer and phase */
pWTVoice->phaseAccum = (EAS_U32) pSamples;
pWTVoice->phaseFrac = (EAS_U32) phaseFrac;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void WT_Interpolate (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame)
{
EAS_PCM *pOutputBuffer;
EAS_I32 phaseInc;
EAS_I32 phaseFrac;
EAS_I32 acc0;
const EAS_SAMPLE *pSamples;
const EAS_SAMPLE *loopEnd;
EAS_I32 samp1;
EAS_I32 samp2;
EAS_I32 numSamples;
/* initialize some local variables */
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
loopEnd = (const EAS_SAMPLE*) pWTVoice->loopEnd + 1;
pSamples = (const EAS_SAMPLE*) pWTVoice->phaseAccum;
/*lint -e{713} truncation is OK */
phaseFrac = pWTVoice->phaseFrac;
phaseInc = pWTIntFrame->frame.phaseIncrement;
/* fetch adjacent samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
while (numSamples--) {
/* linear interpolation */
acc0 = samp2 - samp1;
acc0 = acc0 * phaseFrac;
/*lint -e{704} <avoid divide>*/
acc0 = samp1 + (acc0 >> NUM_PHASE_FRAC_BITS);
/* save new output sample in buffer */
/*lint -e{704} <avoid divide>*/
*pOutputBuffer++ = (EAS_I16)(acc0 >> 2);
/* increment phase */
phaseFrac += phaseInc;
/*lint -e{704} <avoid divide>*/
acc0 = phaseFrac >> NUM_PHASE_FRAC_BITS;
/* next sample */
if (acc0 > 0) {
/* advance sample pointer */
pSamples += acc0;
phaseFrac = (EAS_I32)((EAS_U32)phaseFrac & PHASE_FRAC_MASK);
/* check for loop end */
acc0 = (EAS_I32) (pSamples - loopEnd);
if (acc0 >= 0)
pSamples = (const EAS_SAMPLE*) pWTVoice->loopStart + acc0;
/* fetch new samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
}
}
/* save pointer and phase */
pWTVoice->phaseAccum = (EAS_U32) pSamples;
pWTVoice->phaseFrac = (EAS_U32) phaseFrac;
}
|
@@ -33,6 +33,7 @@
*------------------------------------
*/
#include "log/log.h"
+#include <cutils/log.h>
#include "eas_types.h"
#include "eas_math.h"
@@ -92,6 +93,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
@@ -190,6 +192,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -287,6 +290,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -379,6 +383,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pAudioBuffer = pWTIntFrame->pAudioBuffer;
@@ -446,6 +451,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -593,6 +599,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
|
CWE-119
| null |
android_errorWriteLog(0x534e4554, "26366256");
|
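The kernel guarded by this record's fix is a classic fixed-point linear interpolation: the fractional phase picks a point between two adjacent samples. Condensed sketch (NUM_PHASE_FRAC_BITS is 15 in the sonivox headers, taken as an assumption here):

#define NUM_PHASE_FRAC_BITS 15  /* assumed; phaseFrac then lives in [0, 2^15) */

static int lerp_sample(int samp1, int samp2, int phaseFrac) {
    /* phaseFrac == 0 yields samp1; 0x4000 (one half) yields the midpoint */
    return samp1 + (((samp2 - samp1) * phaseFrac) >> NUM_PHASE_FRAC_BITS);
}
/* e.g. lerp_sample(100, 200, 0x4000) == 150 */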
150,898 |
void WT_InterpolateMono (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame)
{
EAS_I32 *pMixBuffer;
const EAS_I8 *pLoopEnd;
const EAS_I8 *pCurrentPhaseInt;
EAS_I32 numSamples;
EAS_I32 gain;
EAS_I32 gainIncrement;
EAS_I32 currentPhaseFrac;
EAS_I32 phaseInc;
EAS_I32 tmp0;
EAS_I32 tmp1;
EAS_I32 tmp2;
EAS_I8 *pLoopStart;
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
/* calculate gain increment */
gainIncrement = (pWTIntFrame->gainTarget - pWTIntFrame->prevGain) << (16 - SYNTH_UPDATE_PERIOD_IN_BITS);
if (gainIncrement < 0)
gainIncrement++;
gain = pWTIntFrame->prevGain << 16;
pCurrentPhaseInt = pWTVoice->pPhaseAccum;
currentPhaseFrac = pWTVoice->phaseFrac;
phaseInc = pWTIntFrame->phaseIncrement;
pLoopStart = pWTVoice->pLoopStart;
pLoopEnd = pWTVoice->pLoopEnd + 1;
InterpolationLoop:
tmp0 = (EAS_I32)(pCurrentPhaseInt - pLoopEnd);
if (tmp0 >= 0)
pCurrentPhaseInt = pLoopStart + tmp0;
tmp0 = *pCurrentPhaseInt;
tmp1 = *(pCurrentPhaseInt + 1);
tmp2 = phaseInc + currentPhaseFrac;
tmp1 = tmp1 - tmp0;
tmp1 = tmp1 * currentPhaseFrac;
tmp1 = tmp0 + (tmp1 >> NUM_EG1_FRAC_BITS);
pCurrentPhaseInt += (tmp2 >> NUM_PHASE_FRAC_BITS);
currentPhaseFrac = tmp2 & PHASE_FRAC_MASK;
gain += gainIncrement;
tmp2 = (gain >> SYNTH_UPDATE_PERIOD_IN_BITS);
tmp0 = *pMixBuffer;
tmp2 = tmp1 * tmp2;
tmp2 = (tmp2 >> 9);
tmp0 = tmp2 + tmp0;
*pMixBuffer++ = tmp0;
numSamples--;
if (numSamples > 0)
goto InterpolationLoop;
pWTVoice->pPhaseAccum = pCurrentPhaseInt;
pWTVoice->phaseFrac = currentPhaseFrac;
/*lint -e{702} <avoid divide>*/
pWTVoice->gain = (EAS_I16)(gain >> SYNTH_UPDATE_PERIOD_IN_BITS);
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void WT_InterpolateMono (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame)
{
EAS_I32 *pMixBuffer;
const EAS_I8 *pLoopEnd;
const EAS_I8 *pCurrentPhaseInt;
EAS_I32 numSamples;
EAS_I32 gain;
EAS_I32 gainIncrement;
EAS_I32 currentPhaseFrac;
EAS_I32 phaseInc;
EAS_I32 tmp0;
EAS_I32 tmp1;
EAS_I32 tmp2;
EAS_I8 *pLoopStart;
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
android_errorWriteLog(0x534e4554, "26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
/* calculate gain increment */
gainIncrement = (pWTIntFrame->gainTarget - pWTIntFrame->prevGain) << (16 - SYNTH_UPDATE_PERIOD_IN_BITS);
if (gainIncrement < 0)
gainIncrement++;
gain = pWTIntFrame->prevGain << 16;
pCurrentPhaseInt = pWTVoice->pPhaseAccum;
currentPhaseFrac = pWTVoice->phaseFrac;
phaseInc = pWTIntFrame->phaseIncrement;
pLoopStart = pWTVoice->pLoopStart;
pLoopEnd = pWTVoice->pLoopEnd + 1;
InterpolationLoop:
tmp0 = (EAS_I32)(pCurrentPhaseInt - pLoopEnd);
if (tmp0 >= 0)
pCurrentPhaseInt = pLoopStart + tmp0;
tmp0 = *pCurrentPhaseInt;
tmp1 = *(pCurrentPhaseInt + 1);
tmp2 = phaseInc + currentPhaseFrac;
tmp1 = tmp1 - tmp0;
tmp1 = tmp1 * currentPhaseFrac;
tmp1 = tmp0 + (tmp1 >> NUM_EG1_FRAC_BITS);
pCurrentPhaseInt += (tmp2 >> NUM_PHASE_FRAC_BITS);
currentPhaseFrac = tmp2 & PHASE_FRAC_MASK;
gain += gainIncrement;
tmp2 = (gain >> SYNTH_UPDATE_PERIOD_IN_BITS);
tmp0 = *pMixBuffer;
tmp2 = tmp1 * tmp2;
tmp2 = (tmp2 >> 9);
tmp0 = tmp2 + tmp0;
*pMixBuffer++ = tmp0;
numSamples--;
if (numSamples > 0)
goto InterpolationLoop;
pWTVoice->pPhaseAccum = pCurrentPhaseInt;
pWTVoice->phaseFrac = currentPhaseFrac;
/*lint -e{702} <avoid divide>*/
pWTVoice->gain = (EAS_I16)(gain >> SYNTH_UPDATE_PERIOD_IN_BITS);
}
|
@@ -33,6 +33,7 @@
*------------------------------------
*/
#include "log/log.h"
+#include <cutils/log.h>
#include "eas_types.h"
#include "eas_math.h"
@@ -92,6 +93,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
@@ -190,6 +192,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -287,6 +290,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -379,6 +383,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pAudioBuffer = pWTIntFrame->pAudioBuffer;
@@ -446,6 +451,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -593,6 +599,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
|
CWE-119
| null |
android_errorWriteLog(0x534e4554, "26366256");
|
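The patch shared by records 150,897 through 150,899 inserts the same guard ahead of every render loop in the file; android_errorWriteLog tags the event with 0x534e4554 ("SNET" in ASCII), the marker Android uses for security-relevant log entries. The recurring fragment:

    numSamples = pWTIntFrame->numSamples;
    if (numSamples <= 0) {
        ALOGE("b/26366256");
        android_errorWriteLog(0x534e4554, "26366256");  /* 0x534e4554 == "SNET" */
        return;
    }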
150,899 |
void WT_InterpolateNoLoop (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame)
{
EAS_PCM *pOutputBuffer;
EAS_I32 phaseInc;
EAS_I32 phaseFrac;
EAS_I32 acc0;
const EAS_SAMPLE *pSamples;
EAS_I32 samp1;
EAS_I32 samp2;
EAS_I32 numSamples;
/* initialize some local variables */
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
phaseInc = pWTIntFrame->frame.phaseIncrement;
pSamples = (const EAS_SAMPLE*) pWTVoice->phaseAccum;
phaseFrac = (EAS_I32)pWTVoice->phaseFrac;
/* fetch adjacent samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
while (numSamples--) {
/* linear interpolation */
acc0 = samp2 - samp1;
acc0 = acc0 * phaseFrac;
/*lint -e{704} <avoid divide>*/
acc0 = samp1 + (acc0 >> NUM_PHASE_FRAC_BITS);
/* save new output sample in buffer */
/*lint -e{704} <avoid divide>*/
*pOutputBuffer++ = (EAS_I16)(acc0 >> 2);
/* increment phase */
phaseFrac += phaseInc;
/*lint -e{704} <avoid divide>*/
acc0 = phaseFrac >> NUM_PHASE_FRAC_BITS;
/* next sample */
if (acc0 > 0) {
/* advance sample pointer */
pSamples += acc0;
phaseFrac = (EAS_I32)((EAS_U32)phaseFrac & PHASE_FRAC_MASK);
/* fetch new samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
}
}
/* save pointer and phase */
pWTVoice->phaseAccum = (EAS_U32) pSamples;
pWTVoice->phaseFrac = (EAS_U32) phaseFrac;
}
|
DoS Exec Code Overflow Mem. Corr.
| 1 |
void WT_InterpolateNoLoop (S_WT_VOICE *pWTVoice, S_WT_INT_FRAME *pWTIntFrame)
{
EAS_PCM *pOutputBuffer;
EAS_I32 phaseInc;
EAS_I32 phaseFrac;
EAS_I32 acc0;
const EAS_SAMPLE *pSamples;
EAS_I32 samp1;
EAS_I32 samp2;
EAS_I32 numSamples;
/* initialize some local variables */
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
phaseInc = pWTIntFrame->frame.phaseIncrement;
pSamples = (const EAS_SAMPLE*) pWTVoice->phaseAccum;
phaseFrac = (EAS_I32)pWTVoice->phaseFrac;
/* fetch adjacent samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
while (numSamples--) {
/* linear interpolation */
acc0 = samp2 - samp1;
acc0 = acc0 * phaseFrac;
/*lint -e{704} <avoid divide>*/
acc0 = samp1 + (acc0 >> NUM_PHASE_FRAC_BITS);
/* save new output sample in buffer */
/*lint -e{704} <avoid divide>*/
*pOutputBuffer++ = (EAS_I16)(acc0 >> 2);
/* increment phase */
phaseFrac += phaseInc;
/*lint -e{704} <avoid divide>*/
acc0 = phaseFrac >> NUM_PHASE_FRAC_BITS;
/* next sample */
if (acc0 > 0) {
/* advance sample pointer */
pSamples += acc0;
phaseFrac = (EAS_I32)((EAS_U32)phaseFrac & PHASE_FRAC_MASK);
/* fetch new samples */
#if defined(_8_BIT_SAMPLES)
/*lint -e{701} <avoid multiply for performance>*/
samp1 = pSamples[0] << 8;
/*lint -e{701} <avoid multiply for performance>*/
samp2 = pSamples[1] << 8;
#else
samp1 = pSamples[0];
samp2 = pSamples[1];
#endif
}
}
/* save pointer and phase */
pWTVoice->phaseAccum = (EAS_U32) pSamples;
pWTVoice->phaseFrac = (EAS_U32) phaseFrac;
}
|
@@ -33,6 +33,7 @@
*------------------------------------
*/
#include "log/log.h"
+#include <cutils/log.h>
#include "eas_types.h"
#include "eas_math.h"
@@ -92,6 +93,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
@@ -190,6 +192,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -287,6 +290,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -379,6 +383,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pAudioBuffer = pWTIntFrame->pAudioBuffer;
@@ -446,6 +451,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pOutputBuffer = pWTIntFrame->pAudioBuffer;
@@ -593,6 +599,7 @@
numSamples = pWTIntFrame->numSamples;
if (numSamples <= 0) {
ALOGE("b/26366256");
+ android_errorWriteLog(0x534e4554, "26366256");
return;
}
pMixBuffer = pWTIntFrame->pMixBuffer;
|
CWE-119
| null |
android_errorWriteLog(0x534e4554, "26366256");
|
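WT_InterpolateNoLoop in this record differs from WT_Interpolate (record 150,897) mainly in dropping the loop-end wrap; for contrast, the step the NoLoop variant omits, copied from the looping version:

    /* When the read pointer passes loopEnd, fold the overshoot back into
       the loop region: */
    acc0 = (EAS_I32) (pSamples - loopEnd);
    if (acc0 >= 0)
        pSamples = (const EAS_SAMPLE*) pWTVoice->loopStart + acc0;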