diff --git a/TODOLIST.txt b/TODOLIST.txt
new file mode 100644
index 0000000..30be318
--- /dev/null
+++ b/TODOLIST.txt
@@ -0,0 +1,42 @@
+TODOLIST
+--------
+
+If you want a really fast Drupal 8:
+
+ * Drupal\Core\KeyValueStore\DatabaseStorage
+   Drupal\Core\KeyValueStore\DatabaseStorageExpirable
+   Notes:
+   - Both are easy to implement (see the sketch at the end of this file).
+   - Must be able to separate it from the sharded pool since it needs to
+     be reliable and consistent over time. The client/server pool
+     implementation from 7.x-3.x must be ported too.
+   - The first brings the complexity of the data migration.
+
+ * Drupal\Core\Routing
+   Notes:
+   - Quite an easy one too.
+   - I'm not sure whether other components use it or not, in which case
+     this is not so easy anymore.
+
+ * Drupal\Core\Config\DatabaseStorage
+   Note:
+   - Easy one.
+
+ * Drupal\Core\Path\AliasStorage
+   Note:
+   - Already done in the 7.x-2.x version, and if the schema didn't change
+     much, this is a rather easy one too.
+   - If the same schema is used as in the 7.x version, then there is no
+     point in sharding it, and it should be stored alongside the router
+     table replacement.
+
+ * Drupal\Core\Session\SessionHandler
+   Note:
+   - Easy one.
+
+The first two will get rid of almost 30 of the 50 remaining SQL queries
+on a simple homepage with no content displayed. The third one will get rid
+of 5 or so more.
+
+Once all of those are taken care of, fewer than 10 SQL queries will remain
+on a standard profile home page. After that, real profiling needs to be done
+on a site with content, blocks and views all over the place, on various pages.
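+
+Rough sketch for the first item (hypothetical naming, assuming a \Redis
+client gets injected somehow; none of this exists yet): the non-expirable
+storage is mostly a thin wrapper around one Redis hash per collection:
+
+  class RedisStorage extends StorageBase {
+    public function get($key, $default = NULL) {
+      $value = $this->client->hGet('kv:' . $this->collection, $key);
+      return FALSE === $value ? $default : unserialize($value);
+    }
+    public function set($key, $value) {
+      $this->client->hSet('kv:' . $this->collection, $key, serialize($value));
+    }
+  }
+
+The expirable variant cannot live in a single hash since EXPIRE only applies
+to whole keys, so it would need one key per entry, prefixed through the
+usual getKey() machinery.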
diff --git a/redis.services.yml b/redis.services.yml
index bc193dc..4818f5b 100644
--- a/redis.services.yml
+++ b/redis.services.yml
@@ -3,4 +3,4 @@ services:
     class: Drupal\redis\Cache\CacheBackendFactory
     arguments: ['@redis.factory', '@cache_tags.invalidator.checksum']
   redis.factory:
-    class: Drupal\redis\ClientFactory
+    class: Drupal\redis\ClientFactory
\ No newline at end of file
diff --git a/src/Cache/CacheBase.php b/src/Cache/CacheBase.php
index a449ff8..ddf5bc8 100644
--- a/src/Cache/CacheBase.php
+++ b/src/Cache/CacheBase.php
@@ -21,7 +21,9 @@
  */
 abstract class CacheBase implements CacheBackendInterface {
 
-  use RedisPrefixTrait;
+  use RedisPrefixTrait {
+    getKey as getParentKey;
+  }
 
   /**
    * Temporary cache items lifetime is infinite.
@@ -34,17 +36,6 @@ abstract class CacheBase implements CacheBackendInterface {
    */
   const LIFETIME_PERM_DEFAULT = 31536000;
 
-  /**
-   * Computed keys are let's say arround 60 characters length due to
-   * key prefixing, which makes 1,000 keys DEL command to be something
-   * arround 50,000 bytes length: this is huge and may not pass into
-   * Redis, let's split this off.
-   * Some recommend to never get higher than 1,500 bytes within the same
-   * command which makes us forced to split this at a very low threshold:
-   * 20 seems a safe value here (1,280 average length).
-   */
-  const KEY_THRESHOLD = 20;
-
   /**
    * Latest delete all flush KEY name.
    */
@@ -132,18 +123,6 @@ public function invalidate($cid) {
     $this->invalidateMultiple([$cid]);
   }
 
-  /**
-   * Return the key for the given cache key.
-   */
-  public function getKey($cid = NULL) {
-    if (NULL === $cid) {
-      return $this->getPrefix() . ':' . $this->bin;
-    }
-    else {
-      return $this->getPrefix() . ':' . $this->bin . ':' . $cid;
-    }
-  }
-
   /**
    * Calculate the correct expiration time.
    *
@@ -158,7 +137,7 @@ protected function getExpiration($expire) {
     if ($expire == Cache::PERMANENT || $expire > $this->permTtl) {
       return $this->permTtl;
     }
-    return $expire - REQUEST_TIME;
+    return $expire - time();
   }
 
   /**
@@ -205,4 +184,15 @@ public function setPermTtl($ttl = NULL) {
     }
   }
 
+  /**
+   * {@inheritdoc}
+   */
+  public function getKey($parts) {
+    if (is_string($parts)) {
+      $parts = [$parts];
+    }
+    array_unshift($parts, $this->bin);
+    return $this->getParentKey($parts);
+  }
+
 }
diff --git a/src/Cache/PhpRedis.php b/src/Cache/PhpRedis.php
index 11e6aa7..1d5df32 100644
--- a/src/Cache/PhpRedis.php
+++ b/src/Cache/PhpRedis.php
@@ -104,7 +104,7 @@ public function set($cid, $data, $expire = Cache::PERMANENT, array $tags = array
     // Build the cache item and save it as a hash array.
     $entry = $this->createEntryHash($cid, $data, $expire, $tags);
-    $pipe = $this->client->multi(\REdis::PIPELINE);
+    $pipe = $this->client->multi(\Redis::PIPELINE);
     $pipe->hMset($key, $entry);
     $pipe->expire($key, $ttl);
     $pipe->exec();
@@ -127,6 +127,7 @@ public function deleteAll() {
     // was written in the same millisecond.
     // @todo This is needed to make the tests pass, is this safe enough for real
     // usage?
+    // @todo (pounard) Using getNextIncrement() would make this safe.
     usleep(1000);
     $this->lastDeleteAll = round(microtime(TRUE), 3);
     $this->client->set($this->getKey(static::LAST_DELETE_ALL_KEY), $this->lastDeleteAll);
@@ -240,7 +241,7 @@ protected function expandEntry(array $values, $allow_invalid) {
     // Check expire time, allow to have a cache invalidated explicitly, don't
     // check if already invalid.
     if ($cache->valid) {
-      $cache->valid = $cache->expire == Cache::PERMANENT || $cache->expire >= REQUEST_TIME;
+      $cache->valid = $cache->expire == Cache::PERMANENT || $cache->expire >= time();
 
       // Check if invalidateTags() has been called with any of the items's tags.
       if ($cache->valid && !$this->checksumProvider->isValid($cache->checksum, $cache->tags)) {
diff --git a/src/Cache/RedisCacheTagsChecksum.php b/src/Cache/RedisCacheTagsChecksum.php
index db1d224..585e478 100644
--- a/src/Cache/RedisCacheTagsChecksum.php
+++ b/src/Cache/RedisCacheTagsChecksum.php
@@ -24,7 +24,7 @@ class RedisCacheTagsChecksum implements CacheTagsChecksumInterface, CacheTagsInv
    *
    * @var array
    */
-  protected $tagCache = array();
+  protected $tagCache = [];
 
   /**
    * A list of tags that have already been invalidated in this request.
@@ -33,7 +33,7 @@ class RedisCacheTagsChecksum implements CacheTagsChecksumInterface, CacheTagsInv
    *
    * @var array
    */
-  protected $invalidatedTags = array();
+  protected $invalidatedTags = [];
 
   /**
    * @var \Redis
@@ -51,22 +51,21 @@ function __construct(ClientFactory $factory) {
    * {@inheritdoc}
    */
   public function invalidateTags(array $tags) {
-    $keys_to_increment = [];
     foreach ($tags as $tag) {
-      // Only invalidate tags once per request unless they are written again.
       if (isset($this->invalidatedTags[$tag])) {
+        // Only invalidate tags once per request unless they are written again.
         continue;
       }
+
+      $tagKey = $this->getKey(['tag', $tag]);
+      $current = $this->client->get($tagKey);
+
+      $current = $this->getNextIncrement($current);
+      $this->client->set($tagKey, $current);
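+
+      // Note that the GET followed by SET above is not atomic: unlike the
+      // previous pipelined INCR, two concurrent requests invalidating the
+      // same tag may compute the same "TIMESTAMP.SERIAL" value. This is
+      // assumed to be acceptable here, since the serial only disambiguates
+      // invalidations within a single second (see getNextIncrement()).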
+
+      // Populate the tag cache with the new value right away.
       $this->invalidatedTags[$tag] = TRUE;
-      unset($this->tagCache[$tag]);
-      $keys_to_increment[] = $this->getTagKey($tag);
-    }
-    if ($keys_to_increment) {
-      $multi = $this->client->multi(\Redis::PIPELINE);
-      foreach ($keys_to_increment as $key) {
-        $multi->incr($key);
-      }
-      $multi->exec();
+      $this->tagCache[$tag] = $current;
     }
   }
 
   /**
@@ -88,7 +87,7 @@ public function getCurrentChecksum(array $tags) {
    * {@inheritdoc}
    */
   public function isValid($checksum, array $tags) {
-    return $checksum == $this->calculateChecksum($tags);
+    return $this->calculateChecksum($tags) <= $checksum;
   }
 
   /**
@@ -97,16 +96,35 @@ public function isValid($checksum, array $tags) {
   public function calculateChecksum(array $tags) {
     $checksum = 0;
 
-    $fetch = array_values(array_diff($tags, array_keys($this->tagCache)));
-    if ($fetch) {
-      $keys = array_map(array($this, 'getTagKey'), $fetch);
-      foreach ($this->client->mget($keys) as $index => $invalidations) {
-        $this->tagCache[$fetch[$index]] = $invalidations ?: 0;
-      }
-    }
-
     foreach ($tags as $tag) {
-      $checksum += $this->tagCache[$tag];
+
+      if (isset($this->tagCache[$tag])) {
+        $current = $this->tagCache[$tag];
+      }
+      else {
+        $tagKey = $this->getKey(['tag', $tag]);
+        $current = $this->client->get($tagKey);
+
+        if (!$current) {
+          // The tag has never been created yet, so ensure it has an entry in
+          // the Redis database. In a sharded environment, the tag checksum
+          // itself might have been dropped silently, in which case handing
+          // back a 0 value could cause invalidated cache entries to be
+          // considered valid again.
+          // Note that this way, if a tag key is dropped by the holding Redis
+          // server, all items based upon the dropped tag become invalid, but
+          // that is the price of staying consistent in all cases.
+          $current = $this->getNextIncrement();
+          $this->client->set($tagKey, $current);
+        }
+
+        $this->tagCache[$tag] = $current;
+      }
+
+      if ($checksum < $current) {
+        $checksum = $current;
+      }
     }
 
     return $checksum;
@@ -116,21 +134,8 @@ public function calculateChecksum(array $tags) {
    * {@inheritdoc}
    */
   public function reset() {
-    $this->tagCache = array();
-    $this->invalidatedTags = array();
-  }
-
-  /**
-   * Return the key for the given cache tag.
-   *
-   * @param string $tag
-   *   The cache tag.
-   *
-   * @return string
-   *   The prefixed cache tag.
-   */
-  protected function getTagKey($tag) {
-    return $this->getPrefix() . ':cachetags:' . $tag;
+    $this->tagCache = [];
+    $this->invalidatedTags = [];
   }
 
 }
diff --git a/src/Cache/ShardedPhpRedis.php b/src/Cache/ShardedPhpRedis.php
new file mode 100644
index 0000000..d5e7cd5
--- /dev/null
+++ b/src/Cache/ShardedPhpRedis.php
@@ -0,0 +1,326 @@
+<?php
+
+namespace Drupal\redis\Cache;
+
+use Drupal\Core\Cache\Cache;
+use Drupal\Core\Cache\CacheTagsChecksumInterface;
+
+/**
+ * PhpRedis cache backend for sharded Redis setups.
+ */
+class ShardedPhpRedis extends CacheBase {
+
+  /**
+   * TTL given to invalidated entries: they are kept around only to make
+   * core unit tests happy (see ::invalidate()). Ten minutes.
+   */
+  const INVALID_TTL = 600;
+
+  /**
+   * @var \Redis
+   */
+  protected $client;
+
+  /**
+   * @var \Drupal\Core\Cache\CacheTagsChecksumInterface
+   */
+  protected $checksumProvider;
+
+  /**
+   * Creates a new ShardedPhpRedis cache backend.
+   */
+  public function __construct($bin, \Redis $client, CacheTagsChecksumInterface $checksum_provider) {
+    parent::__construct($bin);
+    $this->client = $client;
+    $this->checksumProvider = $checksum_provider;
+  }
+
+  /**
+   * Set the last flush timestamp.
+   *
+   * @param bool $overwrite
+   *   If set, the method won't try to load the existing value first.
+   *
+   * @return string
+   */
+  protected function setLastFlushTime($overwrite = FALSE) {
+
+    $key = $this->getKey('_flush');
+    $time = time();
+
+    $flushTime = $this->client->get($key);
+
+    if ($flushTime && $time === (int) $flushTime) {
+      $flushTime = $this->getNextIncrement($flushTime);
+    } else {
+      $flushTime = $this->getNextIncrement($time);
+    }
+
+    $this->client->set($key, $flushTime);
+
+    return $flushTime;
+  }
+
+  /**
+   * Get the last flush timestamp.
+   *
+   * @return string
+   */
+  protected function getLastFlushTime() {
+
+    $flushTime = $this->client->get($this->getKey('_flush'));
+
+    if (!$flushTime) {
+      // If there is no last flush data, consider that the cache backend is
+      // in an inconsistent state: the 'flush' key might disappear anytime a
+      // server is replaced or manually flushed. Please note that the initial
+      // flush timestamp is also set when an entry is set.
+      $flushTime = $this->setLastFlushTime();
+    }
+
+    return $flushTime;
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function get($cid, $allow_invalid = FALSE) {
+
+    $entryKey = $this->getKey($cid);
+    $item = $this->client->hGetAll($entryKey);
+    $time = time();
+
+    if (!$item) {
+      return FALSE;
+    }
+
+    $item = (object) $item;
+    // @todo Sometimes tags are inserted as a " " string, in which case we
+    //   end up explode'ing it and getting [""] as a result, which breaks
+    //   item validity at tags check time. Explore this and find out why.
+    $item->tags = array_filter(explode(',', $item->tags));
+    $item->valid = (bool) $item->valid;
+    $item->expire = (int) $item->expire;
+    $item->ttl = (int) $item->ttl;
+
+    if (!$item->valid && $item->ttl === self::INVALID_TTL) {
+      // @todo This is ugly, but we are in the case where an already expired
+      //   entry was set previously; this means that we are probably running
+      //   the unit tests, and we should not delete this entry in order to
+      //   make core tests happy.
+      if (!$allow_invalid) {
+        if ($item->created < $time - $item->ttl) {
+          // Force delete 10 minutes after the invalidation to keep some
+          // cleanup level for this ugly hack.
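+          // Reminder: $item->created holds a "TIMESTAMP.SERIAL" string
+          // built by getNextIncrement(), so the comparison above relies on
+          // PHP coercing it to a number; the serial part only matters
+          // within a single second.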
+          $this->client->del($entryKey);
+        }
+        return FALSE;
+      }
+    } else if ($item->valid && !$allow_invalid) {
+
+      if (Cache::PERMANENT !== $item->expire && $item->expire < $time) {
+        $this->client->del($entryKey);
+        return FALSE;
+      }
+
+      $lastFlush = $this->getLastFlushTime();
+      if ($item->created < $lastFlush) {
+        $this->client->del($entryKey);
+        return FALSE;
+      }
+
+      if (!$this->checksumProvider->isValid($item->checksum, $item->tags)) {
+        $this->client->del($entryKey);
+        return FALSE;
+      }
+    }
+
+    $item->data = unserialize($item->data);
+    $item->created = (int) $item->created;
+
+    return $item;
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function getMultiple(&$cids, $allow_invalid = FALSE) {
+    $ret = [];
+
+    // @todo Not performant, but in a sharded environment we cannot proceed
+    //   any other way; still, there are some paths left to explore.
+    foreach ($cids as $index => $cid) {
+      $item = $this->get($cid, $allow_invalid);
+      if ($item) {
+        $ret[$cid] = $item;
+        unset($cids[$index]);
+      }
+    }
+
+    return $ret;
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function set($cid, $data, $expire = Cache::PERMANENT, array $tags = array()) {
+
+    Cache::validateTags($tags);
+
+    $time = time();
+    $created = NULL;
+    $entryKey = $this->getKey($cid);
+    $lastFlush = $this->getLastFlushTime();
+
+    if ($time === (int) $lastFlush) {
+      // The latest flush happened within this very second.
+      $created = $lastFlush;
+    } else {
+      $created = $this->getNextIncrement($time);
+    }
+
+    $valid = TRUE;
+    $maxTtl = $this->getPermTtl();
+
+    if (Cache::PERMANENT !== $expire) {
+
+      if ($expire <= $time) {
+        // Any existing entry is stale.
+        // $this->client->del($entryKey);
+        // return;
+        // @todo This might happen during tests that check that invalid
+        //   entries can be fetched, and I do not like it. This "invalid"
+        //   feature mostly serves some edge caching cases, so let's set a
+        //   very small cache lifetime: 10 minutes is enough. See the
+        //   ::invalidate() method comment.
+        $valid = FALSE;
+        $ttl = self::INVALID_TTL;
+      } else {
+        $ttl = $expire - $time;
+      }
+
+      if ($maxTtl < $ttl) {
+        $ttl = $maxTtl;
+      }
+      // This feature might be deactivated by the site admin.
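+      // To sum up the TTL rules above and below (informal sketch):
+      //   - expire in the past:   valid = FALSE, ttl = INVALID_TTL;
+      //   - expire in the future: ttl = min(expire - now, permTtl);
+      //   - PERMANENT + permTtl:  ttl = permTtl;
+      //   - PERMANENT, infinite:  no EXPIRE is set on the Redis key.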
+    } else if ($maxTtl !== self::LIFETIME_INFINITE) {
+      $ttl = $maxTtl;
+    } else {
+      $ttl = $expire;
+    }
+
+    // A tag checksum of 0 means the tag has never been invalidated yet.
+    $checksum = $this->checksumProvider->getCurrentChecksum($tags);
+
+    $this->client->hMset($entryKey, [
+      'cid' => $cid,
+      'created' => $created,
+      'checksum' => $checksum,
+      'expire' => $expire,
+      'ttl' => $ttl,
+      'data' => serialize($data),
+      'tags' => implode(',', $tags),
+      'valid' => (int) $valid,
+    ]);
+
+    if ($expire !== Cache::PERMANENT) {
+      $this->client->expire($entryKey, $ttl);
+    }
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function setMultiple(array $items) {
+    foreach ($items as $cid => $item) {
+      $item += [
+        'data' => NULL,
+        'expire' => Cache::PERMANENT,
+        'tags' => [],
+      ];
+      $this->set($cid, $item['data'], $item['expire'], $item['tags']);
+    }
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function delete($cid) {
+    $this->client->del($this->getKey($cid));
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function deleteMultiple(array $cids) {
+    foreach ($cids as $cid) {
+      $this->client->del($this->getKey($cid));
+    }
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function deleteAll() {
+    $this->setLastFlushTime();
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function invalidate($cid) {
+    $entryKey = $this->getKey($cid);
+    if ($this->client->hGet($entryKey, 'valid')) {
+      // @todo Note that the original algorithm was to delete the entry at
+      //   this point instead of just invalidating it, but the bigger core
+      //   unit test method actually goes down that path, so as a temporary
+      //   solution we are just invalidating it this way.
+      $this->client->hMset($entryKey, [
+        'valid' => 0,
+        'ttl' => self::INVALID_TTL,
+      ]);
+    }
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function invalidateMultiple(array $cids) {
+    foreach ($cids as $cid) {
+      $this->invalidate($cid);
+    }
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function invalidateAll() {
+    $this->setLastFlushTime();
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function garbageCollection() {
+    // No need for garbage collection: Redis will do it for us based upon
+    // the entries' TTL. Also, knowing that in a sharded environment we
+    // cannot predict where entries are going to be stored, especially when
+    // doing proxy assisted sharding, we can't really do anything here.
+  }
+
+  /**
+   * {@inheritdoc}
+   */
+  public function removeBin() {
+    $this->deleteAll();
+  }
+
+}
diff --git a/src/Client/ShardedPhpRedis.php b/src/Client/ShardedPhpRedis.php
new file mode 100644
index 0000000..4fb2265
--- /dev/null
+++ b/src/Client/ShardedPhpRedis.php
@@ -0,0 +1,24 @@
diff --git a/src/RedisPrefixTrait.php b/src/RedisPrefixTrait.php
--- a/src/RedisPrefixTrait.php
+++ b/src/RedisPrefixTrait.php
     return $this->prefix;
   }
 
+  /**
+   * From the given timestamp, build an incremental, safe, time-based
+   * identifier.
+   *
+   * Due to potential accidental cache wipes, when a server goes down in the
+   * cluster, or when a server triggers its LRU wipe-out algorithm, keys
+   * that match the flush or tag checksums might be dropped.
+   *
+   * By default, each newly inserted tag will trigger a checksum computation
+   * to be stored in the Redis server as a timestamp. In order to ensure
+   * checksum validity, a simple comparison between the tag checksum and the
+   * cache entry checksum will tell us whether the entry predates the
+   * current checksum or not, thus telling us its state. The main problem we
+   * experience is that Redis is so fast that it can create and drop entries
+   * within the same second, sometimes even within the same microsecond. The
+   * only safe way to avoid conflicts is to compute checksums using an
+   * arbitrary computed number (a sequence).
+   *
+   * Drupal core does exactly this: tag checksums are the sum of each tag's
+   * individual checksum, and each tag checksum is an independent arbitrary
+   * serial that gets incremented, starting at 0 (no invalidation done yet)
+   * up to n (n invalidations), growing over time. This way the checksum
+   * computation always rises, and we have a sensible default that works in
+   * all cases.
+   *
+   * This model works as long as you can ensure consistency of the serial
+   * storage over time. Nevertheless, as explained above, in our case this
+   * serial might be dropped at some point for various valid technical
+   * reasons: if we started over at 0, we might accidentally compute a
+   * checksum that already existed in the past and make invalid entries
+   * turn valid again.
+   *
+   * In order to prevent this behavior, using a timestamp as part of the
+   * serial ensures that we won't experience this problem in a time range
+   * wider than a single second, which is safe enough for us. But using a
+   * timestamp creates a new problem: Redis is so fast that we can easily
+   * set or delete hundreds of entries during the same second; an entry
+   * created and then invalidated in the same second will create false
+   * positives (the entry is considered valid) - note that depending on the
+   * check algorithm, false negatives may also happen the same way.
+   * Therefore we need an arbitrary serial value to be incremented in order
+   * to make our checks stricter.
+   *
+   * To solve both the first problem (the need for a time-based checksum in
+   * case checksum data is dropped) and the second (the need for an
+   * arbitrary predictable serial value to avoid false positives or
+   * negatives), we combine the two: every checksum is built this way:
+   *
+   *   UNIXTIMESTAMP.SERIAL
+   *
+   * For example:
+   *
+   *   1429789217.017
+   *
+   * represents the 17th invalidation of the 1429789217 exact second, which
+   * happened while writing this documentation. The next tag invalidated
+   * within the same second would then have this checksum:
+   *
+   *   1429789217.018
+   *
+   * And so on...
+   *
+   * In order to make this consistent with PHP string and float comparison,
+   * we need a fixed precision for the decimal part, and we store the value
+   * as a string to avoid possible float precision problems when comparing.
+   *
+   * This algorithm is not fully failsafe, but it allows up to 1,000
+   * operations on the same checksum within the same second, which is a
+   * large enough value to reduce the conflict probability to almost zero
+   * for most use cases.
+   *
+   * @param int|string $timestamp
+   *   A "TIMESTAMP[.INCREMENT]" string.
+   *
+   * @return string
+   *   The next "TIMESTAMP.INCREMENT" string.
+   */
+  public function getNextIncrement($timestamp = NULL) {
+
+    if (!$timestamp) {
+      return time() . '.000';
+    }
+
+    if (FALSE !== ($pos = strpos($timestamp, '.'))) {
+      $inc = substr($timestamp, $pos + 1, 3);
+
+      return ((int) $timestamp) . '.' . str_pad($inc + 1, 3, '0', STR_PAD_LEFT);
+    }
+
+    return $timestamp . '.000';
+  }
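+
+  // Worked example, derived from the method above (shown for clarity):
+  //   $this->getNextIncrement();                 // "1429789217.000" (now)
+  //   $this->getNextIncrement('1429789217.000'); // "1429789217.001"
+  //   $this->getNextIncrement('1429789217');     // "1429789217.000"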
+
+  /**
+   * Get the prefixed key.
+   *
+   * @param string[] $parts
+   *   An arbitrary number of strings to compose the key from.
+   *
+   * @return string
+   */
+  public function getKey($parts = []) {
+    if (!is_array($parts)) {
+      $parts = [$parts];
+    }
+    array_unshift($parts, $this->getPrefix());
+    return implode(':', $parts);
+  }
 }
diff --git a/src/Tests/AbstractRedisCacheFixesUnitTestCase.php b/src/Tests/AbstractRedisCacheFixesUnitTestCase.php
index 630d229..91d2074 100644
--- a/src/Tests/AbstractRedisCacheFixesUnitTestCase.php
+++ b/src/Tests/AbstractRedisCacheFixesUnitTestCase.php
@@ -35,7 +35,7 @@ public function testTemporaryCacheExpire() {
     $this->assertIdentical('bar', $data->data);
 
     // Expiring entry with negative lifetime.
-    $backend->set('test3', 'baz', REQUEST_TIME - 100);
+    $backend->set('test3', 'baz', time() - 100);
     $data = $backend->get('test3');
     $this->assertEqual(false, $data);
   }
diff --git a/src/Tests/Cache/PhpRedisUnitTest.php b/src/Tests/Cache/PhpRedisUnitTest.php
index 85ded8a..600fa83 100644
--- a/src/Tests/Cache/PhpRedisUnitTest.php
+++ b/src/Tests/Cache/PhpRedisUnitTest.php
@@ -37,7 +37,6 @@ public function containerBuild(ContainerBuilder $container) {
     }
   }
 
-
   /**
    * Creates a new instance of PhpRedis cache backend.
    *
@@ -45,7 +44,11 @@ public function containerBuild(ContainerBuilder $container) {
    *   A new PhpRedis cache backend.
    */
   protected function createCacheBackend($bin) {
-    $cache = \Drupal::service('cache.backend.redis')->get($bin);
+    $cache = new PhpRedis(
+      $bin,
+      \Drupal::service('redis.factory')->getClient(),
+      \Drupal::service('cache_tags.invalidator.checksum')
+    );
     $cache->setMinTtl(10);
     return $cache;
   }
diff --git a/src/Tests/Cache/ShardedPhpRedisUnitTest.php b/src/Tests/Cache/ShardedPhpRedisUnitTest.php
new file mode 100644
index 0000000..665f8d1
--- /dev/null
+++ b/src/Tests/Cache/ShardedPhpRedisUnitTest.php
@@ -0,0 +1,150 @@
+<?php
+
+namespace Drupal\redis\Tests\Cache;
+
+use Drupal\Core\Cache\Cache;
+use Drupal\redis\Cache\ShardedPhpRedis;
+use Drupal\system\Tests\Cache\GenericCacheBackendUnitTestBase;
+use Symfony\Component\DependencyInjection\ContainerBuilder;
+use Symfony\Component\DependencyInjection\Reference;
+
+/**
+ * Tests the sharded PhpRedis cache backend.
+ */
+class ShardedPhpRedisUnitTest extends GenericCacheBackendUnitTestBase {
+
+  /**
+   * {@inheritdoc}
+   */
+  public function containerBuild(ContainerBuilder $container) {
+    parent::containerBuild($container);
+
+    if ($container->has('redis.factory')) {
+      $container->register('cache_tags.invalidator.checksum', 'Drupal\redis\Cache\RedisCacheTagsChecksum')
+        ->addArgument(new Reference('redis.factory'))
+        ->addTag('cache_tags_invalidator');
+    }
+  }
+
+  /**
+   * Creates a new instance of the sharded PhpRedis cache backend.
+   *
+   * @return \Drupal\redis\Cache\ShardedPhpRedis
+   *   A new ShardedPhpRedis cache backend.
+   */
+  protected function createCacheBackend($bin) {
+    $cache = new ShardedPhpRedis(
+      $bin,
+      \Drupal::service('redis.factory')->getClient(),
+      \Drupal::service('cache_tags.invalidator.checksum')
+    );
+    $cache->setMinTtl(10);
+    return $cache;
+  }
+
+  /**
+   * Tests Drupal\Core\Cache\CacheBackendInterface::invalidateTags().
+   */
+  function testInvalidateTags() {
+    $backend = $this->getCacheBackend();
+
+    // Create two cache entries with the same tag and tag value.
+    $backend->set('test_cid_invalidate1', $this->defaultValue, Cache::PERMANENT, array('test_tag:2'));
+    $backend->set('test_cid_invalidate2', $this->defaultValue, Cache::PERMANENT, array('test_tag:2'));
+    $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2'), 'Two cache items were created.');
+
+    // Invalidate test_tag:2. This should invalidate both entries.
+    Cache::invalidateTags(array('test_tag:2'));
+    $this->assertFalse($backend->get('test_cid_invalidate1') || $backend->get('test_cid_invalidate2'), 'Two cache items invalidated after invalidating a cache tag.');
+
+    // Create two cache entries with the same tag and tag value.
+    $backend->set('test_cid_invalidate1', $this->defaultValue, Cache::PERMANENT, array('test_tag:1'));
+    $backend->set('test_cid_invalidate2', $this->defaultValue, Cache::PERMANENT, array('test_tag:1'));
+    $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2'), 'Two cache items were created.');
+
+    // Invalidate test_tag:1. This should invalidate both entries.
+    Cache::invalidateTags(array('test_tag:1'));
+    $this->assertFalse($backend->get('test_cid_invalidate1') || $backend->get('test_cid_invalidate2'), 'Two caches removed after invalidating a cache tag.');
+
+    // Create three cache entries with a mix of tags and tag values.
+    $backend->set('test_cid_invalidate1', $this->defaultValue, Cache::PERMANENT, array('test_tag:1'));
+    $backend->set('test_cid_invalidate2', $this->defaultValue, Cache::PERMANENT, array('test_tag:2'));
+    $backend->set('test_cid_invalidate3', $this->defaultValue, Cache::PERMANENT, array('test_tag_foo:3'));
+    $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2') && $backend->get('test_cid_invalidate3'), 'Three cached items were created.');
+    Cache::invalidateTags(array('test_tag_foo:3'));
+    $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2'), 'Cache items not matching the tag were not invalidated.');
+    $this->assertFalse($backend->get('test_cid_invalidate3'), 'Cached item matching the tag was removed.');
+
+    // Create a cache entry in multiple bins. Two cache entries
+    // (test_cid_invalidate1 and test_cid_invalidate2) still exist from
+    // previous tests.
+    $tags = array('test_tag:1', 'test_tag:2', 'test_tag:3');
+    $bins = array('path', 'bootstrap', 'page');
+    foreach ($bins as $bin) {
+      $this->getCacheBackend($bin)->set('test', $this->defaultValue, Cache::PERMANENT, $tags);
+      $this->assertTrue($this->getCacheBackend($bin)->get('test'), 'Cache item was set in bin.');
+    }
+
+    Cache::invalidateTags(array('test_tag:2'));
+
+    // Test that the cache entry has been invalidated in multiple bins.
+    foreach ($bins as $bin) {
+      $this->assertFalse($this->getCacheBackend($bin)->get('test'), 'Tag invalidation affected item in bin.');
+    }
+    // Test that the cache entry with a matching tag has been invalidated.
+    $this->assertFalse($this->getCacheBackend($bin)->get('test_cid_invalidate2'), 'Cache items matching tag were invalidated.');
+    // Test that the cache entry without a matching tag still exists.
+    $this->assertTrue($this->getCacheBackend($bin)->get('test_cid_invalidate1'), 'Cache items not matching tag were not invalidated.');
+  }
+
+  /**
+   * Tests Drupal\Core\Cache\CacheBackendInterface::invalidateAll().
+   */
+  public function testInvalidateAll() {
+    $backend_a = $this->getCacheBackend();
+    $backend_b = $this->getCacheBackend('bootstrap');
+
+    // Set both expiring and permanent keys.
+    $backend_a->set('test1', 1, Cache::PERMANENT);
+    $backend_a->set('test2', 3, time() + 1000);
+    $backend_b->set('test3', 4, Cache::PERMANENT);
+
+    $backend_a->invalidateAll();
+
+    $this->assertFalse($backend_a->get('test1'), 'First key has been invalidated.');
+    $this->assertFalse($backend_a->get('test2'), 'Second key has been invalidated.');
+    $this->assertTrue($backend_b->get('test3'), 'Item in other bin is preserved.');
+  }
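+
+  /**
+   * Extra sanity check, not part of the core test suite: two flushes within
+   * the same second must not hide an entry written right after them, since
+   * both rely on the same "TIMESTAMP.SERIAL" clock.
+   */
+  public function testSetJustAfterSameSecondFlushes() {
+    $backend = $this->getCacheBackend();
+
+    $backend->invalidateAll();
+    $backend->invalidateAll();
+    $backend->set('test_after_flush', 1);
+
+    $this->assertTrue($backend->get('test_after_flush'), 'Entry set right after two same-second flushes is readable.');
+  }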
+
+  /**
+   * Tests Drupal\Core\Cache\CacheBackendInterface::removeBin().
+   */
+  public function testRemoveBin() {
+    $backend_a = $this->getCacheBackend();
+    $backend_b = $this->getCacheBackend('bootstrap');
+
+    // Set both expiring and permanent keys.
+    $backend_a->set('test1', 1, Cache::PERMANENT);
+    $backend_a->set('test2', 3, time() + 1000);
+    $backend_b->set('test3', 4, Cache::PERMANENT);
+
+    $backend_a->removeBin();
+
+    $this->assertFalse($backend_a->get('test1'), 'First key has been deleted.');
+    $this->assertFalse($backend_a->get('test2'), 'Second key has been deleted.');
+    $this->assertTrue($backend_b->get('test3'), 'Item in other bin is preserved.');
+  }
+
+}