@@ -859,3 +859,96 @@ func TestQueryFrontendQueryRejection(t *testing.T) {
 	require.Contains(t, string(body), tripperware.QueryRejectErrorMessage)
 
 }
+
+func TestQueryFrontendStatsFromResultsCacheShouldBeSame(t *testing.T) {
+
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	memcached := e2ecache.NewMemcached()
+	consul := e2edb.NewConsul()
+	require.NoError(t, s.StartAndWaitReady(consul, memcached))
+
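+	// The flags below enable the results cache (backed by memcached), per-step query
+	// stats, and caching of queryable-samples stats, so that a response served from
+	// the cache is expected to report the same stats as the original query.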
+	flags := mergeFlags(BlocksStorageFlags(), map[string]string{
+		"-querier.cache-results":                  "true",
+		"-querier.split-queries-by-interval":      "24h",
+		"-querier.query-ingesters-within":         "12h", // Required by the test on query /series out of ingesters time range
+		"-querier.per-step-stats-enabled":         strconv.FormatBool(true),
+		"-frontend.memcached.addresses":           "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort),
+		"-frontend.query-stats-enabled":           strconv.FormatBool(true),
+		"-frontend.cache-queryable-samples-stats": strconv.FormatBool(true),
+	})
+
+	minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"])
+	require.NoError(t, s.StartAndWaitReady(minio))
+
+	// Start the query-scheduler.
+	queryScheduler := e2ecortex.NewQueryScheduler("query-scheduler", flags, "")
+	require.NoError(t, s.StartAndWaitReady(queryScheduler))
+	flags["-frontend.scheduler-address"] = queryScheduler.NetworkGRPCEndpoint()
+	flags["-querier.scheduler-address"] = queryScheduler.NetworkGRPCEndpoint()
+
+	// Start the query-frontend.
+	queryFrontend := e2ecortex.NewQueryFrontendWithConfigFile("query-frontend", "", flags, "")
+	require.NoError(t, s.Start(queryFrontend))
+
+	// Start all other services.
+	ingester := e2ecortex.NewIngesterWithConfigFile("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), "", flags, "")
+	distributor := e2ecortex.NewDistributorWithConfigFile("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), "", flags, "")
+
+	querier := e2ecortex.NewQuerierWithConfigFile("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), "", flags, "")
+
+	require.NoError(t, s.StartAndWaitReady(querier, ingester, distributor))
+	require.NoError(t, s.WaitReady(queryFrontend))
+
+	// Check if we're discovering memcache or not.
+	require.NoError(t, queryFrontend.WaitSumMetrics(e2e.Equals(1), "cortex_memcache_client_servers"))
+	require.NoError(t, queryFrontend.WaitSumMetrics(e2e.Greater(0), "cortex_dns_lookups_total"))
+
+	// Wait until both the distributor and querier have updated the ring.
+	require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+	require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+
+	// Push some series to Cortex.
+	c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1")
+	require.NoError(t, err)
+
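+	// Two series, one minute apart, both within the last 10 minutes so they are
+	// still queryable from the ingesters.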
+	seriesTimestamp := time.Now().Add(-10 * time.Minute)
+	series2Timestamp := seriesTimestamp.Add(1 * time.Minute)
+	series1, _ := generateSeries("series_1", seriesTimestamp, prompb.Label{Name: "job", Value: "test"})
+	series2, _ := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "job", Value: "test"})
+
+	res, err := c.Push(series1)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	res, err = c.Push(series2)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	// Query back the series.
+	c, err = e2ecortex.NewClient("", queryFrontend.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+
+	// First request that will hit the datasource.
+	resp, _, err := c.QueryRangeRaw(`{job="test"}`, seriesTimestamp.Add(-1*time.Minute), series2Timestamp.Add(1*time.Minute), 30*time.Second, map[string]string{})
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, resp.StatusCode)
+
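+	// Capture the frontend's cumulative samples-scanned counter after the first
+	// (uncached) query; it serves as the baseline for the cache-hit comparison.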
+	values, err := queryFrontend.SumMetrics([]string{"cortex_query_samples_scanned_total"})
+	require.NoError(t, err)
+	numSamplesScannedTotal := e2e.SumValues(values)
+
+	// We send the same query to hit the results cache.
+	resp, _, err = c.QueryRangeRaw(`{job="test"}`, seriesTimestamp.Add(-1*time.Minute), series2Timestamp.Add(1*time.Minute), 30*time.Second, map[string]string{})
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, resp.StatusCode)
+
+	values, err = queryFrontend.SumMetrics([]string{"cortex_query_samples_scanned_total"})
+	require.NoError(t, err)
+	numSamplesScannedTotal2 := e2e.SumValues(values)
+
+	// We expect the same amount of samples_scanned to be added to the metric even
+	// though the second query was served from the results cache, so the counter
+	// should have doubled.
+	require.Equal(t, numSamplesScannedTotal2, numSamplesScannedTotal*2)
+}