@@ -56,7 +56,7 @@ def clear!(local_types = Coverband::TYPES)
       # sleep in between to avoid holding other redis commands,
       # with a small random offset so runtime and eager types can be processed "at the same time"
       def deferred_time
-        rand(3.0..4.0)
+        rand(2.0..3.0)
       end

       def del(local_type)
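The comment above is the rationale for `deferred_time`: the sweep over redis keys sleeps between batches so it never monopolizes the connection, and the randomized window keeps the eager and runtime sweeps from marching in lockstep. A minimal sketch of the pattern outside Coverband (`batches` and `process` are hypothetical stand-ins, not Coverband code):

    # Each batch backs off a random 2.0-3.0s, mirroring the new deferred_time
    # range, so two concurrent sweeps drift apart instead of hitting redis
    # at the same moment.
    batches.each do |batch|
      sleep rand(2.0..3.0)
      process(batch)
    end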
@@ -89,7 +89,7 @@ def unlock!(local_type)
       # used to store data to redis. It is changed only when breaking changes to our
       # redis format are required.
       ###
-      REDIS_STORAGE_FORMAT_VERSION = "coverband_hash_3_3"
+      REDIS_STORAGE_FORMAT_VERSION = "coverband_hash_4_0"

       JSON_PAYLOAD_EXPIRATION = 5 * 60
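As the comment says, this constant only moves on breaking format changes: the version string namespaces every key the adapter writes, so bumping it abandons old-format data rather than migrating it. A sketch of the key shape this implies (the exact prefix construction is assumed here, not shown in this diff):

    # Assumed key layout: the format version leads every key, so data written
    # under "coverband_hash_3_3.*" is invisible to a client reading
    # "coverband_hash_4_0.*" and simply ages out.
    prefix = [REDIS_STORAGE_FORMAT_VERSION, Coverband::RUNTIME_TYPE].join(".")
    # => "coverband_hash_4_0.runtime"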
@@ -116,8 +116,8 @@ def initialize(redis, opts = {})

       def supported?
         Gem::Version.new(@redis.info["redis_version"]) >= Gem::Version.new("2.6.0")
-      rescue Redis::CannotConnectError => error
-        Coverband.configuration.logger.info "Redis is not available (#{error}), Coverband not configured"
+      rescue Redis::CannotConnectError => e
+        Coverband.configuration.logger.info "Redis is not available (#{e}), Coverband not configured"
         Coverband.configuration.logger.info "If this is a setup task like assets:precompile feel free to ignore"
       end
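The rescue only renames the exception variable (`error` to `e`, the conventional short name). More interesting is that `supported?` compares versions with `Gem::Version` rather than plain strings, which matters once redis reaches double-digit minor versions:

    # Lexical comparison gets multi-digit segments wrong; Gem::Version
    # compares segment by segment.
    Gem::Version.new("2.10.0") >= Gem::Version.new("2.6.0") # => true
    "2.10.0" >= "2.6.0"                                     # => false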
@@ -128,6 +128,7 @@ def clear!
           file_keys = files_set
           @redis.del(*file_keys) if file_keys.any?
           @redis.del(files_key)
+          @redis.del(files_key(type))
           @get_coverage_cache.clear!(type)
         end
         self.type = old_type
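The added line deletes the type-scoped file set alongside the untyped `files_key`, so a `clear!` no longer leaves a stale per-type membership set behind. The two keys differ only in scope (example values assumed for illustration):

    files_key        # e.g. "coverband_hash_4_0.files"          (untyped)
    files_key(type)  # e.g. "coverband_hash_4_0.runtime.files"  (scoped to the current type)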
@@ -148,7 +149,7 @@ def save_report(report)
         updated_time = (type == Coverband::EAGER_TYPE) ? nil : report_time
         keys = []
         report.each_slice(@save_report_batch_size) do |slice|
-          files_data = slice.map { |(file, data)|
+          files_data = slice.map do |(file, data)|
             relative_file = @relative_file_converter.convert(file)
             file_hash = file_hash(relative_file)
             key = key(relative_file, file_hash: file_hash)
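Only the block delimiters change here (`{ }` to `do end` for a multiline block), but the surrounding structure is the point: the report is written in `@save_report_batch_size` slices, so one huge report becomes several bounded redis calls. The slicing is plain `Enumerable#each_slice`:

    # A 1,000-file report with a batch size of 250 yields four batches,
    # each becoming one scripted redis write.
    (1..1000).each_slice(250).count # => 4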
@@ -161,7 +162,7 @@ def save_report(report)
             report_time: report_time,
             updated_time: updated_time
           )
-          }
+          end
           next unless files_data.any?

           arguments_key = [@redis_namespace, SecureRandom.uuid].compact.join(".")
@@ -171,12 +172,24 @@ def save_report(report)
         @redis.sadd(files_key, keys) if keys.any?
       end

-      def coverage(local_type = nil)
+      # NOTE: This method should be used for full-coverage or filename coverage lookups.
+      # When paging, code should use coverage_for_types and pull eager and runtime together as matched pairs.
+      def coverage(local_type = nil, opts = {})
+        page_size = opts[:page_size] || 250
         cached_results = @get_coverage_cache.fetch(local_type || type) do |sleep_time|
-          files_set = files_set(local_type)
-
-          # use batches with a sleep in between to avoid overloading redis
-          files_set.each_slice(250).flat_map do |key_batch|
+          files_set = if opts[:page]
+            raise "call coverage_for_types with paging"
+          elsif opts[:filename]
+            type_key_prefix = key_prefix(local_type)
+            # NOTE: there should be a cleaner way to extract the filename from the key
+            files_set(local_type).select do |cache_key|
+              cache_key.sub(type_key_prefix, "").match(short_name(opts[:filename]))
+            end || {}
+          else
+            files_set(local_type)
+          end
+          # below uses batches with a sleep in between to avoid overloading redis
+          files_set.each_slice(page_size).flat_map do |key_batch|
             sleep sleep_time
             @redis.pipelined do |pipeline|
               key_batch.each do |key|
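The reworked `coverage` keeps the old throttled read loop (slice, sleep, pipeline) but now refuses `:page` (paging must go through `coverage_for_types` so eager and runtime stay paired) and supports a `:filename` filter by matching each key with its type prefix stripped. The throttled read itself is the standard redis-rb pipelining pattern; a self-contained sketch assuming a plain redis-rb client:

    require "redis"

    redis = Redis.new
    keys = ["k1", "k2", "k3"]
    results = keys.each_slice(2).flat_map do |batch|
      sleep 0.1 # back off between batches so other clients get a turn
      redis.pipelined do |pipeline|
        batch.each { |key| pipeline.hgetall(key) } # queued, sent as one round trip
      end
    end
    # results: one hash per key, in request order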
@@ -191,6 +204,70 @@ def coverage(local_type = nil)
         end
       end

+      def split_coverage(types, coverage_cache, options = {})
+        if types.is_a?(Array) && !options[:filename] && options[:page]
+          data = coverage_for_types(types, options)
+          coverage_cache[Coverband::RUNTIME_TYPE] = data[Coverband::RUNTIME_TYPE]
+          coverage_cache[Coverband::EAGER_TYPE] = data[Coverband::EAGER_TYPE]
+          data
+        else
+          super
+        end
+      end
+
+      def coverage_for_types(_types, opts = {})
+        page_size = opts[:page_size] || 250
+        hash_data = {}
+
+        runtime_file_set = files_set(Coverband::RUNTIME_TYPE)
+        @cached_file_count = runtime_file_set.length
+        runtime_file_set = runtime_file_set.each_slice(page_size).to_a[opts[:page] - 1] || []
+
+        hash_data[Coverband::RUNTIME_TYPE] = runtime_file_set.each_slice(page_size).flat_map do |key_batch|
+          @redis.pipelined do |pipeline|
+            key_batch.each do |key|
+              pipeline.hgetall(key)
+            end
+          end
+        end
+
+        eager_key_pre = key_prefix(Coverband::EAGER_TYPE)
+        runtime_key_pre = key_prefix(Coverband::RUNTIME_TYPE)
+        matched_file_set = files_set(Coverband::EAGER_TYPE)
+          .select do |eager_key, _val|
+            runtime_file_set.any? do |runtime_key|
+              (eager_key.sub(eager_key_pre, "") == runtime_key.sub(runtime_key_pre, ""))
+            end
+          end || []
+        hash_data[Coverband::EAGER_TYPE] = matched_file_set.each_slice(page_size).flat_map do |key_batch|
+          @redis.pipelined do |pipeline|
+            key_batch.each do |key|
+              pipeline.hgetall(key)
+            end
+          end
+        end
+        hash_data[Coverband::RUNTIME_TYPE] = hash_data[Coverband::RUNTIME_TYPE].each_with_object({}) do |data_from_redis, hash|
+          add_coverage_for_file(data_from_redis, hash)
+        end
+        hash_data[Coverband::EAGER_TYPE] = hash_data[Coverband::EAGER_TYPE].each_with_object({}) do |data_from_redis, hash|
+          add_coverage_for_file(data_from_redis, hash)
+        end
+        hash_data
+      end
+
+      def short_name(filename)
+        filename.sub(/^#{Coverband.configuration.root}/, ".")
+          .gsub(%r{^\./}, "")
+      end
+
+      def file_count(local_type = nil)
+        files_set(local_type).count { |filename| !Coverband.configuration.ignore.any? { |i| filename.match(i) } }
+      end
+
+      def cached_file_count
+        @cached_file_count ||= file_count(Coverband::RUNTIME_TYPE)
+      end
+
       def raw_store
         @redis
       end
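Two details in `coverage_for_types` deserve attention. First, the paging is 1-indexed and degrades gracefully: `each_slice(page_size).to_a[opts[:page] - 1] || []` returns an empty page rather than raising when the page number runs off the end. Second, only the runtime set is paged; the eager entries are then selected by comparing keys with their type prefixes stripped, so each page carries matched eager/runtime pairs. The paging arithmetic in isolation:

    files = ("a".."k").to_a           # 11 keys
    pages = files.each_slice(5).to_a  # [["a".."e"], ["f".."j"], ["k"]]
    pages[3 - 1]        # => ["k"]  (page 3)
    pages[4 - 1] || []  # => []     (page 4 is past the end)

Note that the second `each_slice(page_size)` over `runtime_file_set` is effectively a no-op, since that array is already at most one page long; it just reuses the batching shape of the other read paths.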
@@ -212,9 +289,13 @@ def add_coverage_for_file(data_from_redis, hash)
         return unless file_hash(file) == data_from_redis[FILE_HASH]

         data = coverage_data_from_redis(data_from_redis)
-        hash[file] = data_from_redis.select { |meta_data_key, _value| META_DATA_KEYS.include?(meta_data_key) }.merge!("data" => data)
-        hash[file][LAST_UPDATED_KEY] = (hash[file][LAST_UPDATED_KEY].nil? || hash[file][LAST_UPDATED_KEY] == "") ? nil : hash[file][LAST_UPDATED_KEY].to_i
-        hash[file].merge!(LAST_UPDATED_KEY => hash[file][LAST_UPDATED_KEY], FIRST_UPDATED_KEY => hash[file][FIRST_UPDATED_KEY].to_i)
+        hash[file] = data_from_redis.select do |meta_data_key, _value|
+          META_DATA_KEYS.include?(meta_data_key)
+        end.merge!("data" => data)
+        hash[file][LAST_UPDATED_KEY] =
+          (hash[file][LAST_UPDATED_KEY].nil? || hash[file][LAST_UPDATED_KEY] == "") ? nil : hash[file][LAST_UPDATED_KEY].to_i
+        hash[file].merge!(LAST_UPDATED_KEY => hash[file][LAST_UPDATED_KEY],
+          FIRST_UPDATED_KEY => hash[file][FIRST_UPDATED_KEY].to_i)
       end

       def coverage_data_from_redis(data_from_redis)
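The three long lines are only re-wrapped; the semantics are preserved. Redis returns hash fields as strings, so a missing or empty last-updated value is normalized to `nil` and anything else is coerced to an Integer epoch. The coercion in isolation:

    coerce = ->(v) { (v.nil? || v == "") ? nil : v.to_i }
    coerce.call(nil)          # => nil
    coerce.call("")           # => nil
    coerce.call("1700000000") # => 1700000000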
@@ -226,9 +307,9 @@ def coverage_data_from_redis(data_from_redis)
       end

       def script_input(key:, file:, file_hash:, data:, report_time:, updated_time:)
-        coverage_data = data.each_with_index.each_with_object({}) { |(coverage, index), hash|
+        coverage_data = data.each_with_index.each_with_object({}) do |(coverage, index), hash|
           hash[index] = coverage if coverage
-        }
+        end
         meta = {
           first_updated_at: report_time,
           file: file,
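Again only `{ }` becomes `do end`, but the block it delimits is the heart of the storage format: Ruby's `Coverage` arrays use `nil` for non-executable lines, and `script_input` drops those so the redis hash stores only real line counters (zero-hit lines survive, since `0` is truthy in Ruby):

    data = [1, nil, 0, 5]
    data.each_with_index.each_with_object({}) do |(coverage, index), hash|
      hash[index] = coverage if coverage
    end
    # => {0 => 1, 2 => 0, 3 => 5}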