class Solution {
public:
    // Computes sample statistics from a histogram: count[i] is how many times
    // the value i (0..255) occurs in the sample.
    // Returns {minimum, maximum, mean, median, mode} as doubles.
    // Assumes at least one element is present (total >= 1).
    vector<double> sampleStats(vector<int>& count) {
        const int n = count.size();
        // Accumulate the total in long long (init value 0LL) — individual
        // counts can be large enough that an int total would overflow.
        const long long total = accumulate(count.begin(), count.end(), 0LL);

        int minnum = 256;       // sentinel: no bucket seen yet (valid values are 0..255)
        int maxnum = 0;
        int mode = 0;
        int maxfreq = 0;
        long long sum = 0;      // weighted sum for the mean; exceeds int range easily
        double median = 0.0;

        // 1-based ranks of the median element(s): for an odd total,
        // left == right (single middle element); for an even total they are
        // the two middle ranks whose average is the median.
        const long long left = (total + 1) / 2;
        const long long right = (total + 2) / 2;
        long long cnt = 0;      // elements consumed before bucket i

        for (int i = 0; i < n; ++i) {
            sum += (long long)count[i] * i;
            if (count[i] > maxfreq) {   // strict '>' keeps the smallest value on frequency ties
                maxfreq = count[i];
                mode = i;
            }
            if (count[i] > 0) {
                if (minnum == 256) {    // first non-empty bucket is the minimum
                    minnum = i;
                }
                maxnum = i;             // last non-empty bucket seen so far
            }
            // Bucket i covers ranks (cnt, cnt + count[i]]. Add i once for each
            // median rank falling inside that window; median is their average.
            if (cnt < right && cnt + count[i] >= right) {
                median += i;
            }
            if (cnt < left && cnt + count[i] >= left) {
                median += i;
            }
            cnt += count[i];
        }

        const double mean = (double)sum / total;
        median /= 2.0;  // average of the two (possibly equal) median ranks
        return {(double)minnum, (double)maxnum, mean, median, (double)mode};
    }
};
✘ ⚡ 05/23|19:48:09 rocksdb 6.29.fb ● make sst_dump $DEBUG_LEVEL is 1 Makefile:170: Warning: Compiling in debug mode. Don't use the resulting binary in production CC tools/sst_dump.o CC tools/io_tracer_parser_tool.o CC tools/ldb_cmd.o CC tools/ldb_tool.o CC tools/sst_dump_tool.o CC utilities/blob_db/blob_dump_tool.o AR librocksdb_tools_debug.a /usr/bin/ar: creating librocksdb_tools_debug.a CCLD sst_dump
cmake编译会在 tool 目录下生成,直接 make sst_dump 则是生成在 rocksdb 根目录
⚡ 05/24|10:13:22 rocksdb 6.29.fb ./sst_dump --file=/tmp/rocksdb_tmp --command=raw options.env is 0x559da26cea00 Process /tmp/rocksdb_tmp/000013.sst Sst file format: block-based raw dump written to file /tmp/rocksdb_tmp/000013_dump.txt Process /tmp/rocksdb_tmp/000007.sst Sst file format: block-based raw dump written to file /tmp/rocksdb_tmp/000007_dump.txt Process /tmp/rocksdb_tmp/000019.sst Sst file format: block-based raw dump written to file /tmp/rocksdb_tmp/000019_dump.txt Process /tmp/rocksdb_tmp/000004.sst Sst file format: block-based raw dump written to file /tmp/rocksdb_tmp/000004_dump.txt
Table Properties: -------------------------------------- # data blocks: 1 # entries: 1 # deletions: 0 # merge operands: 0 # range deletions: 0 raw key size: 11 raw average key size: 11.000000 raw value size: 3 raw average value size: 3.000000 data block size: 30 index block size (user-key? 1, delta-value? 1): 20 filter block size: 0 # entries for filter: 0 (estimated) table size: 50 filter policy name: N/A prefix extractor name: nullptr column family ID: 0 column family name: default comparator name: leveldb.BytewiseComparator merge operator name: nullptr property collectors names: [] SST file compression algo: Snappy SST file compression options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; creation time: 1681356505 time stamp of earliest key: 0 file creation time: 0 slow compression estimated data size: 0 fast compression estimated data size: 0 DB identity: 86751c5f-624e-4582-a4aa-a7078979ab79 DB session identity: ZRB0TF2BT6GWRTCMNZD4 DB host id: YF-72166391D1 original file number: 19 unique ID: 80345056726F72BF-D677A376A5E0D45C-A2CFA81888233438
Index Details: -------------------------------------- Block key hex dump: Data block handle Block key ascii
HEX 666F6F: 0019 ASCII f o o ------
Data Block # 1 @ 0019 -------------------------------------- HEX 666F6F: 626172 ASCII f o o : b a r ------
Data Block Summary: -------------------------------------- # data blocks: 1 min data block size: 25 max data block size: 25 avg data block size: 25.000000