From 9aba01ccba4f9cebd873e46407706aa9fa75880d Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 16:22:41 +0100 Subject: [PATCH 01/28] adding slow queries from P_S --- mysql.py | 124 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/mysql.py b/mysql.py index c5182a2..127368b 100644 --- a/mysql.py +++ b/mysql.py @@ -447,6 +447,125 @@ def fetch_innodb_stats(conn): return stats +# Check if PENFORMANCE_SCHEMA is enabled +def is_ps_enabled(conn): + result = mysql_query(conn, 'SHOW GLOBAL VARIABLES LIKE "performance_schema"') + row = result.fetchOne() + if row['performance_schema'] == 'ON': + return True + else + return False + +def clean_string(digest): + clean_digest=digest + clean_digest=clean_digest.replace('`', '') + clean_digest=clean_digest.replace('?', '') + clean_digest=clean_digest.replace(' ', '_') + clean_digest=clean_digest.replace('(', '_') + clean_digest=clean_digest.replace(')', '_') + clean_digest=clean_digest.replace('.', '-') + return clean_digest + +MYSQL_MAX_SLOW_QUERIES=20 +# http://www.markleith.co.uk/2011/04/18/monitoring-table-and-index-io-with-performance_schema/ +# http://www.markleith.co.uk/2012/07/04/mysql-performance-schema-statement-digests/ +# +# 1) A high level overview of the statements like Query Analysis, sorted by those queries with the highest latency + +# 2) List all normalized statements that use temporary tables ordered by number of on disk temporary tables descending first, then by the number of memory tables. +# 3) List all normalized statements that have done sorts, ordered by sort_merge_passes, sort_scans and sort_rows, all descending. +# 4) List all normalized statements that use have done a full table scan ordered by the percentage of times a full scan was done, then by the number of times the statement executed +# 5) List all normalized statements that have raised errors or warnings. + +# New + +# Connections per account (user/host) +# 1) select * from accounts limit 100; + +# Connections per host +# 1) select * from hosts limit 100; + +# Connections per user +# 1) select * from users limit 100; + +# Operations (rows read / rows changed) per table +# SELECT pst.object_schema AS table_schema, +# pst.object_name AS table_name, +# pst.count_read AS rows_read, +# pst.count_write AS rows_changed, +# (pst.count_write * COUNT(psi.index_name)) AS rows_changed_x_indexes +# FROM performance_schema.table_io_waits_summary_by_table AS pst +# LEFT JOIN performance_schema.table_io_waits_summary_by_index_usage AS psi +# ON pst.object_schema = psi.object_schema AND pst.object_name = psi.object_name +# AND psi.index_name IS NOT NULL +# GROUP BY pst.object_schema, pst.object_name +# ORDER BY pst.sum_timer_wait DESC; + +# reads per index +# SELECT object_schema AS table_schema, object_name AS table_name, index_name, count_read AS rows_read from performance_schema.table_io_waits_summary_by_index_usage WHERE index_name IS NOT NULL ORDER BY sum_timer_wait DESC; + +# Indexes not being used +#SELECT object_schema, +#object_name, +#index_name +#FROM performance_schema.table_io_waits_summary_by_index_usage +#WHERE index_name IS NOT NULL +#AND count_star = 0 +#ORDER BY object_schema, object_name; + +# Queries that have raised errors/warnings +#SELECT IF(LENGTH(DIGEST_TEXT) > 64, CONCAT(LEFT(DIGEST_TEXT, 30), ' ... 
', RIGHT(DIGEST_TEXT, 30)), DIGEST_TEXT) AS query, +#COUNT_STAR AS exec_count, +#SUM_ERRORS AS errors, +#(SUM_ERRORS / COUNT_STAR) * 100 as error_pct, +#SUM_WARNINGS AS warnings, +#(SUM_WARNINGS / COUNT_STAR) * 100 as warning_pct, +#DIGEST AS digest +#FROM performance_schema.events_statements_summary_by_digest +#WHERE SUM_ERRORS > 0 +#OR SUM_WARNINGS > 0 +#ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC; + +def fetch_slow_queries(conn): + slow_queries = {} + try: + # Get the slow queries + result = mysql_query(conn, """ + SELECT IF(LENGTH(DIGEST_TEXT) > 64, CONCAT(LEFT(DIGEST_TEXT, 30), ' ... ', RIGHT(DIGEST_TEXT, 30)), DIGEST_TEXT) AS query, + IF(SUM_NO_GOOD_INDEX_USED > 0 OR SUM_NO_INDEX_USED > 0, '*', '') AS full_scan, + COUNT_STAR AS exec_count, + SUM_ERRORS AS err_count, + SUM_WARNINGS AS warn_count, + SEC_TO_TIME(SUM_TIMER_WAIT/1000000000000) AS exec_time_total, + SEC_TO_TIME(MAX_TIMER_WAIT/1000000000000) AS exec_time_max, + (AVG_TIMER_WAIT/1000000000) AS exec_time_avg_ms, + SUM_ROWS_SENT AS rows_sent, + ROUND(SUM_ROWS_SENT / COUNT_STAR) AS rows_sent_avg, + SUM_ROWS_EXAMINED AS rows_scanned + FROM performance_schema.events_statements_summary_by_digest + ORDER BY SUM_TIMER_WAIT DESC LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest=clean_digest(row['digest_text']) + slow_queries["full_scan_"+clean_digest] = row['full_scan'] + slow_queries["exec_count_"clean_digest] = row['exec_count'] + slow_queries["err_count_"+clean_digest] = row['err_count'] + slow_queries["warn_count_"+clean_digest] = row['warn_count'] + slow_queries["exec_time_total_"+clean_digest] = row['exec_time_total'] + slow_queries["exec_time_max_"+clean_digest] = row['exec_time_max'] + slow_queries["exec_time_avg_ms_"+clean_digest] = row['exec_time_avg_ms'] + slow_queries["rows_sent_"+clean_digest] = row['rows_sent'] + slow_queries["rows_sent_avg_"+clean_digest] = row['rows_sent_avg'] + slow_queries["rows_scanned_"+clean_digest] = row['rows_scanned'] + + except MySQLdb.OperationalError: + return {} + + return slow_queries + + + def log_verbose(msg): if MYSQL_CONFIG['Verbose'] == False: return @@ -522,5 +641,10 @@ def read_callback(): if key not in innodb_status: continue dispatch_value('innodb', key, innodb_status[key], MYSQL_INNODB_STATUS_VARS[key]) + slow_queries = fetch_slow_queries(conn) + for key in slow_queries: + dispatch_value('slow_query', key, slow_query[key], 'counter') + + collectd.register_read(read_callback) collectd.register_config(configure_callback) From 4216ffa710686e111d560ddb09fd7ac218089753 Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:17:54 +0100 Subject: [PATCH 02/28] bugfixes and cleaning up query string --- mysql.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/mysql.py b/mysql.py index 127368b..ce6bbd6 100644 --- a/mysql.py +++ b/mysql.py @@ -453,7 +453,7 @@ def is_ps_enabled(conn): row = result.fetchOne() if row['performance_schema'] == 'ON': return True - else + else: return False def clean_string(digest): @@ -463,7 +463,9 @@ def clean_string(digest): clean_digest=clean_digest.replace(' ', '_') clean_digest=clean_digest.replace('(', '_') clean_digest=clean_digest.replace(')', '_') + clean_digest=clean_digest.replace('__', '_') clean_digest=clean_digest.replace('.', '-') + clean_digest=clean_digest.replace(',_', ',') return clean_digest MYSQL_MAX_SLOW_QUERIES=20 @@ -532,12 +534,9 @@ def fetch_slow_queries(conn): # Get the slow queries result = mysql_query(conn, """ SELECT IF(LENGTH(DIGEST_TEXT) > 
64, CONCAT(LEFT(DIGEST_TEXT, 30), ' ... ', RIGHT(DIGEST_TEXT, 30)), DIGEST_TEXT) AS query, - IF(SUM_NO_GOOD_INDEX_USED > 0 OR SUM_NO_INDEX_USED > 0, '*', '') AS full_scan, COUNT_STAR AS exec_count, - SUM_ERRORS AS err_count, - SUM_WARNINGS AS warn_count, - SEC_TO_TIME(SUM_TIMER_WAIT/1000000000000) AS exec_time_total, - SEC_TO_TIME(MAX_TIMER_WAIT/1000000000000) AS exec_time_max, + (SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, + (MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, (AVG_TIMER_WAIT/1000000000) AS exec_time_avg_ms, SUM_ROWS_SENT AS rows_sent, ROUND(SUM_ROWS_SENT / COUNT_STAR) AS rows_sent_avg, @@ -547,13 +546,10 @@ def fetch_slow_queries(conn): """) for row in result.fetchall(): # Clean the digest string - clean_digest=clean_digest(row['digest_text']) - slow_queries["full_scan_"+clean_digest] = row['full_scan'] - slow_queries["exec_count_"clean_digest] = row['exec_count'] - slow_queries["err_count_"+clean_digest] = row['err_count'] - slow_queries["warn_count_"+clean_digest] = row['warn_count'] - slow_queries["exec_time_total_"+clean_digest] = row['exec_time_total'] - slow_queries["exec_time_max_"+clean_digest] = row['exec_time_max'] + clean_digest=clean_string(row['query']) + slow_queries["exec_count_"+clean_digest] = row['exec_count'] + slow_queries["exec_time_total_"+clean_digest] = row['exec_time_total_ms'] + slow_queries["exec_time_max_"+clean_digest] = row['exec_time_max_ms'] slow_queries["exec_time_avg_ms_"+clean_digest] = row['exec_time_avg_ms'] slow_queries["rows_sent_"+clean_digest] = row['rows_sent'] slow_queries["rows_sent_avg_"+clean_digest] = row['rows_sent_avg'] @@ -643,7 +639,7 @@ def read_callback(): slow_queries = fetch_slow_queries(conn) for key in slow_queries: - dispatch_value('slow_query', key, slow_query[key], 'counter') + dispatch_value('slow_query', key, slow_queries[key], 'counter') collectd.register_read(read_callback) From 09ad4c00c301b50f978fae11bcbcac7e8bacbf9d Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:20:44 +0100 Subject: [PATCH 03/28] bugfixes and cleaning up query string --- mysql.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql.py b/mysql.py index ce6bbd6..d526611 100644 --- a/mysql.py +++ b/mysql.py @@ -466,6 +466,7 @@ def clean_string(digest): clean_digest=clean_digest.replace('__', '_') clean_digest=clean_digest.replace('.', '-') clean_digest=clean_digest.replace(',_', ',') + clean_digest=re.sub('_$', '',clean_digest) return clean_digest MYSQL_MAX_SLOW_QUERIES=20 From 012d63e459750ad639868f888222e52c9a7c78c2 Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:24:01 +0100 Subject: [PATCH 04/28] taking longer query details --- mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index d526611..61cc8e4 100644 --- a/mysql.py +++ b/mysql.py @@ -534,7 +534,7 @@ def fetch_slow_queries(conn): try: # Get the slow queries result = mysql_query(conn, """ - SELECT IF(LENGTH(DIGEST_TEXT) > 64, CONCAT(LEFT(DIGEST_TEXT, 30), ' ... ', RIGHT(DIGEST_TEXT, 30)), DIGEST_TEXT) AS query, + SELECT IF(LENGTH(DIGEST_TEXT) > 128, CONCAT(LEFT(DIGEST_TEXT, 60), ' ... 
', RIGHT(DIGEST_TEXT, 60)), DIGEST_TEXT) AS query, COUNT_STAR AS exec_count, (SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, (MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, From c20347541c8db2bc3aaadf51b4c94db5459301e4 Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:31:21 +0100 Subject: [PATCH 05/28] taking longer query details --- mysql.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql.py b/mysql.py index 61cc8e4..30cfc91 100644 --- a/mysql.py +++ b/mysql.py @@ -539,7 +539,7 @@ def fetch_slow_queries(conn): (SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, (MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, (AVG_TIMER_WAIT/1000000000) AS exec_time_avg_ms, - SUM_ROWS_SENT AS rows_sent, + SUM_ROWS_SENT AS rows_sent_sum, ROUND(SUM_ROWS_SENT / COUNT_STAR) AS rows_sent_avg, SUM_ROWS_EXAMINED AS rows_scanned FROM performance_schema.events_statements_summary_by_digest @@ -552,7 +552,7 @@ def fetch_slow_queries(conn): slow_queries["exec_time_total_"+clean_digest] = row['exec_time_total_ms'] slow_queries["exec_time_max_"+clean_digest] = row['exec_time_max_ms'] slow_queries["exec_time_avg_ms_"+clean_digest] = row['exec_time_avg_ms'] - slow_queries["rows_sent_"+clean_digest] = row['rows_sent'] + slow_queries["rows_sent_sum_"+clean_digest] = row['rows_sent_sum'] slow_queries["rows_sent_avg_"+clean_digest] = row['rows_sent_avg'] slow_queries["rows_scanned_"+clean_digest] = row['rows_scanned'] From aff594588d3b77cc64a4d1dfb4510f6e7c8b3c51 Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:46:31 +0100 Subject: [PATCH 06/28] adding warn/err query support --- mysql.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/mysql.py b/mysql.py index 30cfc91..749dad4 100644 --- a/mysql.py +++ b/mysql.py @@ -529,6 +529,35 @@ def clean_string(digest): #OR SUM_WARNINGS > 0 #ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC; +# Queries that raised errors +def fetch_warn_err_queries(conn): + queries = {} + try: + # Get the slow queries + result = mysql_query(conn, """ + SELECT IF(LENGTH(DIGEST_TEXT) > 128, CONCAT(LEFT(DIGEST_TEXT, 60), ' ... 
', RIGHT(DIGEST_TEXT, 60)), DIGEST_TEXT) AS query, + COUNT_STAR AS exec_count + SUM_ERRORS AS errors, + SUM_WARNINGS AS warnings, + FROM performance_schema.events_statements_summary_by_digest + WHERE SUM_ERRORS > 0 + OR SUM_WARNINGS > 0 + ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC; + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest=clean_string(row['query']) + queries["exec_count_"+clean_digest] = row['exec_count'] + queries["errors_"+clean_digest] = row['exec_time_total_ms'] + queries["warnings_"+clean_digest] = row['exec_time_max_ms'] + + except MySQLdb.OperationalError: + return {} + + return queries + +# Slow queries, response time, rows scanned, rows returned def fetch_slow_queries(conn): slow_queries = {} try: @@ -642,6 +671,11 @@ def read_callback(): for key in slow_queries: dispatch_value('slow_query', key, slow_queries[key], 'counter') + queries = fetch_warning_error_queries(conn) + for key in queries: + dispatch_value('warn_err_query', key, queries[key], 'counter') + + collectd.register_read(read_callback) collectd.register_config(configure_callback) From ef1ba2d8c425a494efb8cbcc1d15b834eeb7beaf Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:47:43 +0100 Subject: [PATCH 07/28] misspelled function name --- mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index 749dad4..b8b4bd5 100644 --- a/mysql.py +++ b/mysql.py @@ -530,7 +530,7 @@ def clean_string(digest): #ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC; # Queries that raised errors -def fetch_warn_err_queries(conn): +def fetch_warning_error_queries(conn): queries = {} try: # Get the slow queries From c39d8a99d78e434e94caa8fc4c7276f3c9566f95 Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:48:34 +0100 Subject: [PATCH 08/28] misspelled function name --- mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index b8b4bd5..d28b18c 100644 --- a/mysql.py +++ b/mysql.py @@ -536,7 +536,7 @@ def fetch_warning_error_queries(conn): # Get the slow queries result = mysql_query(conn, """ SELECT IF(LENGTH(DIGEST_TEXT) > 128, CONCAT(LEFT(DIGEST_TEXT, 60), ' ... 
', RIGHT(DIGEST_TEXT, 60)), DIGEST_TEXT) AS query, - COUNT_STAR AS exec_count + COUNT_STAR AS exec_count, SUM_ERRORS AS errors, SUM_WARNINGS AS warnings, FROM performance_schema.events_statements_summary_by_digest From d43ff7610d2ee9078e4f76c7ad392574d0cfdb0a Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:49:28 +0100 Subject: [PATCH 09/28] misspelled function name --- mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index d28b18c..1c1df2a 100644 --- a/mysql.py +++ b/mysql.py @@ -542,7 +542,7 @@ def fetch_warning_error_queries(conn): FROM performance_schema.events_statements_summary_by_digest WHERE SUM_ERRORS > 0 OR SUM_WARNINGS > 0 - ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC; + ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC LIMIT 10; """) for row in result.fetchall(): From 99122ff6b15667496189d57f40c5ac9223b7c179 Mon Sep 17 00:00:00 2001 From: isart Date: Wed, 25 Nov 2015 22:51:15 +0100 Subject: [PATCH 10/28] misspelled function name --- mysql.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql.py b/mysql.py index 1c1df2a..b0d3d56 100644 --- a/mysql.py +++ b/mysql.py @@ -549,8 +549,8 @@ def fetch_warning_error_queries(conn): # Clean the digest string clean_digest=clean_string(row['query']) queries["exec_count_"+clean_digest] = row['exec_count'] - queries["errors_"+clean_digest] = row['exec_time_total_ms'] - queries["warnings_"+clean_digest] = row['exec_time_max_ms'] + queries["errors_"+clean_digest] = row['errors'] + queries["warnings_"+clean_digest] = row['warnings'] except MySQLdb.OperationalError: return {} From f618e77bf325181cd1476b354170fe6eaaca66a8 Mon Sep 17 00:00:00 2001 From: isart Date: Thu, 26 Nov 2015 16:15:21 +0100 Subject: [PATCH 11/28] removing non utf8 chars --- mysql.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mysql.py b/mysql.py index b0d3d56..d7acd0a 100644 --- a/mysql.py +++ b/mysql.py @@ -458,18 +458,18 @@ def is_ps_enabled(conn): def clean_string(digest): clean_digest=digest + clean_digest=re.sub(r'[^\x00-\x7F]+','_', clean_digest) clean_digest=clean_digest.replace('`', '') clean_digest=clean_digest.replace('?', '') clean_digest=clean_digest.replace(' ', '_') + clean_digest=clean_digest.replace(',', '_') clean_digest=clean_digest.replace('(', '_') clean_digest=clean_digest.replace(')', '_') - clean_digest=clean_digest.replace('__', '_') - clean_digest=clean_digest.replace('.', '-') - clean_digest=clean_digest.replace(',_', ',') + clean_digest=clean_digest.replace('.', '_') + clean_digest=re.sub(r'(__)', '',clean_digest) clean_digest=re.sub('_$', '',clean_digest) return clean_digest -MYSQL_MAX_SLOW_QUERIES=20 # http://www.markleith.co.uk/2011/04/18/monitoring-table-and-index-io-with-performance_schema/ # http://www.markleith.co.uk/2012/07/04/mysql-performance-schema-statement-digests/ # @@ -535,10 +535,10 @@ def fetch_warning_error_queries(conn): try: # Get the slow queries result = mysql_query(conn, """ - SELECT IF(LENGTH(DIGEST_TEXT) > 128, CONCAT(LEFT(DIGEST_TEXT, 60), ' ... 
', RIGHT(DIGEST_TEXT, 60)), DIGEST_TEXT) AS query, + SELECT DIGEST_TEXT AS query, COUNT_STAR AS exec_count, SUM_ERRORS AS errors, - SUM_WARNINGS AS warnings, + SUM_WARNINGS AS warnings FROM performance_schema.events_statements_summary_by_digest WHERE SUM_ERRORS > 0 OR SUM_WARNINGS > 0 @@ -546,8 +546,8 @@ def fetch_warning_error_queries(conn): LIMIT 10; """) for row in result.fetchall(): - # Clean the digest string - clean_digest=clean_string(row['query']) + # Clean the digest string + clean_digest=clean_string(row['query']) queries["exec_count_"+clean_digest] = row['exec_count'] queries["errors_"+clean_digest] = row['errors'] queries["warnings_"+clean_digest] = row['warnings'] @@ -563,7 +563,7 @@ def fetch_slow_queries(conn): try: # Get the slow queries result = mysql_query(conn, """ - SELECT IF(LENGTH(DIGEST_TEXT) > 128, CONCAT(LEFT(DIGEST_TEXT, 60), ' ... ', RIGHT(DIGEST_TEXT, 60)), DIGEST_TEXT) AS query, + SELECT DIGEST_TEXT AS query, COUNT_STAR AS exec_count, (SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, (MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, @@ -576,7 +576,7 @@ def fetch_slow_queries(conn): """) for row in result.fetchall(): # Clean the digest string - clean_digest=clean_string(row['query']) + clean_digest=clean_string(row['query']) slow_queries["exec_count_"+clean_digest] = row['exec_count'] slow_queries["exec_time_total_"+clean_digest] = row['exec_time_total_ms'] slow_queries["exec_time_max_"+clean_digest] = row['exec_time_max_ms'] From cd814c6c86fef6a5f48be9ecaf6cda2f7a689da6 Mon Sep 17 00:00:00 2001 From: isart Date: Thu, 26 Nov 2015 17:28:09 +0100 Subject: [PATCH 12/28] adding indexes info --- mysql.py | 102 +++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 73 insertions(+), 29 deletions(-) diff --git a/mysql.py b/mysql.py index d7acd0a..3fd5ae2 100644 --- a/mysql.py +++ b/mysql.py @@ -507,27 +507,60 @@ def clean_string(digest): # reads per index # SELECT object_schema AS table_schema, object_name AS table_name, index_name, count_read AS rows_read from performance_schema.table_io_waits_summary_by_index_usage WHERE index_name IS NOT NULL ORDER BY sum_timer_wait DESC; +# number of reads per index +def fetch_number_of_reads_per_index(conn): + queries = {} + try: + result = mysql_query(conn, """ + SELECT + object_schema, + object_name, + index_name, + count_read AS rows_read + FROM performance_schema.table_io_waits_summary_by_index_usage + WHERE index_name IS NOT NULL + AND count_read>0 + ORDER BY sum_timer_wait DESC + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest=clean_string(row['object_schema']+'_'+row['object_name']+'_'+row['index_name']) + queries["number_of_reads_per_index_"+clean_digest] = row['rows_read'] + + except MySQLdb.OperationalError: + return {} + + return queries + + + # Indexes not being used -#SELECT object_schema, -#object_name, -#index_name -#FROM performance_schema.table_io_waits_summary_by_index_usage -#WHERE index_name IS NOT NULL -#AND count_star = 0 -#ORDER BY object_schema, object_name; - -# Queries that have raised errors/warnings -#SELECT IF(LENGTH(DIGEST_TEXT) > 64, CONCAT(LEFT(DIGEST_TEXT, 30), ' ... 
', RIGHT(DIGEST_TEXT, 30)), DIGEST_TEXT) AS query, -#COUNT_STAR AS exec_count, -#SUM_ERRORS AS errors, -#(SUM_ERRORS / COUNT_STAR) * 100 as error_pct, -#SUM_WARNINGS AS warnings, -#(SUM_WARNINGS / COUNT_STAR) * 100 as warning_pct, -#DIGEST AS digest -#FROM performance_schema.events_statements_summary_by_digest -#WHERE SUM_ERRORS > 0 -#OR SUM_WARNINGS > 0 -#ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC; +def fetch_indexes_not_being_used(conn): + queries = {} + try: + result = mysql_query(conn, """ + SELECT object_schema, + object_name, + index_name + FROM performance_schema.table_io_waits_summary_by_index_usage + WHERE index_name IS NOT NULL + AND count_star = 0 + AND object_schema != 'mysql' + ORDER BY object_schema, object_name + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest=clean_string(row['object_schema']+'_'+row['object_name']+'_'+row['index_name']) + queries["index_not_being_used_"+clean_digest] = 1 + + except MySQLdb.OperationalError: + return {} + + return queries + + # Queries that raised errors def fetch_warning_error_queries(conn): @@ -565,9 +598,9 @@ def fetch_slow_queries(conn): result = mysql_query(conn, """ SELECT DIGEST_TEXT AS query, COUNT_STAR AS exec_count, - (SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, - (MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, - (AVG_TIMER_WAIT/1000000000) AS exec_time_avg_ms, + round(SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, + round(MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, + round(AVG_TIMER_WAIT/1000000000) AS exec_time_avg_ms, SUM_ROWS_SENT AS rows_sent_sum, ROUND(SUM_ROWS_SENT / COUNT_STAR) AS rows_sent_avg, SUM_ROWS_EXAMINED AS rows_scanned @@ -667,13 +700,24 @@ def read_callback(): if key not in innodb_status: continue dispatch_value('innodb', key, innodb_status[key], MYSQL_INNODB_STATUS_VARS[key]) - slow_queries = fetch_slow_queries(conn) - for key in slow_queries: - dispatch_value('slow_query', key, slow_queries[key], 'counter') + # Performance_Schema metrics + if is_ps_enabled(conn): + slow_queries = fetch_slow_queries(conn) + for key in slow_queries: + dispatch_value('slow_query', key, slow_queries[key], 'counter') + + queries = fetch_warning_error_queries(conn) + for key in queries: + dispatch_value('warn_err_query', key, queries[key], 'counter') + + queries = fetch_indexes_not_being_used(conn) + for key in queries: + dispatch_value('indexes_not_being_used', key, queries[key], 'counter') + + queries = fetch_number_of_reads_per_index(conn) + for key in queries: + dispatch_value('number_of_reads_per_index', key, queries[key], 'counter') - queries = fetch_warning_error_queries(conn) - for key in queries: - dispatch_value('warn_err_query', key, queries[key], 'counter') From c922d6e690c6facc5e13e11b132c6e8f2731c259 Mon Sep 17 00:00:00 2001 From: isart Date: Thu, 26 Nov 2015 18:16:11 +0100 Subject: [PATCH 13/28] adding support for connections per user/account/host --- mysql.py | 84 ++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 72 insertions(+), 12 deletions(-) diff --git a/mysql.py b/mysql.py index 3fd5ae2..183f143 100644 --- a/mysql.py +++ b/mysql.py @@ -450,8 +450,8 @@ def fetch_innodb_stats(conn): # Check if PENFORMANCE_SCHEMA is enabled def is_ps_enabled(conn): result = mysql_query(conn, 'SHOW GLOBAL VARIABLES LIKE "performance_schema"') - row = result.fetchOne() - if row['performance_schema'] == 'ON': + row = result.fetchone() + if row['Value'] == 'ON': return True else: return False @@ -485,12 +485,6 @@ def clean_string(digest): # Connections 
per account (user/host) # 1) select * from accounts limit 100; -# Connections per host -# 1) select * from hosts limit 100; - -# Connections per user -# 1) select * from users limit 100; - # Operations (rows read / rows changed) per table # SELECT pst.object_schema AS table_schema, # pst.object_name AS table_name, @@ -504,8 +498,66 @@ def clean_string(digest): # GROUP BY pst.object_schema, pst.object_name # ORDER BY pst.sum_timer_wait DESC; -# reads per index -# SELECT object_schema AS table_schema, object_name AS table_name, index_name, count_read AS rows_read from performance_schema.table_io_waits_summary_by_index_usage WHERE index_name IS NOT NULL ORDER BY sum_timer_wait DESC; +# Connections per account +def fetch_connections_per_account(conn): + queries = {} + try: + result = mysql_query(conn, """ + SELECT user,host,current_connections,total_connections + FROM performance_schema.accounts + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest=clean_string(row['user']+'_'+row['host']) + queries["current_connectinos_per_host_"+clean_digest] = row['current_connections'] + queries["total_connectinos_per_account_"+clean_digest] = row['total_connections'] + + except MySQLdb.OperationalError: + return {} + + return queries + + +# Connections per host +def fetch_connections_per_host(conn): + queries = {} + try: + result = mysql_query(conn, """ + SELECT host,current_connections,total_connections + FROM performance_schema.hosts + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest=clean_string(row['host']) + queries["current_connectinos_per_host_"+clean_digest] = row['current_connections'] + queries["total_connectinos_per_host_"+clean_digest] = row['total_connections'] + + except MySQLdb.OperationalError: + return {} + + return queries + +# Connections per user +def fetch_connections_per_user(conn): + queries = {} + try: + result = mysql_query(conn, """ + SELECT user,current_connections,total_connections + FROM performance_schema.users + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest=clean_string(row['user']) + queries["current_connectinos_per_user_"+clean_digest] = row['current_connections'] + queries["total_connectinos_per_user_"+clean_digest] = row['total_connections'] + + except MySQLdb.OperationalError: + return {} + + return queries # number of reads per index def fetch_number_of_reads_per_index(conn): @@ -533,8 +585,6 @@ def fetch_number_of_reads_per_index(conn): return queries - - # Indexes not being used def fetch_indexes_not_being_used(conn): queries = {} @@ -718,7 +768,17 @@ def read_callback(): for key in queries: dispatch_value('number_of_reads_per_index', key, queries[key], 'counter') + queries=fetch_connections_per_user(conn) + for key in queries: + dispatch_value('connections_per_user', key, queries[key], 'counter') + + queries=fetch_connections_per_host(conn) + for key in queries: + dispatch_value('connections_per_host', key, queries[key], 'counter') + queries=fetch_connections_per_account(conn) + for key in queries: + dispatch_value('connections_per_account', key, queries[key], 'counter') collectd.register_read(read_callback) From 20ead1d32d06ec956b08e69525cfe203ffacdf9f Mon Sep 17 00:00:00 2001 From: isart Date: Thu, 26 Nov 2015 18:24:28 +0100 Subject: [PATCH 14/28] adding support for connections per user/account/host --- mysql.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mysql.py b/mysql.py index 183f143..b1158bc 
100644 --- a/mysql.py +++ b/mysql.py @@ -509,9 +509,9 @@ def fetch_connections_per_account(conn): """) for row in result.fetchall(): # Clean the digest string - clean_digest=clean_string(row['user']+'_'+row['host']) - queries["current_connectinos_per_host_"+clean_digest] = row['current_connections'] - queries["total_connectinos_per_account_"+clean_digest] = row['total_connections'] + clean_digest=str(row['user']+'_'+row['host']) + queries["current_connections_per_host_"+clean_digest] = row['current_connections'] + queries["total_connections_per_account_"+clean_digest] = row['total_connections'] except MySQLdb.OperationalError: return {} @@ -530,9 +530,9 @@ def fetch_connections_per_host(conn): """) for row in result.fetchall(): # Clean the digest string - clean_digest=clean_string(row['host']) - queries["current_connectinos_per_host_"+clean_digest] = row['current_connections'] - queries["total_connectinos_per_host_"+clean_digest] = row['total_connections'] + clean_digest=str(row['host']) + queries["current_connections_per_host_"+clean_digest] = row['current_connections'] + queries["total_connections_per_host_"+clean_digest] = row['total_connections'] except MySQLdb.OperationalError: return {} @@ -550,9 +550,9 @@ def fetch_connections_per_user(conn): """) for row in result.fetchall(): # Clean the digest string - clean_digest=clean_string(row['user']) - queries["current_connectinos_per_user_"+clean_digest] = row['current_connections'] - queries["total_connectinos_per_user_"+clean_digest] = row['total_connections'] + clean_digest=str(row['user']) + queries["current_connections_per_user_"+clean_digest] = row['current_connections'] + queries["total_connections_per_user_"+clean_digest] = row['total_connections'] except MySQLdb.OperationalError: return {} From 2739d0bdf9de9d66cc0e4358c6af49aef5bf2d7d Mon Sep 17 00:00:00 2001 From: isart Date: Mon, 30 Nov 2015 22:05:20 +0100 Subject: [PATCH 15/28] change counter per gauge --- mysql.py | 65 +++++++++++++++++++++++--------------------------------- 1 file changed, 26 insertions(+), 39 deletions(-) diff --git a/mysql.py b/mysql.py index b1158bc..f0baf86 100644 --- a/mysql.py +++ b/mysql.py @@ -480,24 +480,6 @@ def clean_string(digest): # 4) List all normalized statements that use have done a full table scan ordered by the percentage of times a full scan was done, then by the number of times the statement executed # 5) List all normalized statements that have raised errors or warnings. 
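# For reference, 2) above can be written against the same digest table as the
# fetchers below; a minimal sketch (untested here), using the documented
# MySQL 5.6+ columns of events_statements_summary_by_digest:
#
#   SELECT DIGEST_TEXT AS query,
#          COUNT_STAR AS exec_count,
#          SUM_CREATED_TMP_DISK_TABLES AS tmp_disk_tables,
#          SUM_CREATED_TMP_TABLES AS tmp_tables
#   FROM performance_schema.events_statements_summary_by_digest
#   WHERE SUM_CREATED_TMP_TABLES > 0
#   ORDER BY SUM_CREATED_TMP_DISK_TABLES DESC, SUM_CREATED_TMP_TABLES DESC
#   LIMIT 10;
#
# Note that the TIMER columns in this table are in picoseconds, hence the
# divisions by 1000000000 (to ms) and 1000000000000 (to s) in the fetchers.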
-# New - -# Connections per account (user/host) -# 1) select * from accounts limit 100; - -# Operations (rows read / rows changed) per table -# SELECT pst.object_schema AS table_schema, -# pst.object_name AS table_name, -# pst.count_read AS rows_read, -# pst.count_write AS rows_changed, -# (pst.count_write * COUNT(psi.index_name)) AS rows_changed_x_indexes -# FROM performance_schema.table_io_waits_summary_by_table AS pst -# LEFT JOIN performance_schema.table_io_waits_summary_by_index_usage AS psi -# ON pst.object_schema = psi.object_schema AND pst.object_name = psi.object_name -# AND psi.index_name IS NOT NULL -# GROUP BY pst.object_schema, pst.object_name -# ORDER BY pst.sum_timer_wait DESC; - # Connections per account def fetch_connections_per_account(conn): queries = {} @@ -509,8 +491,8 @@ def fetch_connections_per_account(conn): """) for row in result.fetchall(): # Clean the digest string - clean_digest=str(row['user']+'_'+row['host']) - queries["current_connections_per_host_"+clean_digest] = row['current_connections'] + clean_digest=str(row['user'])+'_'+str(row['host']) + queries["current_connections_per_account_"+clean_digest] = row['current_connections'] queries["total_connections_per_account_"+clean_digest] = row['total_connections'] except MySQLdb.OperationalError: @@ -558,27 +540,32 @@ def fetch_connections_per_user(conn): return {} return queries - -# number of reads per index +n +# number of reads/changed per index def fetch_number_of_reads_per_index(conn): queries = {} try: result = mysql_query(conn, """ - SELECT - object_schema, - object_name, - index_name, - count_read AS rows_read - FROM performance_schema.table_io_waits_summary_by_index_usage - WHERE index_name IS NOT NULL - AND count_read>0 - ORDER BY sum_timer_wait DESC + SELECT pst.object_schema AS table_schema, + pst.object_name AS table_name, + pst.count_read AS rows_read, + pst.count_write AS rows_changed, + (pst.count_write * COUNT(psi.index_name)) AS rows_changed_x_indexes + FROM performance_schema.table_io_waits_summary_by_table AS pst + LEFT JOIN performance_schema.table_io_waits_summary_by_index_usage AS psi + ON pst.object_schema = psi.object_schema AND pst.object_name = psi.object_name + AND psi.index_name IS NOT NULL + WHERE pst.sum_timer_wait > 0 + GROUP BY pst.object_schema, pst.object_name + ORDER BY pst.sum_timer_wait DESC LIMIT 10; + """) for row in result.fetchall(): # Clean the digest string clean_digest=clean_string(row['object_schema']+'_'+row['object_name']+'_'+row['index_name']) - queries["number_of_reads_per_index_"+clean_digest] = row['rows_read'] + queries["number_of_rows_reads_per_index_"+clean_digest] = row['rows_read'] + queries["number_of_rows_changed_per_index_"+clean_digest] = row['rows_changed'] except MySQLdb.OperationalError: return {} @@ -754,31 +741,31 @@ def read_callback(): if is_ps_enabled(conn): slow_queries = fetch_slow_queries(conn) for key in slow_queries: - dispatch_value('slow_query', key, slow_queries[key], 'counter') + dispatch_value('slow_query', key, slow_queries[key], 'gauge') queries = fetch_warning_error_queries(conn) for key in queries: - dispatch_value('warn_err_query', key, queries[key], 'counter') + dispatch_value('warn_err_query', key, queries[key], 'gauge') queries = fetch_indexes_not_being_used(conn) for key in queries: - dispatch_value('indexes_not_being_used', key, queries[key], 'counter') + dispatch_value('indexes_not_being_used', key, queries[key], 'gauge') queries = fetch_number_of_reads_per_index(conn) for key in queries: - 
dispatch_value('number_of_reads_per_index', key, queries[key], 'counter') + dispatch_value('number_of_reads_per_index', key, queries[key], 'gauge') queries=fetch_connections_per_user(conn) for key in queries: - dispatch_value('connections_per_user', key, queries[key], 'counter') + dispatch_value('connections_per_user', key, queries[key], 'gauge') queries=fetch_connections_per_host(conn) for key in queries: - dispatch_value('connections_per_host', key, queries[key], 'counter') + dispatch_value('connections_per_host', key, queries[key], 'gauge') queries=fetch_connections_per_account(conn) for key in queries: - dispatch_value('connections_per_account', key, queries[key], 'counter') + dispatch_value('connections_per_account', key, queries[key], 'gauge') collectd.register_read(read_callback) From ed46b751a6f60293a69cd88a86eb3ba3255356b7 Mon Sep 17 00:00:00 2001 From: isart Date: Mon, 30 Nov 2015 22:07:24 +0100 Subject: [PATCH 16/28] typo fix --- mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index f0baf86..4385e69 100644 --- a/mysql.py +++ b/mysql.py @@ -540,7 +540,7 @@ def fetch_connections_per_user(conn): return {} return queries -n + # number of reads/changed per index def fetch_number_of_reads_per_index(conn): queries = {} From bf3cb2888fda0091b7e791e22b715a5aebde1b16 Mon Sep 17 00:00:00 2001 From: isart Date: Tue, 1 Dec 2015 10:58:05 +0100 Subject: [PATCH 17/28] fixing more typos --- mysql.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql.py b/mysql.py index 4385e69..abd7c00 100644 --- a/mysql.py +++ b/mysql.py @@ -548,9 +548,9 @@ def fetch_number_of_reads_per_index(conn): result = mysql_query(conn, """ SELECT pst.object_schema AS table_schema, pst.object_name AS table_name, + psi.object_name AS index_name, pst.count_read AS rows_read, - pst.count_write AS rows_changed, - (pst.count_write * COUNT(psi.index_name)) AS rows_changed_x_indexes + pst.count_write AS rows_changed FROM performance_schema.table_io_waits_summary_by_table AS pst LEFT JOIN performance_schema.table_io_waits_summary_by_index_usage AS psi ON pst.object_schema = psi.object_schema AND pst.object_name = psi.object_name @@ -563,7 +563,7 @@ def fetch_number_of_reads_per_index(conn): """) for row in result.fetchall(): # Clean the digest string - clean_digest=clean_string(row['object_schema']+'_'+row['object_name']+'_'+row['index_name']) + clean_digest=clean_string(row['table_schema']+'_'+row['table_name']+'_'+row['index_name']) queries["number_of_rows_reads_per_index_"+clean_digest] = row['rows_read'] queries["number_of_rows_changed_per_index_"+clean_digest] = row['rows_changed'] From b25a1c7c56acdc790819f90866de4227173b1066 Mon Sep 17 00:00:00 2001 From: isart Date: Tue, 1 Dec 2015 12:00:08 +0100 Subject: [PATCH 18/28] updating README file --- README.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index bcf609c..0f2b476 100644 --- a/README.md +++ b/README.md @@ -322,5 +322,16 @@ For versions of MySQL with support for it and where enabled, `INFORMATION_SCHEMA response_time_total.14 response_time_count.14 +### Added support for PERFORMANCE_SCHEMA metrics +If Performance_schema is enabled you will get the following metrics as well. 
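+(The plugin detects this with the same check you can run by hand; a minimal
+sketch, assuming a 5.6+ server where performance_schema defaults to ON. The
+variable is read-only at runtime, so it must be enabled at server startup.)
+
+    mysql> SHOW GLOBAL VARIABLES LIKE 'performance_schema';
+    performance_schema | ON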
+ + Number of connections per Account (host-user) - Total and current + Number of connections per User - Total and current + Number of connections per Host - Total and current + Number of rows read per index - schema, table, index name, rows read + Indexes not being used (didn't get any read) - schema, table, index_name + Queries that raised errors/warnings - Query, number of executions, errors, warnings + Slow queries - Query, number of executions, execution time (total,max,avg), rows sent (total, avg), scanned rows + ## License -MIT (http://www.opensource.org/licenses/mit-license.php) \ No newline at end of file +MIT (http://www.opensource.org/licenses/mit-license.php) From 25a910973cc671dc7257d29a8c18c1effa836c2e Mon Sep 17 00:00:00 2001 From: isart Date: Tue, 1 Dec 2015 16:07:01 +0100 Subject: [PATCH 19/28] fixing errors when query is None or Null --- mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index abd7c00..bb59fc7 100644 --- a/mysql.py +++ b/mysql.py @@ -457,7 +457,7 @@ def is_ps_enabled(conn): return False def clean_string(digest): - clean_digest=digest + clean_digest=str(digest) clean_digest=re.sub(r'[^\x00-\x7F]+','_', clean_digest) clean_digest=clean_digest.replace('`', '') clean_digest=clean_digest.replace('?', '') From 3d55b1e0a8536a00bd8e8cdb65eae4ee6c2cf210 Mon Sep 17 00:00:00 2001 From: isart Date: Tue, 1 Dec 2015 17:24:40 +0100 Subject: [PATCH 20/28] escaping \ on queries --- mysql.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql.py b/mysql.py index bb59fc7..731392e 100644 --- a/mysql.py +++ b/mysql.py @@ -460,6 +460,7 @@ def clean_string(digest): clean_digest=str(digest) clean_digest=re.sub(r'[^\x00-\x7F]+','_', clean_digest) clean_digest=clean_digest.replace('`', '') + clean_digest=clean_digest.replace('\\', '') clean_digest=clean_digest.replace('?', '') clean_digest=clean_digest.replace(' ', '_') clean_digest=clean_digest.replace(',', '_') From 85338e419eb3984613a88a2b42c2ba872b5a1b7a Mon Sep 17 00:00:00 2001 From: isart Date: Thu, 3 Dec 2015 11:23:42 +0100 Subject: [PATCH 21/28] excluding "null" queries --- mysql.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql.py b/mysql.py index 731392e..a1ffe50 100644 --- a/mysql.py +++ b/mysql.py @@ -643,6 +643,7 @@ def fetch_slow_queries(conn): ROUND(SUM_ROWS_SENT / COUNT_STAR) AS rows_sent_avg, SUM_ROWS_EXAMINED AS rows_scanned FROM performance_schema.events_statements_summary_by_digest + WHERE DIGEST_TEXT NOT LIKE 'null' ORDER BY SUM_TIMER_WAIT DESC LIMIT 10; """) for row in result.fetchall(): From 9def09bbeb4fcd1a074a3855b6eb825dc504595e Mon Sep 17 00:00:00 2001 From: isart Date: Mon, 7 Dec 2015 10:57:30 +0100 Subject: [PATCH 22/28] exclude table name from slow queries --- mysql.py | 1494 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 829 insertions(+), 665 deletions(-) diff --git a/mysql.py b/mysql.py index a1ffe50..928c7d7 100644 --- a/mysql.py +++ b/mysql.py @@ -9,11 +9,11 @@ # Configuration: # Import mysql # -# Host localhost -# Port 3306 (optional) -# User root -# Password xxxx -# HeartbeatTable percona.heartbeat (optional, if using pt-heartbeat) +# Host localhost +# Port 3306 (optional) +# User root +# Password xxxx +# HeartbeatTable percona.heartbeat (optional, if using pt-heartbeat) # Verbose true (optional, to enable debugging) # # @@ -28,746 +28,910 @@ import MySQLdb MYSQL_CONFIG = { - 'Host': 'localhost', - 'Port': 3306, - 'User': 'root', - 'Password': '', - 'HeartbeatTable': '', - 'Verbose': False, + 'Host': 'localhost', + 
'Port': 3306, + 'User': 'root', + 'Password': '', + 'HeartbeatTable': '', + 'Verbose': False, } MYSQL_STATUS_VARS = { - 'Aborted_clients': 'counter', - 'Aborted_connects': 'counter', - 'Binlog_cache_disk_use': 'counter', - 'Binlog_cache_use': 'counter', - 'Bytes_received': 'counter', - 'Bytes_sent': 'counter', - 'Connections': 'counter', - 'Created_tmp_disk_tables': 'counter', - 'Created_tmp_files': 'counter', - 'Created_tmp_tables': 'counter', - 'Innodb_buffer_pool_pages_data': 'gauge', - 'Innodb_buffer_pool_pages_dirty': 'gauge', - 'Innodb_buffer_pool_pages_free': 'gauge', - 'Innodb_buffer_pool_pages_total': 'gauge', - 'Innodb_buffer_pool_read_requests': 'counter', - 'Innodb_buffer_pool_reads': 'counter', - 'Innodb_checkpoint_age': 'gauge', - 'Innodb_checkpoint_max_age': 'gauge', - 'Innodb_data_fsyncs': 'counter', - 'Innodb_data_pending_fsyncs': 'gauge', - 'Innodb_data_pending_reads': 'gauge', - 'Innodb_data_pending_writes': 'gauge', - 'Innodb_data_read': 'counter', - 'Innodb_data_reads': 'counter', - 'Innodb_data_writes': 'counter', - 'Innodb_data_written': 'counter', - 'Innodb_deadlocks': 'counter', - 'Innodb_history_list_length': 'gauge', - 'Innodb_ibuf_free_list': 'gauge', - 'Innodb_ibuf_merged_delete_marks': 'counter', - 'Innodb_ibuf_merged_deletes': 'counter', - 'Innodb_ibuf_merged_inserts': 'counter', - 'Innodb_ibuf_merges': 'counter', - 'Innodb_ibuf_segment_size': 'gauge', - 'Innodb_ibuf_size': 'gauge', - 'Innodb_lsn_current': 'counter', - 'Innodb_lsn_flushed': 'counter', - 'Innodb_max_trx_id': 'counter', - 'Innodb_mem_adaptive_hash': 'gauge', - 'Innodb_mem_dictionary': 'gauge', - 'Innodb_mem_total': 'gauge', - 'Innodb_mutex_os_waits': 'counter', - 'Innodb_mutex_spin_rounds': 'counter', - 'Innodb_mutex_spin_waits': 'counter', - 'Innodb_os_log_pending_fsyncs': 'gauge', - 'Innodb_pages_created': 'counter', - 'Innodb_pages_read': 'counter', - 'Innodb_pages_written': 'counter', - 'Innodb_row_lock_time': 'counter', - 'Innodb_row_lock_time_avg': 'gauge', - 'Innodb_row_lock_time_max': 'gauge', - 'Innodb_row_lock_waits': 'counter', - 'Innodb_rows_deleted': 'counter', - 'Innodb_rows_inserted': 'counter', - 'Innodb_rows_read': 'counter', - 'Innodb_rows_updated': 'counter', - 'Innodb_s_lock_os_waits': 'counter', - 'Innodb_s_lock_spin_rounds': 'counter', - 'Innodb_s_lock_spin_waits': 'counter', - 'Innodb_uncheckpointed_bytes': 'gauge', - 'Innodb_unflushed_log': 'gauge', - 'Innodb_unpurged_txns': 'gauge', - 'Innodb_x_lock_os_waits': 'counter', - 'Innodb_x_lock_spin_rounds': 'counter', - 'Innodb_x_lock_spin_waits': 'counter', - 'Key_blocks_not_flushed': 'gauge', - 'Key_blocks_unused': 'gauge', - 'Key_blocks_used': 'gauge', - 'Key_read_requests': 'counter', - 'Key_reads': 'counter', - 'Key_write_requests': 'counter', - 'Key_writes': 'counter', - 'Max_used_connections': 'gauge', - 'Open_files': 'gauge', - 'Open_table_definitions': 'gauge', - 'Open_tables': 'gauge', - 'Opened_files': 'counter', - 'Opened_table_definitions': 'counter', - 'Opened_tables': 'counter', - 'Qcache_free_blocks': 'gauge', - 'Qcache_free_memory': 'gauge', - 'Qcache_hits': 'counter', - 'Qcache_inserts': 'counter', - 'Qcache_lowmem_prunes': 'counter', - 'Qcache_not_cached': 'counter', - 'Qcache_queries_in_cache': 'counter', - 'Qcache_total_blocks': 'counter', - 'Questions': 'counter', - 'Select_full_join': 'counter', - 'Select_full_range_join': 'counter', - 'Select_range': 'counter', - 'Select_range_check': 'counter', - 'Select_scan': 'counter', - 'Slave_open_temp_tables': 'gauge', - 'Slave_retried_transactions': 'counter', 
- 'Slow_launch_threads': 'counter', - 'Slow_queries': 'counter', - 'Sort_merge_passes': 'counter', - 'Sort_range': 'counter', - 'Sort_rows': 'counter', - 'Sort_scan': 'counter', - 'Table_locks_immediate': 'counter', - 'Table_locks_waited': 'counter', - 'Table_open_cache_hits': 'counter', - 'Table_open_cache_misses': 'counter', - 'Table_open_cache_overflows': 'counter', - 'Threadpool_idle_threads': 'gauge', - 'Threadpool_threads': 'gauge', - 'Threads_cached': 'gauge', - 'Threads_connected': 'gauge', - 'Threads_created': 'counter', - 'Threads_running': 'gauge', - 'Uptime': 'gauge', + 'Aborted_clients': 'counter', + 'Aborted_connects': 'counter', + 'Binlog_cache_disk_use': 'counter', + 'Binlog_cache_use': 'counter', + 'Bytes_received': 'counter', + 'Bytes_sent': 'counter', + 'Connections': 'counter', + 'Created_tmp_disk_tables': 'counter', + 'Created_tmp_files': 'counter', + 'Created_tmp_tables': 'counter', + 'Innodb_buffer_pool_pages_data': 'gauge', + 'Innodb_buffer_pool_pages_dirty': 'gauge', + 'Innodb_buffer_pool_pages_free': 'gauge', + 'Innodb_buffer_pool_pages_total': 'gauge', + 'Innodb_buffer_pool_read_requests': 'counter', + 'Innodb_buffer_pool_reads': 'counter', + 'Innodb_checkpoint_age': 'gauge', + 'Innodb_checkpoint_max_age': 'gauge', + 'Innodb_data_fsyncs': 'counter', + 'Innodb_data_pending_fsyncs': 'gauge', + 'Innodb_data_pending_reads': 'gauge', + 'Innodb_data_pending_writes': 'gauge', + 'Innodb_data_read': 'counter', + 'Innodb_data_reads': 'counter', + 'Innodb_data_writes': 'counter', + 'Innodb_data_written': 'counter', + 'Innodb_deadlocks': 'counter', + 'Innodb_history_list_length': 'gauge', + 'Innodb_ibuf_free_list': 'gauge', + 'Innodb_ibuf_merged_delete_marks': 'counter', + 'Innodb_ibuf_merged_deletes': 'counter', + 'Innodb_ibuf_merged_inserts': 'counter', + 'Innodb_ibuf_merges': 'counter', + 'Innodb_ibuf_segment_size': 'gauge', + 'Innodb_ibuf_size': 'gauge', + 'Innodb_lsn_current': 'counter', + 'Innodb_lsn_flushed': 'counter', + 'Innodb_max_trx_id': 'counter', + 'Innodb_mem_adaptive_hash': 'gauge', + 'Innodb_mem_dictionary': 'gauge', + 'Innodb_mem_total': 'gauge', + 'Innodb_mutex_os_waits': 'counter', + 'Innodb_mutex_spin_rounds': 'counter', + 'Innodb_mutex_spin_waits': 'counter', + 'Innodb_os_log_pending_fsyncs': 'gauge', + 'Innodb_pages_created': 'counter', + 'Innodb_pages_read': 'counter', + 'Innodb_pages_written': 'counter', + 'Innodb_row_lock_time': 'counter', + 'Innodb_row_lock_time_avg': 'gauge', + 'Innodb_row_lock_time_max': 'gauge', + 'Innodb_row_lock_waits': 'counter', + 'Innodb_rows_deleted': 'counter', + 'Innodb_rows_inserted': 'counter', + 'Innodb_rows_read': 'counter', + 'Innodb_rows_updated': 'counter', + 'Innodb_s_lock_os_waits': 'counter', + 'Innodb_s_lock_spin_rounds': 'counter', + 'Innodb_s_lock_spin_waits': 'counter', + 'Innodb_uncheckpointed_bytes': 'gauge', + 'Innodb_unflushed_log': 'gauge', + 'Innodb_unpurged_txns': 'gauge', + 'Innodb_x_lock_os_waits': 'counter', + 'Innodb_x_lock_spin_rounds': 'counter', + 'Innodb_x_lock_spin_waits': 'counter', + 'Key_blocks_not_flushed': 'gauge', + 'Key_blocks_unused': 'gauge', + 'Key_blocks_used': 'gauge', + 'Key_read_requests': 'counter', + 'Key_reads': 'counter', + 'Key_write_requests': 'counter', + 'Key_writes': 'counter', + 'Max_used_connections': 'gauge', + 'Open_files': 'gauge', + 'Open_table_definitions': 'gauge', + 'Open_tables': 'gauge', + 'Opened_files': 'counter', + 'Opened_table_definitions': 'counter', + 'Opened_tables': 'counter', + 'Qcache_free_blocks': 'gauge', + 'Qcache_free_memory': 'gauge', + 
'Qcache_hits': 'counter', + 'Qcache_inserts': 'counter', + 'Qcache_lowmem_prunes': 'counter', + 'Qcache_not_cached': 'counter', + 'Qcache_queries_in_cache': 'counter', + 'Qcache_total_blocks': 'counter', + 'Questions': 'counter', + 'Select_full_join': 'counter', + 'Select_full_range_join': 'counter', + 'Select_range': 'counter', + 'Select_range_check': 'counter', + 'Select_scan': 'counter', + 'Slave_open_temp_tables': 'gauge', + 'Slave_retried_transactions': 'counter', + 'Slow_launch_threads': 'counter', + 'Slow_queries': 'counter', + 'Sort_merge_passes': 'counter', + 'Sort_range': 'counter', + 'Sort_rows': 'counter', + 'Sort_scan': 'counter', + 'Table_locks_immediate': 'counter', + 'Table_locks_waited': 'counter', + 'Table_open_cache_hits': 'counter', + 'Table_open_cache_misses': 'counter', + 'Table_open_cache_overflows': 'counter', + 'Threadpool_idle_threads': 'gauge', + 'Threadpool_threads': 'gauge', + 'Threads_cached': 'gauge', + 'Threads_connected': 'gauge', + 'Threads_created': 'counter', + 'Threads_running': 'gauge', + 'Uptime': 'gauge', } MYSQL_VARS = [ - 'binlog_stmt_cache_size', - 'innodb_additional_mem_pool_size', - 'innodb_buffer_pool_size', - 'innodb_concurrency_tickets', - 'innodb_io_capacity', - 'innodb_log_buffer_size', - 'innodb_log_file_size', - 'innodb_open_files', - 'innodb_open_files', - 'join_buffer_size', - 'max_connections', - 'open_files_limit', - 'query_cache_limit', - 'query_cache_size', - 'query_cache_size', - 'read_buffer_size', - 'table_cache', - 'table_definition_cache', - 'table_open_cache', - 'thread_cache_size', - 'thread_cache_size', - 'thread_concurrency', - 'tmp_table_size', + 'binlog_stmt_cache_size', + 'innodb_additional_mem_pool_size', + 'innodb_buffer_pool_size', + 'innodb_concurrency_tickets', + 'innodb_io_capacity', + 'innodb_log_buffer_size', + 'innodb_log_file_size', + 'innodb_open_files', + 'innodb_open_files', + 'join_buffer_size', + 'max_connections', + 'open_files_limit', + 'query_cache_limit', + 'query_cache_size', + 'query_cache_size', + 'read_buffer_size', + 'table_cache', + 'table_definition_cache', + 'table_open_cache', + 'thread_cache_size', + 'thread_cache_size', + 'thread_concurrency', + 'tmp_table_size', ] MYSQL_PROCESS_STATES = { - 'closing_tables': 0, - 'copying_to_tmp_table': 0, - 'end': 0, - 'freeing_items': 0, - 'init': 0, - 'locked': 0, - 'login': 0, - 'none': 0, - 'other': 0, - 'preparing': 0, - 'reading_from_net': 0, - 'sending_data': 0, - 'sorting_result': 0, - 'statistics': 0, - 'updating': 0, - 'writing_to_net': 0, + 'closing_tables': 0, + 'copying_to_tmp_table': 0, + 'end': 0, + 'freeing_items': 0, + 'init': 0, + 'locked': 0, + 'login': 0, + 'none': 0, + 'other': 0, + 'preparing': 0, + 'reading_from_net': 0, + 'sending_data': 0, + 'sorting_result': 0, + 'statistics': 0, + 'updating': 0, + 'writing_to_net': 0, } MYSQL_INNODB_STATUS_VARS = { - 'active_transactions': 'gauge', - 'current_transactions': 'gauge', - 'file_reads': 'counter', - 'file_system_memory': 'gauge', - 'file_writes': 'counter', - 'innodb_lock_structs': 'gauge', - 'innodb_lock_wait_secs': 'gauge', - 'innodb_locked_tables': 'gauge', - 'innodb_sem_wait_time_ms': 'gauge', - 'innodb_sem_waits': 'gauge', - 'innodb_tables_in_use': 'gauge', - 'lock_system_memory': 'gauge', - 'locked_transactions': 'gauge', - 'log_writes': 'counter', - 'page_hash_memory': 'gauge', - 'pending_aio_log_ios': 'gauge', - 'pending_buf_pool_flushes': 'gauge', - 'pending_chkp_writes': 'gauge', - 'pending_ibuf_aio_reads': 'gauge', - 'pending_log_writes':'gauge', - 'queries_inside': 
'gauge', - 'queries_queued': 'gauge', - 'read_views': 'gauge', + 'active_transactions': 'gauge', + 'current_transactions': 'gauge', + 'file_reads': 'counter', + 'file_system_memory': 'gauge', + 'file_writes': 'counter', + 'innodb_lock_structs': 'gauge', + 'innodb_lock_wait_secs': 'gauge', + 'innodb_locked_tables': 'gauge', + 'innodb_sem_wait_time_ms': 'gauge', + 'innodb_sem_waits': 'gauge', + 'innodb_tables_in_use': 'gauge', + 'lock_system_memory': 'gauge', + 'locked_transactions': 'gauge', + 'log_writes': 'counter', + 'page_hash_memory': 'gauge', + 'pending_aio_log_ios': 'gauge', + 'pending_buf_pool_flushes': 'gauge', + 'pending_chkp_writes': 'gauge', + 'pending_ibuf_aio_reads': 'gauge', + 'pending_log_writes': 'gauge', + 'queries_inside': 'gauge', + 'queries_queued': 'gauge', + 'read_views': 'gauge', } MYSQL_INNODB_STATUS_MATCHES = { - # 0 read views open inside InnoDB - 'read views open inside InnoDB': { - 'read_views': 0, - }, - # 5635328 OS file reads, 27018072 OS file writes, 20170883 OS fsyncs - ' OS file reads, ': { - 'file_reads': 0, - 'file_writes': 4, - }, - # ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0 - 'ibuf aio reads': { - 'pending_ibuf_aio_reads': 3, - 'pending_aio_log_ios': 6, - 'pending_aio_sync_ios': 9, - }, - # Pending flushes (fsync) log: 0; buffer pool: 0 - 'Pending flushes (fsync)': { - 'pending_buf_pool_flushes': 7, - }, - # 16086708 log i/o's done, 106.07 log i/o's/second - " log i/o's done, ": { - 'log_writes': 0, - }, - # 0 pending log writes, 0 pending chkp writes - ' pending log writes, ': { - 'pending_log_writes': 0, - 'pending_chkp_writes': 4, - }, - # Page hash 2302856 (buffer pool 0 only) - 'Page hash ': { - 'page_hash_memory': 2, - }, - # File system 657820264 (812272 + 657007992) - 'File system ': { - 'file_system_memory': 2, - }, - # Lock system 143820296 (143819576 + 720) - 'Lock system ': { - 'lock_system_memory': 2, - }, - # 0 queries inside InnoDB, 0 queries in queue - 'queries inside InnoDB, ': { - 'queries_inside': 0, - 'queries_queued': 4, - }, - # --Thread 139954487744256 has waited at dict0dict.cc line 472 for 0.0000 seconds the semaphore: - 'seconds the semaphore': { - 'innodb_sem_waits': lambda row, stats: stats['innodb_sem_waits'] + 1, - 'innodb_sem_wait_time_ms': lambda row, stats: int(row[9]) * 1000, - }, - # mysql tables in use 1, locked 1 - 'mysql tables in use': { - 'innodb_tables_in_use': lambda row, stats: stats['innodb_tables_in_use'] + int(row[4]), - 'innodb_locked_tables': lambda row, stats: stats['innodb_locked_tables'] + int(row[6]), - }, - "------- TRX HAS BEEN": { - "innodb_lock_wait_secs": lambda row, stats: stats['innodb_lock_wait_secs'] + int(row[5]), - }, + # 0 read views open inside InnoDB + 'read views open inside InnoDB': { + 'read_views': 0, + }, + # 5635328 OS file reads, 27018072 OS file writes, 20170883 OS fsyncs + ' OS file reads, ': { + 'file_reads': 0, + 'file_writes': 4, + }, + # ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0 + 'ibuf aio reads': { + 'pending_ibuf_aio_reads': 3, + 'pending_aio_log_ios': 6, + 'pending_aio_sync_ios': 9, + }, + # Pending flushes (fsync) log: 0; buffer pool: 0 + 'Pending flushes (fsync)': { + 'pending_buf_pool_flushes': 7, + }, + # 16086708 log i/o's done, 106.07 log i/o's/second + " log i/o's done, ": { + 'log_writes': 0, + }, + # 0 pending log writes, 0 pending chkp writes + ' pending log writes, ': { + 'pending_log_writes': 0, + 'pending_chkp_writes': 4, + }, + # Page hash 2302856 (buffer pool 0 only) + 'Page hash ': { + 'page_hash_memory': 2, + }, + # File system 657820264 
(812272 + 657007992) + 'File system ': { + 'file_system_memory': 2, + }, + # Lock system 143820296 (143819576 + 720) + 'Lock system ': { + 'lock_system_memory': 2, + }, + # 0 queries inside InnoDB, 0 queries in queue + 'queries inside InnoDB, ': { + 'queries_inside': 0, + 'queries_queued': 4, + }, + # --Thread 139954487744256 has waited at dict0dict.cc line 472 + # for 0.0000 seconds the semaphore: + 'seconds the semaphore': { + 'innodb_sem_waits': lambda row, stats: stats['innodb_sem_waits'] + 1, + 'innodb_sem_wait_time_ms': lambda row, stats: int(row[9]) * 1000, + }, + # mysql tables in use 1, locked 1 + 'mysql tables in use': { + 'innodb_tables_in_use': lambda row, stats: + stats['innodb_tables_in_use'] + int(row[4]), + 'innodb_locked_tables': lambda row, stats: + stats['innodb_locked_tables'] + int(row[6]), + }, + "------- TRX HAS BEEN": { + "innodb_lock_wait_secs": lambda row, + stats: stats['innodb_lock_wait_secs'] + int(row[5]), + }, } + def get_mysql_conn(): - return MySQLdb.connect( - host=MYSQL_CONFIG['Host'], - port=MYSQL_CONFIG['Port'], - user=MYSQL_CONFIG['User'], - passwd=MYSQL_CONFIG['Password'] - ) + return MySQLdb.connect( + host=MYSQL_CONFIG['Host'], + port=MYSQL_CONFIG['Port'], + user=MYSQL_CONFIG['User'], + passwd=MYSQL_CONFIG['Password'] + ) + def mysql_query(conn, query): - cur = conn.cursor(MySQLdb.cursors.DictCursor) - cur.execute(query) - return cur + cur = conn.cursor(MySQLdb.cursors.DictCursor) + cur.execute(query) + return cur + def fetch_mysql_status(conn): - result = mysql_query(conn, 'SHOW GLOBAL STATUS') - status = {} - for row in result.fetchall(): - status[row['Variable_name']] = row['Value'] + result = mysql_query(conn, 'SHOW GLOBAL STATUS') + status = {} + for row in result.fetchall(): + status[row['Variable_name']] = row['Value'] + + # calculate the number of unpurged txns from existing variables + if 'Innodb_max_trx_id' in status: + status['Innodb_unpurged_txns'] = \ + int(status['Innodb_max_trx_id']) - \ + int(status['Innodb_purge_trx_id']) - # calculate the number of unpurged txns from existing variables - if 'Innodb_max_trx_id' in status: - status['Innodb_unpurged_txns'] = int(status['Innodb_max_trx_id']) - int(status['Innodb_purge_trx_id']) + if 'Innodb_lsn_last_checkpoint' in status: + status['Innodb_uncheckpointed_bytes'] = \ + int(status['Innodb_lsn_current']) - \ + int(status['Innodb_lsn_last_checkpoint']) - if 'Innodb_lsn_last_checkpoint' in status: - status['Innodb_uncheckpointed_bytes'] = int(status['Innodb_lsn_current'])- int(status['Innodb_lsn_last_checkpoint']) + if 'Innodb_lsn_flushed' in status: + status['Innodb_unflushed_log'] = \ + int(status['Innodb_lsn_current']) - \ + int(status['Innodb_lsn_flushed']) - if 'Innodb_lsn_flushed' in status: - status['Innodb_unflushed_log'] = int(status['Innodb_lsn_current']) - int(status['Innodb_lsn_flushed']) + return status - return status def fetch_mysql_master_stats(conn): - try: - result = mysql_query(conn, 'SHOW BINARY LOGS') - except MySQLdb.OperationalError: - return {} + try: + result = mysql_query(conn, 'SHOW BINARY LOGS') + except MySQLdb.OperationalError: + return {} - stats = { - 'binary_log_space': 0, - } + stats = { + 'binary_log_space': 0, + } - for row in result.fetchall(): - if 'File_size' in row and row['File_size'] > 0: - stats['binary_log_space'] += int(row['File_size']) + for row in result.fetchall(): + if 'File_size' in row and row['File_size'] > 0: + stats['binary_log_space'] += int(row['File_size']) + + return stats - return stats def fetch_mysql_slave_stats(conn): - result = 
mysql_query(conn, 'SHOW SLAVE STATUS') - slave_row = result.fetchone() - if slave_row is None: - return {} - - status = { - 'relay_log_space': slave_row['Relay_Log_Space'], - 'slave_lag': slave_row['Seconds_Behind_Master'] if slave_row['Seconds_Behind_Master'] != None else 0, - } - - if MYSQL_CONFIG['HeartbeatTable']: - query = """ - SELECT MAX(UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts)) AS delay - FROM %s - WHERE server_id = %s - """ % (MYSQL_CONFIG['HeartbeatTable'], slave_row['Master_Server_Id']) - result = mysql_query(conn, query) - row = result.fetchone() - if 'delay' in row and row['delay'] != None: - status['slave_lag'] = row['delay'] - - status['slave_running'] = 1 if slave_row['Slave_SQL_Running'] == 'Yes' else 0 - status['slave_stopped'] = 1 if slave_row['Slave_SQL_Running'] != 'Yes' else 0 - return status + result = mysql_query(conn, 'SHOW SLAVE STATUS') + slave_row = result.fetchone() + if slave_row is None: + return {} + if slave_row['Seconds_Behind_Master'] != None: + slave_lag = slave_row['Seconds_Behind_Master'] + else: + slave_lag = 0 + + status = { + 'relay_log_space': slave_row['Relay_Log_Space'], + 'slave_lag': slave_lag, + } + + if MYSQL_CONFIG['HeartbeatTable']: + query = """ + SELECT + MAX(UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts)) AS delay + FROM %s + WHERE server_id = %s + """ % (MYSQL_CONFIG['HeartbeatTable'], + slave_row['Master_Server_Id']) + result = mysql_query(conn, query) + row = result.fetchone() + if 'delay' in row and row['delay'] != None: + status['slave_lag'] = row['delay'] + + if slave_row['Slave_SQL_Running'] == 'Yes': + status['slave_running'] = 1 + else: + status['slave_running'] = 0 + + if slave_row['Slave_SQL_Running'] == 'Yes': + status['slave_stopped'] = 1 + else: + status['slave_stopped'] = 0 + + return status + def fetch_mysql_process_states(conn): - global MYSQL_PROCESS_STATES - result = mysql_query(conn, 'SHOW PROCESSLIST') - states = MYSQL_PROCESS_STATES.copy() - for row in result.fetchall(): - state = row['State'] - if state == '' or state == None: state = 'none' - state = re.sub(r'^(Table lock|Waiting for .*lock)$', "Locked", state) - state = state.lower().replace(" ", "_") - if state not in states: state = 'other' - states[state] += 1 - - return states + global MYSQL_PROCESS_STATES + result = mysql_query(conn, 'SHOW PROCESSLIST') + states = MYSQL_PROCESS_STATES.copy() + for row in result.fetchall(): + state = row['State'] + if state == '' or state is None: + state = 'none' + state = re.sub(r'^(Table lock|Waiting for .*lock)$', + "Locked", state) + state = state.lower().replace(" ", "_") + if state not in states: + state = 'other' + states[state] += 1 + + return states + def fetch_mysql_variables(conn): - global MYSQL_VARS - result = mysql_query(conn, 'SHOW GLOBAL VARIABLES') - variables = {} - for row in result.fetchall(): - if row['Variable_name'] in MYSQL_VARS: - variables[row['Variable_name']] = row['Value'] + global MYSQL_VARS + result = mysql_query(conn, 'SHOW GLOBAL VARIABLES') + variables = {} + for row in result.fetchall(): + if row['Variable_name'] in MYSQL_VARS: + variables[row['Variable_name']] = row['Value'] + + return variables - return variables def fetch_mysql_response_times(conn): - response_times = {} - try: - result = mysql_query(conn, """ - SELECT * - FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME - WHERE `time` != 'TOO LONG' - ORDER BY `time` - """) - except MySQLdb.OperationalError: - return {} - - for i in range(1, 14): - row = result.fetchone() - - # fill in missing rows with zeros - if not row: - row = { 'count': 0, 'total': 
0 } - - response_times[i] = { - 'time': float(row['time']), - 'count': int(row['count']), - 'total': round(float(row['total']) * 1000000, 0), - } - - return response_times + response_times = {} + try: + result = mysql_query(conn, """ + SELECT * + FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME + WHERE `time` != 'TOO LONG' + ORDER BY `time` + """) + except MySQLdb.OperationalError: + return {} + + for i in range(1, 14): + row = result.fetchone() + + # fill in missing rows with zeros + if not row: + row = {'count': 0, 'total': 0} + + response_times[i] = { + 'time': float(row['time']), + 'count': int(row['count']), + 'total': round(float(row['total']) * 1000000, 0), + } + + return response_times + def fetch_innodb_stats(conn): - global MYSQL_INNODB_STATUS_MATCHES, MYSQL_INNODB_STATUS_VARS - result = mysql_query(conn, 'SHOW ENGINE INNODB STATUS') - row = result.fetchone() - status = row['Status'] - stats = dict.fromkeys(MYSQL_INNODB_STATUS_VARS.keys(), 0) - - for line in status.split("\n"): - line = line.strip() - row = re.split(r' +', re.sub(r'[,;] ', ' ', line)) - if line == '': continue - - # ---TRANSACTION 124324402462, not started - # ---TRANSACTION 124324402468, ACTIVE 0 sec committing - if line.find("---TRANSACTION") != -1: - stats['current_transactions'] += 1 - if line.find("ACTIVE") != -1: - stats['active_transactions'] += 1 - # LOCK WAIT 228 lock struct(s), heap size 46632, 65 row lock(s), undo log entries 1 - # 205 lock struct(s), heap size 30248, 37 row lock(s), undo log entries 1 - elif line.find("lock struct(s)") != -1: - if line.find("LOCK WAIT") != -1: - stats['innodb_lock_structs'] += int(row[2]) - stats['locked_transactions'] += 1 - else: - stats['innodb_lock_structs'] += int(row[0]) - else: - for match in MYSQL_INNODB_STATUS_MATCHES: - if line.find(match) == -1: continue - for key in MYSQL_INNODB_STATUS_MATCHES[match]: - value = MYSQL_INNODB_STATUS_MATCHES[match][key] - if type(value) is int: - stats[key] = int(row[value]) - else: - stats[key] = value(row, stats) - break - - return stats + global MYSQL_INNODB_STATUS_MATCHES, MYSQL_INNODB_STATUS_VARS + result = mysql_query(conn, 'SHOW ENGINE INNODB STATUS') + row = result.fetchone() + status = row['Status'] + stats = dict.fromkeys(MYSQL_INNODB_STATUS_VARS.keys(), 0) + + for line in status.split("\n"): + line = line.strip() + row = re.split(r' +', re.sub(r'[,;] ', ' ', line)) + if line == '': + continue + + # ---TRANSACTION 124324402462, not started + # ---TRANSACTION 124324402468, ACTIVE 0 sec committing + if line.find("---TRANSACTION") != -1: + stats['current_transactions'] += 1 + if line.find("ACTIVE") != -1: + stats['active_transactions'] += 1 + # LOCK WAIT 228 lock struct(s), heap size 46632, + # 65 row lock(s), undo log entries 1 + # 205 lock struct(s), heap size 30248, 37 row lock(s), + # undo log entries 1 + elif line.find("lock struct(s)") != -1: + if line.find("LOCK WAIT") != -1: + stats['innodb_lock_structs'] += int(row[2]) + stats['locked_transactions'] += 1 + else: + stats['innodb_lock_structs'] += int(row[0]) + else: + for match in MYSQL_INNODB_STATUS_MATCHES: + if line.find(match) == -1: + continue + for key in MYSQL_INNODB_STATUS_MATCHES[match]: + value = \ + MYSQL_INNODB_STATUS_MATCHES[match][key] + if type(value) is int: + stats[key] = int(row[value]) + else: + stats[key] = value(row, stats) + break + + return stats + # Check if PENFORMANCE_SCHEMA is enabled def is_ps_enabled(conn): - result = mysql_query(conn, 'SHOW GLOBAL VARIABLES LIKE "performance_schema"') - row = result.fetchone() - if row['Value'] == 'ON': 
- return True - else: - return False - + result = mysql_query(conn, + ''' + SHOW GLOBAL VARIABLES + LIKE "performance_schema" + ''') + row = result.fetchone() + if row['Value'] == 'ON': + return True + else: + return False + + def clean_string(digest): - clean_digest=str(digest) - clean_digest=re.sub(r'[^\x00-\x7F]+','_', clean_digest) - clean_digest=clean_digest.replace('`', '') - clean_digest=clean_digest.replace('\\', '') - clean_digest=clean_digest.replace('?', '') - clean_digest=clean_digest.replace(' ', '_') - clean_digest=clean_digest.replace(',', '_') - clean_digest=clean_digest.replace('(', '_') - clean_digest=clean_digest.replace(')', '_') - clean_digest=clean_digest.replace('.', '_') - clean_digest=re.sub(r'(__)', '',clean_digest) - clean_digest=re.sub('_$', '',clean_digest) - return clean_digest - -# http://www.markleith.co.uk/2011/04/18/monitoring-table-and-index-io-with-performance_schema/ -# http://www.markleith.co.uk/2012/07/04/mysql-performance-schema-statement-digests/ -# -# 1) A high level overview of the statements like Query Analysis, sorted by those queries with the highest latency + clean_digest = str(digest) + clean_digest = re.sub(r'[^\x00-\x7F]+', '_', clean_digest) + clean_digest = clean_digest.replace('`', '') + clean_digest = clean_digest.replace('\\', '') + clean_digest = clean_digest.replace('?', '') + clean_digest = clean_digest.replace(' ', '_') + clean_digest = clean_digest.replace(',', '_') + clean_digest = clean_digest.replace('(', '_') + clean_digest = clean_digest.replace(')', '_') + clean_digest = clean_digest.replace('.', '_') + clean_digest = re.sub(r'(__)', '', clean_digest) + clean_digest = re.sub('_$', '', clean_digest) + return clean_digest -# 2) List all normalized statements that use temporary tables ordered by number of on disk temporary tables descending first, then by the number of memory tables. -# 3) List all normalized statements that have done sorts, ordered by sort_merge_passes, sort_scans and sort_rows, all descending. -# 4) List all normalized statements that use have done a full table scan ordered by the percentage of times a full scan was done, then by the number of times the statement executed -# 5) List all normalized statements that have raised errors or warnings. 
# Connections per account def fetch_connections_per_account(conn): - queries = {} - try: - result = mysql_query(conn, """ - SELECT user,host,current_connections,total_connections - FROM performance_schema.accounts - LIMIT 10; - """) - for row in result.fetchall(): - # Clean the digest string - clean_digest=str(row['user'])+'_'+str(row['host']) - queries["current_connections_per_account_"+clean_digest] = row['current_connections'] - queries["total_connections_per_account_"+clean_digest] = row['total_connections'] - - except MySQLdb.OperationalError: - return {} - - return queries + queries = {} + try: + result = mysql_query(conn, + """ + SELECT + user,host,current_connections, + total_connections + FROM + performance_schema.accounts + LIMIT 10 + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = str(row['user'])+'_'+str(row['host']) + queries["current_connections_per_account_"+clean_digest] = \ + row['current_connections'] + queries["total_connections_per_account_"+clean_digest] = \ + row['total_connections'] + + except MySQLdb.OperationalError: + return {} + + return queries # Connections per host def fetch_connections_per_host(conn): - queries = {} - try: - result = mysql_query(conn, """ - SELECT host,current_connections,total_connections - FROM performance_schema.hosts - LIMIT 10; - """) - for row in result.fetchall(): - # Clean the digest string - clean_digest=str(row['host']) - queries["current_connections_per_host_"+clean_digest] = row['current_connections'] - queries["total_connections_per_host_"+clean_digest] = row['total_connections'] - - except MySQLdb.OperationalError: - return {} - - return queries + queries = {} + try: + result = mysql_query(conn, """ + SELECT + host,current_connections, + total_connections + FROM performance_schema.hosts + LIMIT 10 + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = str(row['host']) + queries["current_connections_per_host_"+clean_digest] = \ + row['current_connections'] + queries["total_connections_per_host_"+clean_digest] = \ + row['total_connections'] + + except MySQLdb.OperationalError: + return {} + + return queries + # Connections per user def fetch_connections_per_user(conn): - queries = {} - try: - result = mysql_query(conn, """ - SELECT user,current_connections,total_connections - FROM performance_schema.users - LIMIT 10; - """) - for row in result.fetchall(): - # Clean the digest string - clean_digest=str(row['user']) - queries["current_connections_per_user_"+clean_digest] = row['current_connections'] - queries["total_connections_per_user_"+clean_digest] = row['total_connections'] - - except MySQLdb.OperationalError: - return {} - - return queries + queries = {} + try: + result = mysql_query(conn, """ + SELECT + user,current_connections, + total_connections + FROM performance_schema.users + LIMIT 10 + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = str(row['user']) + queries["current_connections_per_user_"+clean_digest] = \ + row['current_connections'] + queries["total_connections_per_user_"+clean_digest] = \ + row['total_connections'] + + except MySQLdb.OperationalError: + return {} + + return queries + # number of reads/changed per index def fetch_number_of_reads_per_index(conn): - queries = {} - try: - result = mysql_query(conn, """ - SELECT pst.object_schema AS table_schema, - pst.object_name AS table_name, - psi.object_name AS index_name, - pst.count_read AS rows_read, - pst.count_write AS rows_changed - FROM 
performance_schema.table_io_waits_summary_by_table AS pst - LEFT JOIN performance_schema.table_io_waits_summary_by_index_usage AS psi - ON pst.object_schema = psi.object_schema AND pst.object_name = psi.object_name - AND psi.index_name IS NOT NULL - WHERE pst.sum_timer_wait > 0 - GROUP BY pst.object_schema, pst.object_name - ORDER BY pst.sum_timer_wait DESC - LIMIT 10; - - """) - for row in result.fetchall(): - # Clean the digest string - clean_digest=clean_string(row['table_schema']+'_'+row['table_name']+'_'+row['index_name']) - queries["number_of_rows_reads_per_index_"+clean_digest] = row['rows_read'] - queries["number_of_rows_changed_per_index_"+clean_digest] = row['rows_changed'] - - except MySQLdb.OperationalError: - return {} - - return queries + queries = {} + try: + result = mysql_query(conn, """ + SELECT pst.object_schema AS table_schema, + pst.object_name AS table_name, + psi.object_name AS index_name, + pst.count_read AS rows_read, + pst.count_write AS rows_changed + FROM + performance_schema.table_io_waits_summary_by_table AS pst + LEFT JOIN + performance_schema.table_io_waits_summary_by_index_usage + AS psi + ON pst.object_schema = psi.object_schema AND + pst.object_name = psi.object_name + AND psi.index_name IS NOT NULL + WHERE pst.sum_timer_wait > 0 + GROUP BY pst.object_schema, pst.object_name + ORDER BY pst.sum_timer_wait DESC + LIMIT 10 + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = clean_string('{}_{}_{}'. + format(row['table_schema'], + row['table_name'], + row['index_name'])) + queries["number_of_rows_reads_per_index_"+clean_digest] = \ + row['rows_read'] + queries["number_of_rows_changed_per_index_"+clean_digest] = \ + row['rows_changed'] + + except MySQLdb.OperationalError: + return {} + + return queries + # Indexes not being used def fetch_indexes_not_being_used(conn): - queries = {} - try: - result = mysql_query(conn, """ - SELECT object_schema, - object_name, - index_name - FROM performance_schema.table_io_waits_summary_by_index_usage - WHERE index_name IS NOT NULL - AND count_star = 0 - AND object_schema != 'mysql' - ORDER BY object_schema, object_name - LIMIT 10; - """) - for row in result.fetchall(): - # Clean the digest string - clean_digest=clean_string(row['object_schema']+'_'+row['object_name']+'_'+row['index_name']) - queries["index_not_being_used_"+clean_digest] = 1 - - except MySQLdb.OperationalError: - return {} - - return queries - + queries = {} + try: + result = mysql_query(conn, """ + SELECT object_schema, + object_name, + index_name + FROM + performance_schema.table_io_waits_summary_by_index_usage + WHERE index_name IS NOT NULL + AND count_star = 0 + AND object_schema != 'mysql' + ORDER BY object_schema, object_name + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = clean_string('{}_{}_{}'. 
+ format(row['object_schema'], + row['object_name'], + row['index_name'])) + queries["index_not_being_used_"+clean_digest] = 1 + + except MySQLdb.OperationalError: + return {} + + return queries # Queries that raised errors def fetch_warning_error_queries(conn): - queries = {} - try: - # Get the slow queries - result = mysql_query(conn, """ - SELECT DIGEST_TEXT AS query, - COUNT_STAR AS exec_count, - SUM_ERRORS AS errors, - SUM_WARNINGS AS warnings - FROM performance_schema.events_statements_summary_by_digest - WHERE SUM_ERRORS > 0 - OR SUM_WARNINGS > 0 - ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC - LIMIT 10; - """) - for row in result.fetchall(): - # Clean the digest string - clean_digest=clean_string(row['query']) - queries["exec_count_"+clean_digest] = row['exec_count'] - queries["errors_"+clean_digest] = row['errors'] - queries["warnings_"+clean_digest] = row['warnings'] - - except MySQLdb.OperationalError: - return {} - - return queries - -# Slow queries, response time, rows scanned, rows returned + queries = {} + try: + # Get the slow queries + result = mysql_query(conn, """ + SELECT + DIGEST_TEXT AS query, + COUNT_STAR AS exec_count, + SUM_ERRORS AS errors, + SUM_WARNINGS AS warnings + FROM + performance_schema.events_statements_summary_by_digest + WHERE SUM_ERRORS > 0 + OR SUM_WARNINGS > 0 + ORDER BY SUM_ERRORS DESC, SUM_WARNINGS DESC + LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = clean_string(row['query']) + queries["exec_count_"+clean_digest] = row['exec_count'] + queries["errors_"+clean_digest] = row['errors'] + queries["warnings_"+clean_digest] = row['warnings'] + + except MySQLdb.OperationalError: + return {} + + return queries + + +# Slow queries, response time, rows scanned, rows returned def fetch_slow_queries(conn): - slow_queries = {} - try: - # Get the slow queries - result = mysql_query(conn, """ - SELECT DIGEST_TEXT AS query, - COUNT_STAR AS exec_count, - round(SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, - round(MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, - round(AVG_TIMER_WAIT/1000000000) AS exec_time_avg_ms, - SUM_ROWS_SENT AS rows_sent_sum, - ROUND(SUM_ROWS_SENT / COUNT_STAR) AS rows_sent_avg, - SUM_ROWS_EXAMINED AS rows_scanned - FROM performance_schema.events_statements_summary_by_digest - WHERE DIGEST_TEXT NOT LIKE 'null' - ORDER BY SUM_TIMER_WAIT DESC LIMIT 10; - """) - for row in result.fetchall(): - # Clean the digest string - clean_digest=clean_string(row['query']) - slow_queries["exec_count_"+clean_digest] = row['exec_count'] - slow_queries["exec_time_total_"+clean_digest] = row['exec_time_total_ms'] - slow_queries["exec_time_max_"+clean_digest] = row['exec_time_max_ms'] - slow_queries["exec_time_avg_ms_"+clean_digest] = row['exec_time_avg_ms'] - slow_queries["rows_sent_sum_"+clean_digest] = row['rows_sent_sum'] - slow_queries["rows_sent_avg_"+clean_digest] = row['rows_sent_avg'] - slow_queries["rows_scanned_"+clean_digest] = row['rows_scanned'] - - except MySQLdb.OperationalError: - return {} - - return slow_queries - + slow_queries = {} + try: + # Get the slow queries + result = mysql_query(conn, """ + SELECT DIGEST_TEXT AS query, + COUNT_STAR AS exec_count, + round(SUM_TIMER_WAIT/1000000000) AS exec_time_total_ms, + round(MAX_TIMER_WAIT/1000000000) AS exec_time_max_ms, + round(AVG_TIMER_WAIT/1000000000) AS exec_time_avg_ms, + SUM_ROWS_SENT AS rows_sent_sum, + ROUND(SUM_ROWS_SENT / COUNT_STAR) AS rows_sent_avg, + SUM_ROWS_EXAMINED AS rows_scanned + FROM + 
performance_schema.events_statements_summary_by_digest + WHERE DIGEST_TEXT NOT LIKE 'null' + ORDER BY SUM_TIMER_WAIT DESC LIMIT 10; + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = clean_string(row['query']) + slow_queries["exec_count_"+clean_digest] = \ + row['exec_count'] + slow_queries["exec_time_total_"+clean_digest] = \ + row['exec_time_total_ms'] + slow_queries["exec_time_max_"+clean_digest] = \ + row['exec_time_max_ms'] + slow_queries["exec_time_avg_ms_"+clean_digest] = \ + row['exec_time_avg_ms'] + slow_queries["rows_sent_sum_"+clean_digest] = \ + row['rows_sent_sum'] + slow_queries["rows_sent_avg_"+clean_digest] = \ + row['rows_sent_avg'] + slow_queries["rows_scanned_"+clean_digest] = \ + row['rows_scanned'] + + except MySQLdb.OperationalError: + return {} + + return slow_queries + + +# Slow queries excluding table names, +# response time, rows scanned, rows returned +def fetch_slow_queries_excluding_table_names(conn): + slow_queries = {} + try: + # Get the slow queries + result = mysql_query(conn, """ + SELECT + query, + sum(exec_count) AS exec_count, + sum(exec_time_total_ms) as exec_time_total_ms, + sum(exec_time_max_ms) as exec_time_max_ms, + sum(exec_time_avg_ms) as exec_time_avg_ms, + sum(rows_sent_sum) as rows_sent_sum, + sum(rows_sent_avg) as rows_sent_avg, + sum(rows_scanned) as rows_scanned + FROM + (SELECT + if(DIGEST_TEXT REGEXP '^SELECT.*WHERE.*', + concat(substring_index(DIGEST_TEXT,'FROM',1), + ' FROM WHERE ', + substring_index(DIGEST_TEXT, + 'WHERE',-1)), + if(DIGEST_TEXT REGEXP '^SELECT.*', + concat(substring_index(DIGEST_TEXT,'FROM',1), + ' FROM
'), + if(DIGEST_TEXT REGEXP '^INSERT.*', + concat(substring_index(DIGEST_TEXT,'INTO',1), + ' INTO
', + substring_index(DIGEST_TEXT, + '\` \( \`',-1)), + if(DIGEST_TEXT REGEXP '^UPDATE.*', + concat(' UPDATE
', + substring_index(DIGEST_TEXT,'SET',-1)), + DIGEST_TEXT + ) + ) + ) + ) as query, + COUNT_STAR AS exec_count, + round(SUM_TIMER_WAIT/1000000000) + AS exec_time_total_ms, + round(MAX_TIMER_WAIT/1000000000) + AS exec_time_max_ms, + round(AVG_TIMER_WAIT/1000000000) + AS exec_time_avg_ms, + SUM_ROWS_SENT AS rows_sent_sum, + ROUND(SUM_ROWS_SENT / COUNT_STAR) + AS rows_sent_avg, + SUM_ROWS_EXAMINED AS rows_scanned + FROM + performance_schema.events_statements_summary_by_digest + WHERE DIGEST_TEXT NOT LIKE 'null' + ORDER BY SUM_TIMER_WAIT DESC LIMIT 100 + ) as a + group by query + """) + for row in result.fetchall(): + # Clean the digest string + clean_digest = clean_string(row['query']) + slow_queries["exec_count_"+clean_digest] = \ + row['exec_count'] + slow_queries["exec_time_total_"+clean_digest] = \ + row['exec_time_total_ms'] + slow_queries["exec_time_max_"+clean_digest] = \ + row['exec_time_max_ms'] + slow_queries["exec_time_avg_ms_"+clean_digest] = \ + row['exec_time_avg_ms'] + slow_queries["rows_sent_sum_"+clean_digest] = \ + row['rows_sent_sum'] + slow_queries["rows_sent_avg_"+clean_digest] = \ + row['rows_sent_avg'] + slow_queries["rows_scanned_"+clean_digest] = \ + row['rows_scanned'] + + except MySQLdb.OperationalError: + return {} + + return slow_queries def log_verbose(msg): - if MYSQL_CONFIG['Verbose'] == False: - return - collectd.info('mysql plugin: %s' % msg) + if MYSQL_CONFIG['Verbose'] == False: + return + collectd.info('mysql plugin: %s' % msg) + def dispatch_value(prefix, key, value, type, type_instance=None): - if not type_instance: - type_instance = key + if not type_instance: + type_instance = key + + log_verbose('Sending value: %s/%s=%s' % (prefix, type_instance, value)) + if not value: + return + value = int(value) # safety check - log_verbose('Sending value: %s/%s=%s' % (prefix, type_instance, value)) - if not value: - return - value = int(value) # safety check + val = collectd.Values(plugin='mysql', plugin_instance=prefix) + val.type = type + val.type_instance = type_instance + val.values = [value] + val.dispatch() - val = collectd.Values(plugin='mysql', plugin_instance=prefix) - val.type = type - val.type_instance = type_instance - val.values = [value] - val.dispatch() def configure_callback(conf): - global MYSQL_CONFIG - for node in conf.children: - if node.key in MYSQL_CONFIG: - MYSQL_CONFIG[node.key] = node.values[0] + global MYSQL_CONFIG + for node in conf.children: + if node.key in MYSQL_CONFIG: + MYSQL_CONFIG[node.key] = node.values[0] + + MYSQL_CONFIG['Port'] = int(MYSQL_CONFIG['Port']) + MYSQL_CONFIG['Verbose'] = bool(MYSQL_CONFIG['Verbose']) - MYSQL_CONFIG['Port'] = int(MYSQL_CONFIG['Port']) - MYSQL_CONFIG['Verbose'] = bool(MYSQL_CONFIG['Verbose']) def read_callback(): - global MYSQL_STATUS_VARS - conn = get_mysql_conn() - - mysql_status = fetch_mysql_status(conn) - for key in mysql_status: - if mysql_status[key] == '': mysql_status[key] = 0 - - # collect anything beginning with Com_/Handler_ as these change - # regularly between mysql versions and this is easier than a fixed - # list - if key.split('_', 2)[0] in ['Com', 'Handler']: - ds_type = 'counter' - elif key in MYSQL_STATUS_VARS: - ds_type = MYSQL_STATUS_VARS[key] - else: - continue - - dispatch_value('status', key, mysql_status[key], ds_type) - - mysql_variables = fetch_mysql_variables(conn) - for key in mysql_variables: - dispatch_value('variables', key, mysql_variables[key], 'gauge') - - mysql_master_status = fetch_mysql_master_stats(conn) - for key in mysql_master_status: - 
dispatch_value('master', key, mysql_master_status[key], 'gauge') - - mysql_states = fetch_mysql_process_states(conn) - for key in mysql_states: - dispatch_value('state', key, mysql_states[key], 'gauge') - - slave_status = fetch_mysql_slave_stats(conn) - for key in slave_status: - dispatch_value('slave', key, slave_status[key], 'gauge') - - response_times = fetch_mysql_response_times(conn) - for key in response_times: - dispatch_value('response_time_total', str(key), response_times[key]['total'], 'counter') - dispatch_value('response_time_count', str(key), response_times[key]['count'], 'counter') - - innodb_status = fetch_innodb_stats(conn) - for key in MYSQL_INNODB_STATUS_VARS: - if key not in innodb_status: continue - dispatch_value('innodb', key, innodb_status[key], MYSQL_INNODB_STATUS_VARS[key]) - - # Performance_Schema metrics - if is_ps_enabled(conn): - slow_queries = fetch_slow_queries(conn) - for key in slow_queries: - dispatch_value('slow_query', key, slow_queries[key], 'gauge') - - queries = fetch_warning_error_queries(conn) - for key in queries: - dispatch_value('warn_err_query', key, queries[key], 'gauge') - - queries = fetch_indexes_not_being_used(conn) - for key in queries: - dispatch_value('indexes_not_being_used', key, queries[key], 'gauge') - - queries = fetch_number_of_reads_per_index(conn) - for key in queries: - dispatch_value('number_of_reads_per_index', key, queries[key], 'gauge') - - queries=fetch_connections_per_user(conn) - for key in queries: - dispatch_value('connections_per_user', key, queries[key], 'gauge') - - queries=fetch_connections_per_host(conn) - for key in queries: - dispatch_value('connections_per_host', key, queries[key], 'gauge') - - queries=fetch_connections_per_account(conn) - for key in queries: - dispatch_value('connections_per_account', key, queries[key], 'gauge') + global MYSQL_STATUS_VARS + conn = get_mysql_conn() + + mysql_status = fetch_mysql_status(conn) + for key in mysql_status: + if mysql_status[key] == '': mysql_status[key] = 0 + + # collect anything beginning with Com_/Handler_ as these change + # regularly between mysql versions and this is easier than a fixed + # list + if key.split('_', 2)[0] in ['Com', 'Handler']: + ds_type = 'counter' + elif key in MYSQL_STATUS_VARS: + ds_type = MYSQL_STATUS_VARS[key] + else: + continue + + dispatch_value('status', key, mysql_status[key], ds_type) + + mysql_variables = fetch_mysql_variables(conn) + for key in mysql_variables: + dispatch_value('variables', key, mysql_variables[key], 'gauge') + + mysql_master_status = fetch_mysql_master_stats(conn) + for key in mysql_master_status: + dispatch_value('master', key, mysql_master_status[key], 'gauge') + + mysql_states = fetch_mysql_process_states(conn) + for key in mysql_states: + dispatch_value('state', key, mysql_states[key], 'gauge') + + slave_status = fetch_mysql_slave_stats(conn) + for key in slave_status: + dispatch_value('slave', key, slave_status[key], 'gauge') + + response_times = fetch_mysql_response_times(conn) + for key in response_times: + dispatch_value('response_time_total', str(key), response_times[key]['total'], 'counter') + dispatch_value('response_time_count', str(key), response_times[key]['count'], 'counter') + + innodb_status = fetch_innodb_stats(conn) + for key in MYSQL_INNODB_STATUS_VARS: + if key not in innodb_status: continue + dispatch_value('innodb', key, innodb_status[key], MYSQL_INNODB_STATUS_VARS[key]) + + # Performance_Schema metrics + if is_ps_enabled(conn): + queries = fetch_slow_queries(conn) + for key in queries: + 
dispatch_value('slow_query', key, queries[key], 'gauge') + + queries = fetch_slow_queries_excluding_table_names(conn) + for key in queries: + dispatch_value('slow_query_excluding_table_names', key, queries[key], 'gauge') + + queries = fetch_warning_error_queries(conn) + for key in queries: + dispatch_value('warn_err_query', key, queries[key], 'gauge') + + queries = fetch_indexes_not_being_used(conn) + for key in queries: + dispatch_value('indexes_not_being_used', key, queries[key], 'gauge') + +# queries = fetch_number_of_reads_per_index(conn) +# for key in queries: +# dispatch_value('number_of_reads_per_index', key, queries[key], 'gauge') + + queries=fetch_connections_per_user(conn) + for key in queries: + dispatch_value('connections_per_user', key, queries[key], 'gauge') + + queries=fetch_connections_per_host(conn) + for key in queries: + dispatch_value('connections_per_host', key, queries[key], 'gauge') + + queries=fetch_connections_per_account(conn) + for key in queries: + dispatch_value('connections_per_account', key, queries[key], 'gauge') collectd.register_read(read_callback) From 6506b843d6d4a505d2dd2d971e390ffa9e513584 Mon Sep 17 00:00:00 2001 From: isart Date: Mon, 7 Dec 2015 11:19:34 +0100 Subject: [PATCH 23/28] turn off metrics to get unused indexes until PS is tunned --- mysql.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql.py b/mysql.py index 928c7d7..9a707b9 100644 --- a/mysql.py +++ b/mysql.py @@ -913,9 +913,9 @@ def read_callback(): for key in queries: dispatch_value('warn_err_query', key, queries[key], 'gauge') - queries = fetch_indexes_not_being_used(conn) - for key in queries: - dispatch_value('indexes_not_being_used', key, queries[key], 'gauge') +# queries = fetch_indexes_not_being_used(conn) +# for key in queries: +# dispatch_value('indexes_not_being_used', key, queries[key], 'gauge') # queries = fetch_number_of_reads_per_index(conn) # for key in queries: From a3ad466d508b41d970ecc2d74128e9d418b1838f Mon Sep 17 00:00:00 2001 From: isart Date: Mon, 7 Dec 2015 11:52:59 +0100 Subject: [PATCH 24/28] updaing readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 0f2b476..c949711 100644 --- a/README.md +++ b/README.md @@ -332,6 +332,7 @@ If Performance_schema is enabled you will get the following metrics as well. Indexes not being used (didn't get any read) - schema, table, index_name Queries that raised errors/warnings - Query, number of executions, errors, warnings Slow queries - Query, number of executions, execution time (total,max,avg), rows sent (total, avg), scanned rows + Added slow queries excluding table names. Very useful when you have different table with same 'schema'. 
## License MIT (http://www.opensource.org/licenses/mit-license.php) From 82f016dea84b061b4d2fd9e14777b42109b02d91 Mon Sep 17 00:00:00 2001 From: isart Date: Fri, 11 Dec 2015 17:34:47 +0100 Subject: [PATCH 25/28] slow queries must be counter instead of gauge --- mysql.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/mysql.py b/mysql.py index 9a707b9..84f283c 100644 --- a/mysql.py +++ b/mysql.py @@ -514,7 +514,8 @@ def clean_string(digest): clean_digest = clean_digest.replace('(', '_') clean_digest = clean_digest.replace(')', '_') clean_digest = clean_digest.replace('.', '_') - clean_digest = re.sub(r'(__)', '', clean_digest) + clean_digest = clean_digest.replace('=', '_') + clean_digest = re.sub(r'(_)+', '_', clean_digest) clean_digest = re.sub('_$', '', clean_digest) return clean_digest @@ -760,19 +761,19 @@ def fetch_slow_queries_excluding_table_names(conn): (SELECT if(DIGEST_TEXT REGEXP '^SELECT.*WHERE.*', concat(substring_index(DIGEST_TEXT,'FROM',1), - ' FROM
WHERE ', + ' FROM TABLE WHERE ', substring_index(DIGEST_TEXT, 'WHERE',-1)), if(DIGEST_TEXT REGEXP '^SELECT.*', concat(substring_index(DIGEST_TEXT,'FROM',1), - ' FROM
'), + ' FROM TABLE '), if(DIGEST_TEXT REGEXP '^INSERT.*', concat(substring_index(DIGEST_TEXT,'INTO',1), - ' INTO
', + ' INTO TABLE ', substring_index(DIGEST_TEXT, '\` \( \`',-1)), if(DIGEST_TEXT REGEXP '^UPDATE.*', - concat(' UPDATE
', + concat(' UPDATE TABLE ', substring_index(DIGEST_TEXT,'SET',-1)), DIGEST_TEXT ) @@ -903,15 +904,15 @@ def read_callback(): if is_ps_enabled(conn): queries = fetch_slow_queries(conn) for key in queries: - dispatch_value('slow_query', key, queries[key], 'gauge') + dispatch_value('slow_query', key, queries[key], 'counter') queries = fetch_slow_queries_excluding_table_names(conn) for key in queries: - dispatch_value('slow_query_excluding_table_names', key, queries[key], 'gauge') + dispatch_value('slow_query_excluding_table_names', key, queries[key], 'counter') queries = fetch_warning_error_queries(conn) for key in queries: - dispatch_value('warn_err_query', key, queries[key], 'gauge') + dispatch_value('warn_err_query', key, queries[key], 'counter') # queries = fetch_indexes_not_being_used(conn) # for key in queries: From 2389c3dc6ab3d4a19d869a9c4030c7d99fc973bc Mon Sep 17 00:00:00 2001 From: isart Date: Mon, 14 Dec 2015 12:57:07 +0100 Subject: [PATCH 26/28] adding crc32 checkum on queries to avoid duplciates --- mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index 84f283c..b21d807 100644 --- a/mysql.py +++ b/mysql.py @@ -908,7 +908,7 @@ def read_callback(): queries = fetch_slow_queries_excluding_table_names(conn) for key in queries: - dispatch_value('slow_query_excluding_table_names', key, queries[key], 'counter') + dispatch_value('slow_query_excluding_table_names', "{}-{}".format(binascii.crc32(key) % 100, key) , queries[key], 'counter') queries = fetch_warning_error_queries(conn) for key in queries: From c3580fbec7670068feedff9479d195570ecaabf3 Mon Sep 17 00:00:00 2001 From: isart Date: Thu, 14 Jan 2016 16:18:03 +0100 Subject: [PATCH 27/28] fixing tab --- mysql.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mysql.py b/mysql.py index b21d807..063fd3a 100644 --- a/mysql.py +++ b/mysql.py @@ -26,6 +26,7 @@ import collectd import re import MySQLdb +import binascii MYSQL_CONFIG = { 'Host': 'localhost', @@ -326,7 +327,7 @@ def fetch_mysql_status(conn): int(status['Innodb_lsn_current']) - \ int(status['Innodb_lsn_flushed']) - return status + return status def fetch_mysql_master_stats(conn): From c252a5be33a126888bf2e091a881d5cbe13e6e97 Mon Sep 17 00:00:00 2001 From: Isart Montane Mogas Date: Tue, 19 Jul 2016 10:54:32 +0200 Subject: [PATCH 28/28] Check if information_schema.query_response_time exists. 
Avoid failures on type conversion --- mysql.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/mysql.py b/mysql.py index 063fd3a..e2ee48c 100644 --- a/mysql.py +++ b/mysql.py @@ -312,17 +312,17 @@ def fetch_mysql_status(conn): status[row['Variable_name']] = row['Value'] # calculate the number of unpurged txns from existing variables - if 'Innodb_max_trx_id' in status: + if 'Innodb_max_trx_id' in status and 'Innodb_purge_trx_id' in status: status['Innodb_unpurged_txns'] = \ int(status['Innodb_max_trx_id']) - \ int(status['Innodb_purge_trx_id']) - if 'Innodb_lsn_last_checkpoint' in status: + if 'Innodb_lsn_last_checkpoint' in status and 'Innodb_lsn_current' in status: status['Innodb_uncheckpointed_bytes'] = \ int(status['Innodb_lsn_current']) - \ int(status['Innodb_lsn_last_checkpoint']) - if 'Innodb_lsn_flushed' in status: + if 'Innodb_lsn_flushed' in status and 'Innodb_lsn_current' in status: status['Innodb_unflushed_log'] = \ int(status['Innodb_lsn_current']) - \ int(status['Innodb_lsn_flushed']) @@ -479,13 +479,17 @@ def fetch_innodb_stats(conn): if line.find(match) == -1: continue for key in MYSQL_INNODB_STATUS_MATCHES[match]: - value = \ - MYSQL_INNODB_STATUS_MATCHES[match][key] - if type(value) is int: - stats[key] = int(row[value]) - else: - stats[key] = value(row, stats) - break + try: + value = \ + MYSQL_INNODB_STATUS_MATCHES[match][key] + if type(value) is int: + stats[key] = int(row[value]) + else: + stats[key] = value(row, stats) + break + except Exception, e: + log_verbose(Exception) + log_verbose(e) return stats @@ -892,9 +896,10 @@ def read_callback(): dispatch_value('slave', key, slave_status[key], 'gauge') response_times = fetch_mysql_response_times(conn) - for key in response_times: - dispatch_value('response_time_total', str(key), response_times[key]['total'], 'counter') - dispatch_value('response_time_count', str(key), response_times[key]['count'], 'counter') + if response_times != {}: + for key in response_times: + dispatch_value('response_time_total', str(key), response_times[key]['total'], 'counter') + dispatch_value('response_time_count', str(key), response_times[key]['count'], 'counter') innodb_status = fetch_innodb_stats(conn) for key in MYSQL_INNODB_STATUS_VARS: