from pyspark.sql import SparkSession
from pyspark.sql.functions import col, coalesce, trim, when, lit, sum
from pyspark.sql.types import StringType, NumericType
# Initialize a SparkSession with Hive support enabled so that
# spark.catalog / spark.table() can read tables from the Hive metastore.
# (The original line fused backslash continuations into `\.appName(...)`,
# which is invalid Python syntax; parenthesized chaining fixes it.)
spark = (
    SparkSession.builder
    .appName("Hive Data Quality Analysis")
    .enableHiveSupport()
    .getOrCreate()
)
# Target Hive database to scan; replace with the real database name.
database_name = "your_database"
# Accumulates one result tuple per column:
# (database, table, column, type, empty_count, total_rows, percentage).
result_list = []
# Scan every table in the target database and, for each column, count
# "empty-ish" values:
#   - string columns:  NULL or blank/whitespace-only values
#   - numeric columns: NULL or 0
#   - other columns:   NULL only
# Each column contributes one tuple to result_list.
tables = spark.catalog.listTables(database_name)
for table in tables:
    table_name = table.name
    full_table_name = f"{database_name}.{table_name}"
    try:
        df = spark.table(full_table_name)
        # Cache once: the table is scanned repeatedly below
        # (one count() plus one agg() per column).
        df.cache()
        try:
            total_count = df.count()
            if total_count == 0:
                # Nothing to report for empty tables.
                continue
            for field in df.schema.fields:
                column_name = field.name
                column_type = str(field.dataType)
                if isinstance(field.dataType, StringType):
                    # Normalize: NULL -> "" and strip surrounding whitespace,
                    # so NULL, "" and "   " all count as empty.
                    modified_col = trim(coalesce(col(column_name), lit("")))
                    condition = modified_col == lit("")
                elif isinstance(field.dataType, NumericType):
                    # Normalize: NULL -> 0, so NULL and 0 both count as empty.
                    modified_col = coalesce(col(column_name), lit(0))
                    condition = modified_col == lit(0)
                else:
                    # Other types (dates, structs, ...): count NULLs only.
                    condition = col(column_name).isNull()
                # NOTE: `sum` here is pyspark.sql.functions.sum (the file-level
                # import shadows the builtin).
                count_expr = sum(when(condition, 1).otherwise(0)).alias("cnt")
                stats = df.agg(count_expr).collect()[0]["cnt"]
                # total_count > 0 is guaranteed here (empty tables were skipped
                # above), so no division-by-zero guard is needed.
                percentage = round((stats / total_count) * 100, 2)
                result_list.append(
                    (
                        database_name,
                        table_name,
                        column_name,
                        column_type,
                        stats,
                        total_count,
                        float(percentage),
                    )
                )
        finally:
            # Always release the cache — the original leaked it when an
            # exception fired mid-table or when the empty-table `continue`
            # skipped past the unpersist() call.
            df.unpersist()
    except Exception as e:
        # Best-effort: report the failing table and move on to the next one.
        print(f"Error processing table {table_name}: {str(e)}")
        continue
# Column names for the report DataFrame; order must match the tuples
# appended to result_list in the scan loop.
result_columns = [
    "database_name",
    "table_name",
    "column_name",
    "column_type",
    "stat_count",
    "total_rows",
    "percentage",
]
result_df = spark.createDataFrame(result_list, result_columns)
# Print the full report without truncating long column values.
result_df.show(truncate=False)
# Release the Spark application's resources.
spark.stop()
代码说明:
- 初始化配置:创建SparkSession并启用Hive支持
- 元数据获取:通过Spark Catalog获取指定数据库的所有表
- 数据缓存:对每个表进行缓存以提高后续多次操作的性能
- 字段类型判断:
- 字符串类型:NULL转空字符串并去除空格
- 数值类型:NULL转0
- 其他类型:直接统计NULL值
- 统计计算:使用PySpark的表达式进行条件统计
- 结果收集:将统计结果组织成结构化数据格式
- 结果输出:将最终结果以表格形式展示
注意事项:
- 需要替换代码中的 `your_database` 为实际数据库名称
- 该代码会处理数据库中所有表,如需指定特定表,可修改 `tables` 的获取逻辑
- 结果展示方式可根据需要修改为写入Hive表或文件系统
- 处理大型表时建议增加分区处理逻辑以提高性能
- 需要确保Spark集群有足够内存来处理目标表的数据量
输出示例:
+-------------+-----------+-----------+-----------+----------+----------+----------+
|database_name|table_name |column_name|column_type|stat_count|total_rows|percentage|
+-------------+-----------+-----------+-----------+----------+----------+----------+
|your_database|customers  |name       |StringType |125       |10000     |1.25      |
|your_database|customers  |age        |IntegerType|324       |10000     |3.24      |
|your_database|orders     |order_date |DateType   |56        |5000      |1.12      |
+-------------+-----------+-----------+-----------+----------+----------+----------+