前言

查询表的分区情况

程序

  • Jupyter
# Imports
from pyspark.sql import SparkSession, Row
from pyspark import SQLContext
from pyspark.sql.functions import udf, col, explode, collect_set, get_json_object, concat_ws,  split
from pyspark.sql.types import StringType, IntegerType, StructType, StructField, ArrayType, MapType

# from offline_verification_func import *
# Build a local Spark session with Hive support so Hive metastore
# tables (and their partitions) are visible to spark.sql().
builder = SparkSession.builder.master("local[50]")
builder = builder.config("spark.executor.memory", "10g")
builder = builder.config("spark.driver.memory", "20g")
builder = builder.config("spark.driver.maxResultSize", "4g")
builder = builder.appName("test")
spark = builder.enableHiveSupport().getOrCreate()

# Query: list the partitions of the given Hive table
# (replace 表名 with the actual table name).
spark.sql("""
show partitions  表名
""").show()
  • Hive中
# 显示表分区:
hive> show partitions table_name;
  • 数据库中
show partitions table_name;
Logo

为开发者提供学习成长、分享交流、生态实践、资源工具等服务,帮助开发者快速成长。

更多推荐