HIVE语句

DDL(data definition)

CREATE、ALTER、DROP……

  • 创建表
CREATE [EXTERNAL] TABLE [IF NOT EXISTS] table_name 
   [(col_name data_type [COMMENT col_comment], ...)] 
   [COMMENT table_comment] 
   [PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)] 
   [CLUSTERED BY (col_name, col_name, ...) 
   [SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS] 
   [ROW FORMAT row_format] 
   [STORED AS file_format] 
   [LOCATION hdfs_path]
  • 修改表
ALTER TABLE table_name ADD [IF NOT EXISTS] partition_spec [ LOCATION 'location1' ] partition_spec [ LOCATION 'location2' ] ...
partition_spec:
: PARTITION (partition_col = partition_col_value, partition_col = partition_col_value, ...)
ALTER TABLE table_name DROP partition_spec, partition_spec,...

  • 显示命令
show tables
show databases
show partitions
show functions
desc extended t_name;
desc formatted table_name;

操作:

内部表
-- Managed (internal) table: Hive owns both the metadata and the data directory.
CREATE TABLE IF NOT EXISTS mytable (sid INT, sname STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE;
外部表
-- External table: the data stays at the given HDFS path; DROP removes only metadata.
CREATE EXTERNAL TABLE t_sz_ext (id INT, name STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE
LOCATION '/class03';
分区表
-- Partitioned table; "##" is not a Hive comment marker and the original
-- full-width quotes ‘,’ would be a syntax error — both fixed below.
create table t_sz_part(id int,name string)
partitioned by (country string)  -- "country" becomes a pseudo-column of the table
row format delimited
fields terminated by ',';
增加修改分区:
-- Add / drop a partition (straight ASCII quotes are required; the original
-- used full-width quotes ’…’, which Hive rejects).
alter table t_sz_part add partition (country='america');
alter table t_sz_part drop partition (country='america');
show partitions t_sz_part;

分桶表
-- Bucketed table: rows are hash-distributed into 4 buckets by id,
-- and kept sorted by id inside each bucket.
CREATE TABLE t_buck (id STRING, name STRING)
CLUSTERED BY (id)
SORTED BY (id)
INTO 4 BUCKETS
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';

desc extended t_buck;

-- Staging table used to feed the bucketed table t_buck.
create table t_p(id string,name string)
row format delimited fields terminated by ',';
load data local inpath '/home/hadoop/t.dat' into table t_p;
-- Enable bucketing on insert ('#' is not a valid Hive comment marker; use '--').
set hive.enforce.bucketing = true;
-- Make the number of reducers match the number of buckets (4).
set mapreduce.job.reduces=4;
insert into table t_buck
select id,name from t_p cluster by (id);

truncate清空数据,drop删除表

内部表外部表的区别:外部表drop的时候,把元数据中的记录删掉了,表不存在了,但是数据还在。内部表drop时,元数据信息和表目录整个都会被删掉。内部表用于内部etl产生的表。外部表用于对接外部业务系统数据目录。
分区表作用:在做统计的时候少统计一些数据,把数据放入多个文件夹,统计的时候可以指定一个分区,统计的范围变小,加快运行速度。
分桶表 哈希散列
表或分区组织为桶,针对某一列对桶进行组织,hive采用对列值hash,然后除以桶的个数求余的方式决定该条记录存放在哪个桶中(和mapreduce中的get partition一样)。
作用:最大的作用就是用来提高join操作的效率。(mapside join)
对于JOIN操作两个表有一个相同的列,如果对这两个表都进行了桶操作。那么将保存相同列值的桶进行JOIN操作就可以,可以大大减少JOIN的数据量。
select a.id,a.name,b.addr from a join b on a.id=b.id
如果a,b已经是分桶表且分桶字段是id字段(桶数一样或者成倍数关系),做上面join操作不需要全表做笛卡尔积。

DML(data manipulation)

LOAD、SELECT、UPDATE、INSERT、DELETE……
hive不会做数据检查,上传的数据它不能解释就不解释。select * 看不到的数据,hdfs上还在。
hive里面也可以敲hdfs命令,如dfs –ls /;

  • 关于order by,sort by,distribute by,cluster by
    order by会对输入做全局排序,因此只要一个reducer,当输入规模较大时需要较长的计算时间。
    set hive.mapred.mode=strict,order by 语句必须跟着limit,但是nonstrict下就不是必须的
    sort by不是全局排序,是数据进入reducer之前完成排序。mapred.reduce.task>1。sort by只保证每个reducer输出有序,不保证全局有序。
    distribute by(字段)根据指定字段将数据分到不同的reducer,且分发算法是hash散列。
    如果分桶和sort字段是同一个时,此时,cluster by(字段) = distribute by(id) +sort by(id)

  • 关于left,right,full outer join,inner join,left semi join

数据:
1,a
2,b
3,c
4,d
7,y
8,u

2,bb
3,cc
7,yy
9,pp

-- Two identically-shaped demo tables for the join examples below.
CREATE TABLE a (id INT, name STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';

CREATE TABLE b (id INT, name STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';

LOAD DATA LOCAL INPATH '/home/hadoop/a.txt' INTO TABLE a;
LOAD DATA LOCAL INPATH '/home/hadoop/b.txt' INTO TABLE b;

join
select * from a inner join b on a.id=b.id;
交集

select * from a left join b on a.id=b.id;
左连接

select * from a right join b on a.id=b.id;
右连接

select * from a full outer join b on a.id=b.id;
全连接

select * from a left semi join b on a.id=b.id;  -- behaves like an EXISTS subquery
也就是inner join 取左边一半

如下sql语句:
  SELECT a.key, a.value
  FROM a
  WHERE a.key exist in
   (SELECT b.key
    FROM B); -- INVALID: Hive does not support "exist in" with a subquery
-- Hive equivalent ('//' is not a SQL comment marker; use '--'):
select a.key,a.value
from a left semi join b on (a.key=b.key)  -- LEFT SEMI JOIN is Hive's efficient form of EXISTS/IN; faster than INNER JOIN here

hive中实现不等式join很难,比如:select a.* from a join b on (a.id>b.id)
  • 关于保存查询结果的三种方法
-- Three ways to persist a query result:
-- 1) CREATE TABLE AS SELECT
create table t_tmp
as
select * from t_p;

-- 2) INSERT INTO appends; INSERT OVERWRITE replaces the table contents
--    (the original "insert into(overwrite)" shorthand is not valid syntax).
insert into table t_tmp
select * from t_p;

-- 3) Write to a directory; drop LOCAL to write to HDFS instead.
insert overwrite local directory '/xx/xx'
select * from t_p;
-- (MapReduce writes the result to HDFS first, then copies it to the local path.)
  • 一些操作示例
-- Fixed: straight quotes instead of ‘…’, and the stray space in "t _sz_part".
load data local inpath '/home/hadoop/t.dat' into table t_sz_part partition(country='China');
country生成一个伪字段。

select count(1) from t_sz_part where country='China';
-- WHERE filters rows before grouping; a filter after GROUP BY must use HAVING.
select count(1) from t_sz_part group by (name='aaa');
select count(1) from t_sz_part where country='China' group by (name='aaa');
select Sname from student s where s.Sdept is not null;
select count(*) from student;
select avg(s.Grade) from sc s where s.Cno = 1;

group by
select Cno,avg(Grade) from sc group by Cno;
select Sno from (select Sno,Count(Cno) CountCno from sc group by Sno)a where a.CountCno>3;
select Sno from sc group by Sno having count(Cno)>3;

sort by,order by,distribute by,cluster by
insert overwrite local directory '/home/hadoop/out'
select * from student distribute by Sex sort by Sage;  -- keywords are DISTRIBUTE BY / SORT BY, not "distributed/sorted by"
select student.*,sc.* from student join sc on (student.Sno =sc.Sno);
set mapred.reduce.tasks=2;
insert overwrite local directory '/home/hadoop/out'
select * from student distribute by Sex sort by Sage;

join
-- Three-table join: student -> sc -> course.
SELECT student.Sname, course.Cname, sc.Grade
FROM student
JOIN sc ON student.Sno = sc.Sno
JOIN course ON sc.cno = course.cno;

-- Students scoring above 90 in course 2.
SELECT student.Sname, sc.Cno
FROM student
JOIN sc ON student.Sno = sc.Sno
WHERE sc.Cno = 2 AND sc.Grade > 90;

-- Students in the same department as '刘晨' (semi join keeps only left-side columns).
SELECT s1.Sname
FROM student s1
LEFT SEMI JOIN student s2 ON s1.Sdept = s2.Sdept AND s2.Sname = '刘晨';

-- Populate the bucketed table; CLUSTER BY id routes each row to its bucket.
INSERT INTO TABLE t_buck
SELECT id, name FROM t_p CLUSTER BY (id);
  • hive cli(command line interface)命令
SHOW FUNCTIONS;                 -- list every function available in the current session
DESC FUNCTION concat;           -- short description (singular FUNCTION, not "FUNCTIONS")
DESC FUNCTION EXTENDED concat;  -- extended description with usage examples
  • hive函数
    测试hive函数作用的小方法:
create table dual(id string);  -- Oracle-style "dual" pseudo-table for trying out functions
-- The loaded file must contain a single line holding one space
-- (the original put that note INSIDE the path string literal, corrupting the path).
load data local inpath '/home/hadoop/t.dat' into table dual;
select concat('a','b') from dual;
-- Hive string indexes are 1-based; a start of 0 is treated as 1, so this returns 'as'.
select substring('asdf',0,2) from dual;

推荐阅读更多精彩内容