Oracle学习笔记(十)分区索引失效的思考

允我心安 提交于 2020-02-14 17:11:34
此处只说索引失效的场景(只会影响全局索引):
结论:全局索引truncate分区和交换分区都会导致索引失效
局部索引truncate分区不会导致索引失效。
-- Demo setup: a range-partitioned table used to show how TRUNCATE PARTITION
-- affects local vs. global indexes.
drop table part_tab_trunc purge;
create table part_tab_trunc (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (maxvalue)
        )
        ;
-- 50,000 rows, spread over all three partitions by id.
insert into part_tab_trunc select rownum ,rownum+1,rownum+2, rpad('*',400,'*') from dual connect by rownum <=50000;
commit;
-- col2 gets a LOCAL index (one index partition per table partition);
-- col3 gets a GLOBAL index (one index segment covering the whole table).
create  index idx_part_trunc_col2  on part_tab_trunc(col2) local;
create  index idx_part_trunc_col3  on part_tab_trunc(col3) ;


---Status of the LOCAL index partitions before TRUNCATE PARTITION
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_TRUNC_COL2';
 
 
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_TRUNC_COL2            P1                             USABLE
IDX_PART_TRUNC_COL2            P2                             USABLE
IDX_PART_TRUNC_COL2            P3                             USABLE

--Status of the GLOBAL index before TRUNCATE PARTITION
select index_name, status
  from user_indexes
 where index_name = 'IDX_PART_TRUNC_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_TRUNC_COL3            VALID

--The operation under test: truncate a single partition.
alter table part_tab_trunc truncate partition p1 ;

---Status after TRUNCATE PARTITION: the local index partitions stay USABLE,
---while the global index (below) becomes UNUSABLE.
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_TRUNC_COL2';
 
 
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_TRUNC_COL2            P1                             USABLE
IDX_PART_TRUNC_COL2            P2                             USABLE
IDX_PART_TRUNC_COL2            P3                             USABLE


select index_name, status
  from user_indexes
 where index_name = 'IDX_PART_TRUNC_COL3';


INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_TRUNC_COL3            UNUSABLE

  

此处只说索引失效的场景(也是只影响全局索引):
--Experiment 1 (WITHOUT the UPDATE GLOBAL INDEXES clause)

drop table part_tab_drop purge;
create table part_tab_drop (id int,col2 int ,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (maxvalue)
        )
        ;
insert into part_tab_drop select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=50000;
commit;

-- Local index on col2, global index on col3.
create  index idx_part_drop_col2 on part_tab_drop(col2) local;
create  index idx_part_drop_col3 on part_tab_drop(col3) ;

--Before dropping the partition: the global index is VALID.
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             VALID

alter table part_tab_drop drop partition p1 ;

--After dropping the partition: the global index is now UNUSABLE.
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             UNUSABLE

--Experiment 2 (WITH the UPDATE GLOBAL INDEXES clause)
drop table part_tab_drop purge;
create table part_tab_drop (id int,col2 int ,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (maxvalue)
        )
        ;
insert into part_tab_drop select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=50000;
commit;

create  index idx_part_drop_col2 on part_tab_drop(col2) local;
create  index idx_part_drop_col3 on part_tab_drop(col3) ;

--Before dropping the partition
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             VALID

-- UPDATE GLOBAL INDEXES maintains the global index during the drop.
alter table part_tab_drop drop partition p1 Update GLOBAL  indexes;

--After dropping the partition: the global index remains VALID.
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             VALID

  

--Scenario where indexes become unusable:
--When a partitioned table is SPLIT and the MAX partition already contains rows,
--the SPLIT leaves the local index partitions of the populated new partitions UNUSABLE!
drop table part_tab_split purge;
create table part_tab_split (id int,col2 int ,col3 int ,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p_max values less than (maxvalue)
        )
        ;
-- 90,000 rows, so p_max holds all ids above 20,000 before the splits below.
insert into part_tab_split select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=90000;
commit;
create index idx_part_split_col2 on part_tab_split (col2) local;
create index idx_part_split_col3 on part_tab_split (col3) ;

---Before the partition SPLITs
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_SPLIT_COL2';

INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ -------
IDX_PART_SPLIT_COL2            P1                             USABLE
IDX_PART_SPLIT_COL2            P2                             USABLE
IDX_PART_SPLIT_COL2            P_MAX                          USABLE
 
select index_name, status
  from user_indexes
 where index_name = 'IDX_PART_SPLIT_COL3';
 
INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_SPLIT_COL3            VALID
 
--Repeatedly split rows out of the populated P_MAX partition;
--each new partition receives existing rows.
alter table part_tab_split SPLIT PARTITION   P_MAX  at (30000) into (PARTITION p3,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (40000) into (PARTITION p4,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (50000) into (PARTITION p5,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (60000) into (PARTITION p6,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (70000) into (PARTITION p7,PARTITION P_MAX);
 
---After the partition SPLITs: local index partitions of the populated
---new partitions are UNUSABLE, and so is the global index.

select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_SPLIT_COL2';

INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_SPLIT_COL2            P1                             USABLE
IDX_PART_SPLIT_COL2            P2                             USABLE
IDX_PART_SPLIT_COL2            P3                             UNUSABLE
IDX_PART_SPLIT_COL2            P4                             UNUSABLE
IDX_PART_SPLIT_COL2            P5                             UNUSABLE
IDX_PART_SPLIT_COL2            P6                             UNUSABLE
IDX_PART_SPLIT_COL2            P7                             UNUSABLE
IDX_PART_SPLIT_COL2            P_MAX                          UNUSABLE
 
select index_name, status
  from user_indexes
 where index_name = 'IDX_PART_SPLIT_COL3';
 
 
INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_SPLIT_COL3            UNUSABLE
 
--结论是:split会导致全局索引失效,也会导致局部索引失效。(假如P7分区没数据则局部索引不会失效)
 
--Rebuild the indexes left UNUSABLE by the SPLIT operations above.

--Local index rebuild: generate one "alter index ... rebuild partition ..."
--statement per unusable index partition.  Columns are qualified with the
--table alias: the original unqualified STATUS/PARTITION_NAME only worked
--because USER_PART_INDEXES happens to lack those columns, which is fragile.
select b.table_name,
       a.index_name,
       a.partition_name,
       a.status,
       'alter index ' || a.index_name || ' rebuild partition ' || a.partition_name || ';' as rebuild_ddl
  from user_ind_partitions a
       inner join user_part_indexes b
               on a.index_name = b.index_name
 where b.table_name = 'PART_TAB_SPLIT'
   and a.status = 'UNUSABLE'
 order by b.table_name, a.index_name, a.partition_name;

--Global index rebuild: a single REBUILD restores the whole index.
alter index idx_part_split_col3 rebuild;

  

add不会导致全局和局部索引失效!
-- Setup for the ADD PARTITION demo: ADD invalidates neither index type.
drop table part_tab_add purge;
create table part_tab_add (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (30000),
        partition p4 values less than (40000),
        partition p5 values less than (50000)
        )
        ;
insert into part_tab_add select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=45000;
commit;

-- Local index on col2, global index on col3.
create index idx_part_add_col2 on part_tab_add (col2) local;
create index idx_part_add_col3 on part_tab_add (col3) ;

---Before ADD PARTITION
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_ADD_COL2';
 
 INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ ------
IDX_PART_ADD_COL2              P1                             USABLE
IDX_PART_ADD_COL2              P2                             USABLE
IDX_PART_ADD_COL2              P3                             USABLE
IDX_PART_ADD_COL2              P4                             USABLE
IDX_PART_ADD_COL2              P5                             USABLE

select index_name, status
  from user_indexes
 where index_name = 'IDX_PART_ADD_COL3';
 
INDEX_NAME                     STATUS
------------------------------ -------
IDX_PART_ADD_COL3              VALID

 
--Adding partitions only appends empty segments; no data moves.
alter table part_tab_add  add PARTITION p6 values less than (60000);
alter table part_tab_add  add PARTITION p_max  values less than (maxvalue);

---After ADD PARTITION (the original note mislabelled this as "before"):
---all local index partitions, including the new ones, are USABLE and the
---global index stays VALID.
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_ADD_COL2';

INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ -------
IDX_PART_ADD_COL2              P1                             USABLE
IDX_PART_ADD_COL2              P2                             USABLE
IDX_PART_ADD_COL2              P3                             USABLE
IDX_PART_ADD_COL2              P4                             USABLE
IDX_PART_ADD_COL2              P5                             USABLE
IDX_PART_ADD_COL2              P6                             USABLE
IDX_PART_ADD_COL2              P_MAX                          USABLE
 
select index_name, status
  from user_indexes
 where index_name = 'IDX_PART_ADD_COL3';

INDEX_NAME                     STATUS
------------------------------ ------
IDX_PART_ADD_COL3              VALID

  交换索引:

/*
此处只说索引失效的场景(只会影响全局索引):
结论:全局索引truncate分区和交换分区都会导致索引失效
局部索引truncate分区不会导致索引失效。但是如果交换分区的时候,交换的临时表没有索引,或者
有索引,没有用including indexes的关键字,会导致局部的索引失效,就是某个分区失效
重建局部索引只能用alter index local_idx  rebuild partition p1这样的方式
*/

--Partition EXCHANGE demo
connect ljb/ljb
drop table part_tab_exch purge;
create table part_tab_exch (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
         partition p1 values less than (10000),
         partition p2 values less than (20000),
         partition p3 values less than (30000),
         partition p4 values less than (40000),
         partition p5 values less than (50000),
         partition p_max values less than (maxvalue)
        )
        ;
insert into part_tab_exch select rownum ,rownum+1,rownum+2, rpad('*',400,'*') from dual connect by rownum <=60000;
commit;

create index idx_part_exch_col2 on part_tab_exch(col2) local;
create index idx_part_exch_col3 on part_tab_exch (col3);


--EXCHANGE swaps the data of one partition with an ordinary (non-partitioned) table.
drop table normal_tab purge;
create table normal_tab (id int,col2 int,col3 int,contents varchar2(4000));
create index idx_norm_col2  on normal_tab (col2);


--Before the partition EXCHANGE
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_EXCH_COL2             P1                             USABLE
IDX_PART_EXCH_COL2             P2                             USABLE
IDX_PART_EXCH_COL2             P3                             USABLE
IDX_PART_EXCH_COL2             P4                             USABLE
IDX_PART_EXCH_COL2             P5                             USABLE
IDX_PART_EXCH_COL2             P_MAX                          USABLE

select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';
INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_EXCH_COL3             VALID

--INCLUDING INDEXES exchanges the local index partition with the table's
--index, keeping the local index usable.
alter table part_tab_exch exchange partition p1 with table normal_tab including indexes;


--After the EXCHANGE: local index partitions are unaffected ...
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_EXCH_COL2             P1                             USABLE
IDX_PART_EXCH_COL2             P2                             USABLE
IDX_PART_EXCH_COL2             P3                             USABLE
IDX_PART_EXCH_COL2             P4                             USABLE
IDX_PART_EXCH_COL2             P5                             USABLE
IDX_PART_EXCH_COL2             P_MAX                          USABLE

-- ... but the GLOBAL index becomes UNUSABLE.
select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_EXCH_COL3             UNUSABLE

---注意,如果加上update global indexes 关键字,这个IDX_PART_EXCH_COL3就不会失效

--Experiment 2 (WITH the UPDATE GLOBAL INDEXES clause)

--Partition EXCHANGE demo, same setup as experiment 1
connect ljb/ljb
drop table part_tab_exch purge;
create table part_tab_exch (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
         partition p1 values less than (10000),
         partition p2 values less than (20000),
         partition p3 values less than (30000),
         partition p4 values less than (40000),
         partition p5 values less than (50000),
         partition p_max values less than (maxvalue)
        )
        ;
insert into part_tab_exch select rownum ,rownum+1,rownum+2, rpad('*',400,'*') from dual connect by rownum <=60000;
commit;

create index idx_part_exch_col2 on part_tab_exch(col2) local;
create index idx_part_exch_col3 on part_tab_exch (col3);


--EXCHANGE swaps the data of one partition with an ordinary (non-partitioned) table.
drop table normal_tab purge;
create table normal_tab (id int,col2 int,col3 int,contents varchar2(4000));
create index idx_norm_col2  on normal_tab (col2);


--Before the partition EXCHANGE
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';

--UPDATE GLOBAL INDEXES maintains the global index, so it stays VALID.
alter table part_tab_exch exchange partition p1 with table normal_tab including indexes Update GLOBAL  indexes;

--After the EXCHANGE
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';

  失去分区表的意义:

-- Range partition example
drop table range_part_tab purge;
--Note: this table is range-partitioned by date.

--Example 1
create table range_part_tab (id number,deal_date date,area_code number,nbr number,contents varchar2(4000))
           partition by range (deal_date)
           (
           partition p_201301 values less than (TO_DATE('2013-02-01', 'YYYY-MM-DD')),
           partition p_201302 values less than (TO_DATE('2013-03-01', 'YYYY-MM-DD')),
           partition p_201303 values less than (TO_DATE('2013-04-01', 'YYYY-MM-DD')),
           partition p_201304 values less than (TO_DATE('2013-05-01', 'YYYY-MM-DD')),
           partition p_201305 values less than (TO_DATE('2013-06-01', 'YYYY-MM-DD')),
           partition p_201306 values less than (TO_DATE('2013-07-01', 'YYYY-MM-DD')),
           partition p_201307 values less than (TO_DATE('2013-08-01', 'YYYY-MM-DD')),
           partition p_max values less than (maxvalue)
           )
           ;


--Insert 100,000 rows with random dates in the PAST year (sysdate-365 .. sysdate)
--and random Fujian area codes (591..599).  NOTE: the dates are relative to
--sysdate at run time, not literally 2013 as the partition names suggest.
insert into range_part_tab (id,deal_date,area_code,nbr,contents)
      select rownum,
             to_date( to_char(sysdate-365,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
             ceil(dbms_random.value(591,599)),
             ceil(dbms_random.value(18900000001,18999999999)),
             rpad('*',400,'*')
        from dual
      connect by rownum <= 100000;
commit;



--Insert another 100,000 rows with random dates in the COMING year
--(sysdate .. sysdate+365); with the 2013 boundaries above these all
--land in the catch-all p_max partition.
insert into range_part_tab (id,deal_date,area_code,nbr,contents)
      select rownum,
             to_date( to_char(sysdate,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
             ceil(dbms_random.value(591,599)),
             ceil(dbms_random.value(18900000001,18999999999)),
             rpad('*',400,'*')
        from dual
      connect by rownum <= 100000;
commit;

--经常有类似如下的案例:由于规划失误,数据都进默认分区,导致默认分区超大,失去分区表的意义。

SQL> select count(*) from range_part_tab partition (p_201301);
  COUNT(*)
----------
     16858
SQL> select count(*) from range_part_tab partition (p_201302);
  COUNT(*)
----------
      7664
SQL> select count(*) from range_part_tab partition (p_201303);
  COUNT(*)
----------
      8484
SQL> select count(*) from range_part_tab partition (p_201304);
  COUNT(*)
----------
      8177
SQL> select count(*) from range_part_tab partition (p_201305);
  COUNT(*)
----------
      8414
SQL> select count(*) from range_part_tab partition (p_201306);
  COUNT(*)
----------
      8245
SQL> select count(*) from range_part_tab partition (p_201307);
  COUNT(*)
----------
      8565
SQL> select count(*) from range_part_tab partition (p_max);
  COUNT(*)
----------
    133593

  分区表比普通表性能差:

--Build the partitioned table and load data.
drop table part_tab purge;
create table part_tab (id int,col2 int,col3 int)
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (30000),
        partition p4 values less than (40000),
        partition p5 values less than (50000),
        partition p6 values less than (60000),
        partition p7 values less than (70000),
        partition p8 values less than (80000),
        partition p9 values less than (90000),
        partition p10 values less than (100000),
        partition p11 values less than (maxvalue)
        )
        ;
insert into part_tab select rownum,rownum+1,rownum+2 from dual connect by rownum <=110000;
commit;
create  index idx_par_tab_col2 on part_tab(col2) local;
create  index idx_par_tab_col3 on part_tab(col3) ;


--Build an ordinary table with the same structure and row count.

drop table norm_tab purge;
create table norm_tab  (id int,col2 int,col3 int);
insert into norm_tab select rownum,rownum+1,rownum+2 from dual connect by rownum <=110000;
commit;
create  index idx_nor_tab_col2 on norm_tab(col2) ;
create  index idx_nor_tab_col3 on norm_tab(col3) ;

set autotrace traceonly
set linesize 1000
set timing on 

--Experiment group 1
--First, the partitioned table: without the partition key in the predicate,
--the local index must be probed in every partition (Pstart=1, Pstop=11 below).
select * from part_tab where col2=8 ;
执行计划
-----------------------------------------------------------------------------------------------------------------------
| Id  | Operation                          | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
-----------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                   |                  |     1 |    39 |    13   (0)| 00:00:01 |       |       |
|   1 |  PARTITION RANGE ALL               |                  |     1 |    39 |    13   (0)| 00:00:01 |     1 |    11 |
|   2 |   TABLE ACCESS BY LOCAL INDEX ROWID| PART_TAB         |     1 |    39 |    13   (0)| 00:00:01 |     1 |    11 |
|*  3 |    INDEX RANGE SCAN                | IDX_PAR_TAB_COL2 |     1 |       |    12   (0)| 00:00:01 |     1 |    11 |
-----------------------------------------------------------------------------------------------------------------------
  3 - access("COL2"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
         24  consistent gets
          0  physical reads
          0  redo size
        539  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed

--Now the ordinary table: a single B-tree probe, far fewer buffer gets.
select * from norm_tab where col2=8 ;
执行计划
------------------------------------------------------------------------------------------------
| Id  | Operation                   | Name             | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |                  |     1 |    39 |     2   (0)| 00:00:01 |
|   1 |  TABLE ACCESS BY INDEX ROWID| NORM_TAB         |     1 |    39 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_NOR_TAB_COL2 |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------
 2 - access("COL2"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          4  consistent gets
          0  physical reads
          0  redo size
        543  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed



--Check index height (blevel) and related statistics for both tables.
set autotrace off
select index_name,
          blevel,
          leaf_blocks,
          num_rows,
          distinct_keys,
          clustering_factor
     from user_ind_statistics
    where table_name in( 'NORM_TAB');
    
select index_name,
          blevel,
          leaf_blocks,
          num_rows,
          distinct_keys,
          clustering_factor FROM USER_IND_PARTITIONS where index_name like 'IDX_PAR_TAB%';


--Two more experiment groups:
set autotrace traceonly

--Experiment group 2: the partition key (id) is in the predicate,
--so partition pruning applies (PARTITION RANGE SINGLE below).
select * from part_tab where col2=8 and id=2;
执行计划
----------------------------------------------------------------------------------------------------------------------
| Id  | Operation                          | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
-----------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                   |                  |     1 |    39 |     2   (0)| 00:00:01 |       |       |
|   1 |  PARTITION RANGE SINGLE            |                  |     1 |    39 |     2   (0)| 00:00:01 |     1 |     1 |
|*  2 |   TABLE ACCESS BY LOCAL INDEX ROWID| PART_TAB         |     1 |    39 |     2   (0)| 00:00:01 |     1 |     1 |
|*  3 |    INDEX RANGE SCAN                | IDX_PAR_TAB_COL2 |     1 |       |     1   (0)| 00:00:01 |     1 |     1 |
-----------------------------------------------------------------------------------------------------------------------
  2 - filter("ID"=2)
  3 - access("COL2"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          3  consistent gets
          0  physical reads
          0  redo size
        399  bytes sent via SQL*Net to client
        404  bytes received via SQL*Net from client
          1  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          0  rows processed
          
select * from norm_tab where col2=8 and id=2;

执行计划
----------------------------------------------------------
Plan hash value: 3649198428

------------------------------------------------------------------------------------------------
| Id  | Operation                   | Name             | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |                  |    10 |   390 |     2   (0)| 00:00:01 |
|*  1 |  TABLE ACCESS BY INDEX ROWID| NORM_TAB         |    10 |   390 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_NOR_TAB_COL2 |     4 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------
  1 - filter("ID"=2)
  2 - access("COL2"=8) 
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
        329  consistent gets
          0  physical reads
          0  redo size
        399  bytes sent via SQL*Net to client
        404  bytes received via SQL*Net from client
          1  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          0  rows processed
          
          
--Experiment group 3: querying on col3 uses the GLOBAL index.
select * from part_tab where col3=8 ;
执行计划
-----------------------------------------------------------------------------------------------------------------------
| Id  | Operation                          | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
-----------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                   |                  |     1 |    15 |     2   (0)| 00:00:01 |       |       |
|   1 |  TABLE ACCESS BY GLOBAL INDEX ROWID| PART_TAB         |     1 |    15 |     2   (0)| 00:00:01 | ROWID | ROWID |
|*  2 |   INDEX RANGE SCAN                 | IDX_PAR_TAB_COL3 |     1 |       |     1   (0)| 00:00:01 |       |       |
-----------------------------------------------------------------------------------------------------------------------
   2 - access("COL3"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          4  consistent gets
          0  physical reads
          0  redo size
        543  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed

select * from norm_tab where col3=8 ;
执行计划
------------------------------------------------------------------------------------------------
| Id  | Operation                   | Name             | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |                  |     1 |    15 |     2   (0)| 00:00:01 |
|   1 |  TABLE ACCESS BY INDEX ROWID| NORM_TAB         |     1 |    15 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_NOR_TAB_COL3 |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------
   2 - access("COL3"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          4  consistent gets
          0  physical reads
          0  redo size
        543  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed

  统计信息引发的性能:

--Experiment 1 (no statistics gathered on the global temporary table)

DROP TABLE t1 CASCADE CONSTRAINTS PURGE; 
DROP TABLE t2 CASCADE CONSTRAINTS PURGE; 
CREATE TABLE t1 (
     id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   )
   ; 
   --Create a session-scoped global temporary table.
CREATE global temporary table t2 (
     id NUMBER NOT NULL,
     t1_id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   ) on commit preserve rows 
   ;   
execute dbms_random.seed(0); 
INSERT INTO t1
     SELECT  rownum,  rownum, dbms_random.string('a', 50)
       FROM dual
     CONNECT BY level <= 10
      ORDER BY dbms_random.random; 
INSERT INTO t2 SELECT rownum, rownum, rownum, dbms_random.string('b', 50) FROM dual CONNECT BY level <= 100000
    ORDER BY dbms_random.random; 
COMMIT; 
select count(*) from t1;
select count(*) from t2;

set linesize 1000 
alter session set statistics_level=all ;
--With no stored stats, dynamic sampling gives a reasonable T2 estimate.
SELECT *
FROM t1, t2
WHERE t1.id = t2.t1_id;
select * from table(dbms_xplan.display_cursor(null,null,'allstats last'));
----------------------------------------------------------------------------------------------------------------
| Id  | Operation          | Name | Starts | E-Rows | A-Rows |   A-Time   | Buffers |  OMem |  1Mem | Used-Mem |
----------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT   |      |      1 |        |     10 |00:00:00.05 |     991 |       |       |          |
|*  1 |  HASH JOIN         |      |      1 |     10 |     10 |00:00:00.05 |     991 |   742K|   742K| 1092K (0)|
|   2 |   TABLE ACCESS FULL| T1   |      1 |     10 |     10 |00:00:00.01 |       7 |       |       |          |
|   3 |   TABLE ACCESS FULL| T2   |      1 |  90564 |    100K|00:00:00.02 |     984 |       |       |          |
----------------------------------------------------------------------------------------------------------------
--Experiment 2 (statistics HAVE been gathered on the global temporary table)


DROP TABLE t1 CASCADE CONSTRAINTS PURGE; 
DROP TABLE t2 CASCADE CONSTRAINTS PURGE; 
CREATE TABLE t1 (
     id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   )
   ; 
--Session-scoped global temporary table, as in experiment 1.
CREATE global temporary table t2 (
     id NUMBER NOT NULL,
     t1_id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   ) on commit preserve rows 
   ;   
execute dbms_random.seed(0); 
INSERT INTO t1
     SELECT  rownum,  rownum, dbms_random.string('a', 50)
       FROM dual
     CONNECT BY level <= 10
      ORDER BY dbms_random.random; 
INSERT INTO t2 SELECT rownum, rownum, rownum, dbms_random.string('b', 50) FROM dual CONNECT BY level <= 100000
    ORDER BY dbms_random.random; 
COMMIT; 

--If statistics were ever gathered on the global temporary table, that
--gathering cannot have happened in this very session.  To reproduce the
--situation, exit the current session, open a NEW session, and run the
--following (the second line of this note was missing its "--" marker
--in the original, which would have broken the script):
exec dbms_stats.gather_table_stats(ownname => 'LJB',tabname => 'T2',estimate_percent => 10,method_opt=> 'for all indexed columns',cascade=>TRUE) ;  

--Verify the statistics now recorded for T2 in the data dictionary.
select table_name,
       partition_name,
       last_analyzed,
       partition_position,      
       num_rows
  from user_tab_statistics t
 where table_name ='T2';
 
 
 --Then return to the original session and continue.
select count(*) from t1;
select count(*) from t2;


set linesize 1000 
alter session set statistics_level=all ;
--The stale stats (gathered in a session that saw 0/few rows) now mislead
--the optimizer: compare E-Rows vs A-Rows in the plan below.
SELECT *
FROM t1, t2
WHERE t1.id = t2.t1_id;
select * from table(dbms_xplan.display_cursor(null,null,'allstats last'));

----------------------------------------------------------------------------------------------------------------
| Id  | Operation          | Name | Starts | E-Rows | A-Rows |   A-Time   | Buffers |  OMem |  1Mem | Used-Mem |
----------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT   |      |      1 |        |     10 |00:00:00.10 |    1970 |       |       |          |
|*  1 |  HASH JOIN         |      |      1 |      1 |     10 |00:00:00.10 |    1970 |  9472K|  1956K|    9M (0)|
|   2 |   TABLE ACCESS FULL| T2   |      1 |      1 |    100K|00:00:00.02 |    1962 |       |       |          |
|   3 |   TABLE ACCESS FULL| T1   |      1 |     10 |     10 |00:00:00.01 |       8 |       |       |          |
----------------------------------------------------------------------------------------------------------------

  各接口程序的经典优化:

主要应用了临时表各个SESSION数据独立的特性。
可以考虑用同样一张表名,给不同的应用接口使用。
传统的老方法是有10个接口程序就处理10张不同的中间接口表,在里面增加一个表示类型的字段,来区别,这样对扩展性和性能都有害。


drop table t_global;
--Session-scoped GTT (ON COMMIT PRESERVE ROWS), structure copied from dba_objects.
create global temporary table t_global on commit preserve rows as select  * from dba_objects where 1=2;
select table_name,temporary,duration from user_tables  where table_name='T_GLOBAL';

--A session-based temporary table is enough to demonstrate per-session data
--isolation; no need to also try the transaction-based variant.
---Connect as session 1 (e.g. the process of business app 1)
 connect ljb/ljb
 
 insert into t_global select * from dba_objects where rownum<=10;
 --Commit here: with PRESERVE ROWS the rows survive the commit for this session.
 commit;
 select count(*) from t_global;


  COUNT(*)
----------
    10
 
--Now log in as session 2 (e.g. the process of business app 2).  The original
--line above had no comment marker and would have broken the script.
connect yxl/yxl 

--Session 2 sees only its own rows in the global temporary table.
insert into t_global select * from dba_objects where rownum<=20;
commit;
select count(*) from t_global;

  COUNT(*)
----------
    20

--业务n的进程......

  字段设计和空间换时间:

思路:

    把用于中间运算的全局临时表增加字段,从而减少了在一些业务场景下的表连接次数,这是一个空间换时间的概念,需要权衡利弊。
    
相关案例概述:

某次生产系统中发现一条语句执行的很慢,经过查看该语句十表关联, 执行计划甚为复杂,类似如下:

    SELECT *
   FROM t1           a,
        t_global_tab b,
        t2           c,
        t3           d,
        t4           e,
        t5           f,
        ...
  WHERE a.id=b.id
     and ...
     and ...


后来经过业务的推敲和确认,发现将这个t_global_tab临时表增加几个字段,就无需到那么多表中获取其他相关信息了。
改造后的SQL写法,从原来的10表关联瞬间变成4表关联。性能大大提升了!

  

日志暴增相关的故障:
某次系统出现REDO暴增的情况,经过查询,发现有大量的delete语句在操作,而该语句实质是在做中间运算,将临时数据先存在中间表,处理完毕后删除该表。
后来把这个表改为全局临时表后,REDO暴增的情况得以大大缓和,系统性能同时也得以提升。

---Setup: create a view for observing redo generation of the current session.
sqlplus "/ as sysdba"
grant all on v_$mystat to ljb;
grant all on v_$statname to ljb;
connect  ljb/ljb
drop table t purge;
create table t as select * from dba_objects ;
--The view lets us simply run "select * from v_redo_size" later.
create or replace view v_redo_size as
    select a.name,b.value
    from v$statname a,v$mystat b
    where a.statistic#=b.statistic#
    and a.name='redo size';


--Approach 1: ordinary heap table as the scratch area.
drop table t_tmp purge;
create table t_tmp (id int,col2 int ,col3 int,contents varchar2(4000));

select * from v_redo_size;

NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                              9988

begin 
   insert into t_tmp select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=10000; 
   --After the temporary insert into t_tmp, delete its rows again
   --(most of the real business logic is omitted here).
   delete from t_tmp ;
   commit;
end; 
/

select * from v_redo_size;

NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                          11385896

总共产生日志量为:11385896-9988=11375908

--Approach 2: global temporary table as the scratch area.
--Exit the session and reconnect in a NEW session before running this.
drop table t_global purge;
create global temporary table t_global (id int,col2 int ,col3 int,contents varchar2(4000)) on commit delete rows;

select * from v_redo_size;

NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                             42272

begin 
   insert into t_global select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=10000; 
   --No explicit delete needed: ON COMMIT DELETE ROWS purges the data at commit.
   --delete from t_global ;
  commit;
end; 
/

select * from v_redo_size;
NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                             209152

总共产生日志量为:209152-42272=166880

                               11385896

  

易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!