前言
前面的章节我们聊到,体系结构、逻辑结构、表设计、索引以及SQL语句都是可以优化的切入点。但凡事最好能在问题发生前做到预防,在问题发生后能够快速定位。所以这里列举了日常工作中频繁用到的监控类别和可以直接使用的脚本,在实际工作中能够定位大部分的问题,希望能够帮助到大家。
DBMS各时段主要参数信息
读取数据库系统的在各个小时的redo大小,物理读,逻辑读,SQL解析数,硬解析,事务数。从全局定位问题的时间段和产生问题的大致原因
-- Hourly workload profile from AWR history: for each snapshot interval,
-- redo bytes, logical/physical reads, executions, total/hard parses and
-- transactions, plus per-second rates and DB time (minutes).
-- Deltas are computed with LEAD() partitioned by startup_time so cumulative
-- counters are never diffed across an instance restart; an interval with a
-- NULL redosize is rendered as '--shutdown or end--'.
select s.snap_date,
decode(s.redosize, null, '--shutdown or end--', s.currtime) "TIME",
to_char(round(s.seconds/60,2)) "elapse(min)",
round(t.db_time / 1000000 / 60, 2) "DB time(min)",
s.redosize redo,
round(s.redosize / s.seconds, 2) "redo/s",
s.logicalreads logical,
round(s.logicalreads / s.seconds, 2) "logical/s",
physicalreads physical,
round(s.physicalreads / s.seconds, 2) "phy/s",
s.executes execs,
round(s.executes / s.seconds, 2) "execs/s",
s.parse,
round(s.parse / s.seconds, 2) "parse/s",
s.hardparse,
round(s.hardparse / s.seconds, 2) "hardparse/s",
s.transactions trans,
round(s.transactions / s.seconds, 2) "trans/s"
-- s: per-interval deltas of the cumulative sysstat counters
from (select curr_redo - last_redo redosize,
curr_logicalreads - last_logicalreads logicalreads,
curr_physicalreads - last_physicalreads physicalreads,
curr_executes - last_executes executes,
curr_parse - last_parse parse,
curr_hardparse - last_hardparse hardparse,
curr_transactions - last_transactions transactions,
-- interval length in seconds (DATE subtraction yields days)
round(((currtime + 0) - (lasttime + 0)) * 3600 * 24, 0) seconds,
to_char(currtime, 'yy/mm/dd') snap_date,
to_char(currtime, 'hh24:mi') currtime,
currsnap_id endsnap_id,
to_char(startup_time, 'yyyy-mm-dd hh24:mi:ss') startup_time
-- pair each snapshot with the next one via LEAD(), per instance startup
from (select a.redo last_redo,
a.logicalreads last_logicalreads,
a.physicalreads last_physicalreads,
a.executes last_executes,
a.parse last_parse,
a.hardparse last_hardparse,
a.transactions last_transactions,
lead(a.redo, 1, null) over(partition by b.startup_time order by b.end_interval_time) curr_redo,
lead(a.logicalreads, 1, null) over(partition by b.startup_time order by b.end_interval_time) curr_logicalreads,
lead(a.physicalreads, 1, null) over(partition by b.startup_time order by b.end_interval_time) curr_physicalreads,
lead(a.executes, 1, null) over(partition by b.startup_time order by b.end_interval_time) curr_executes,
lead(a.parse, 1, null) over(partition by b.startup_time order by b.end_interval_time) curr_parse,
lead(a.hardparse, 1, null) over(partition by b.startup_time order by b.end_interval_time) curr_hardparse,
lead(a.transactions, 1, null) over(partition by b.startup_time order by b.end_interval_time) curr_transactions,
b.end_interval_time lasttime,
lead(b.end_interval_time, 1, null) over(partition by b.startup_time order by b.end_interval_time) currtime,
lead(b.snap_id, 1, null) over(partition by b.startup_time order by b.end_interval_time) currsnap_id,
b.startup_time
-- a: pivot dba_hist_sysstat rows into one row per snapshot/instance
from (select snap_id,
dbid,
instance_number,
sum(decode(stat_name, 'redo size', value, 0)) redo,
sum(decode(stat_name,
'session logical reads',
value,
0)) logicalreads,
sum(decode(stat_name,
'physical reads',
value,
0)) physicalreads,
sum(decode(stat_name, 'execute count', value, 0)) executes,
sum(decode(stat_name,
'parse count (total)',
value,
0)) parse,
sum(decode(stat_name,
'parse count (hard)',
value,
0)) hardparse,
-- transactions = user rollbacks + user commits
sum(decode(stat_name,
'user rollbacks',
value,
'user commits',
value,
0)) transactions
from dba_hist_sysstat
where stat_name in
('redo size',
'session logical reads',
'physical reads',
'execute count',
'user rollbacks',
'user commits',
'parse count (hard)',
'parse count (total)')
group by snap_id, dbid, instance_number) a,
dba_hist_snapshot b
where a.snap_id = b.snap_id
and a.dbid = b.dbid
and a.instance_number = b.instance_number
order by end_interval_time)) s,
-- t: DB time delta per interval from the system time model (microseconds)
(select lead(a.value, 1, null) over(partition by b.startup_time order by b.end_interval_time) - a.value db_time,
lead(b.snap_id, 1, null) over(partition by b.startup_time order by b.end_interval_time) endsnap_id
from dba_hist_sys_time_model a, dba_hist_snapshot b
where a.snap_id = b.snap_id
and a.dbid = b.dbid
and a.instance_number = b.instance_number
and a.stat_name = 'DB time') t
where s.endsnap_id = t.endsnap_id
order by s.snap_date ,time desc;
查看CPU的等待时长
--Top CPU/wait consumers over the last 30 minutes, from ASH.
-- Samples with session_state <> 'WAITING' are bucketed as
-- 'Cpu + Wait For Cpu'; each ASH sample is ~1 second, so cnt
-- approximates seconds spent.
select t.*, s.sid, s.serial#, s.machine, s.program, s.osuser
from (select c.USERNAME,
a.event,
to_char(a.cnt) as seconds,
a.sql_id,
-- first 100 characters of the full statement text
dbms_lob.substr(b.sql_fulltext,100,1) sqltext
from (select rownum rn, t.*
from (select decode(s.session_state,
'WAITING',
s.event,
'Cpu + Wait For Cpu') Event,
s.sql_id,
s.user_id,
count(*) CNT
from v$active_session_history s
where sample_time > sysdate - 30 / 1440
group by s.user_id,
decode(s.session_state,
'WAITING',
s.event,
'Cpu + Wait For Cpu'),
s.sql_id
order by CNT desc) t
where rownum < 20) a,
v$sqlarea b,
dba_users c
where a.sql_id = b.sql_id
and a.user_id = c.user_id
order by CNT desc) t,
v$session s
-- outer join keeps rows whose SQL is not currently active in any session
where t.sql_id = s.sql_id(+);
查看当前等待事件
--Current wait events: how many sessions sit on each event right now.
select sess.event, count(*)
  from v$session sess
 group by sess.event
 order by count(*) desc;
--Cumulative wait-event totals accumulated since instance startup.
select evt.event, evt.total_waits
  from v$system_event evt
 order by evt.total_waits desc;
查看PGA占用最多的进程
prompt <p>PGA占用最多的进程
--Top 20 server processes by allocated PGA memory, joined to their
--sessions (outer join keeps processes that have no session row).
SELECT *
  FROM (SELECT p.spid,
               p.pid,
               s.sid,
               s.serial#,
               s.status,
               p.pga_alloc_mem,
               s.username,
               s.osuser,
               s.program
          FROM v$process p, v$session s
         WHERE s.paddr(+) = p.addr
         ORDER BY p.pga_alloc_mem DESC)
 WHERE rownum < 21;
逻辑读最多的SQL
--Top 10 statements by buffer gets (logical reads).
select *
  from (select s.sql_id,
               s.sql_text,
               s.executions,
               s.last_load_time,
               s.first_load_time,
               s.disk_reads,
               s.buffer_gets
          from v$sql s
         where s.buffer_gets > 300
         order by s.buffer_gets desc)
 where rownum <= 10;
物理读最多的SQL
--Top 10 statements by physical disk reads.
select *
  from (select s.sql_id,
               s.sql_text,
               s.executions,
               s.last_load_time,
               s.first_load_time,
               s.disk_reads,
               s.buffer_gets,
               s.parse_calls
          from v$sql s
         where s.disk_reads > 300
         order by s.disk_reads desc)
 where rownum <= 10;
解析次数最多的
--Top 10 statements by number of parse calls.
select *
  from (select s.sql_id,
               s.sql_text,
               s.executions,
               s.last_load_time,
               s.first_load_time,
               s.disk_reads,
               s.buffer_gets,
               s.parse_calls
          from v$sql s
         order by s.parse_calls desc)
 where rownum <= 10;
磁盘排序最多的SQL
--Statements currently consuming the most temporary (disk) sort blocks.
select sess.username, sql.sql_text, sql.address, sort1.blocks
  from v$session sess, v$sqlarea sql, v$sort_usage sort1
 where sort1.session_num = sess.serial#
   and sql.address = sort1.sqladdr
   and sql.hash_value = sort1.sqlhash
   and sort1.blocks > 200
 order by sort1.blocks desc;
系统目前的锁
-- Locks currently held in the system (SYS-owned objects excluded),
-- decoding lock type and lock mode and joining V$LOCK.ID1 to the object.
-- NOTE(review): the RULE hint is deprecated in modern Oracle releases;
-- kept as-is from the original script.
SELECT /*+ RULE */
LS.OSUSER OS_USER_NAME,
LS.USERNAME USER_NAME,
DECODE(LS.TYPE,
'RW',
'Row wait enqueue lock',
'TM',
'DML enqueue lock',
'TX',
'Transaction enqueue lock',
'UL',
'User supplied lock') LOCK_TYPE,
O.OBJECT_NAME OBJECT,
-- LMODE 1..6: null, row share, row exclusive, share, share row
-- exclusive, exclusive
DECODE(LS.LMODE,
1,
NULL,
2,
'Row Share',
3,
'Row Exclusive',
4,
'Share',
5,
'Share Row Exclusive',
6,
'Exclusive',
NULL) LOCK_MODE,
O.OWNER,
LS.SID,
LS.SERIAL# SERIAL_NUM,
LS.ID1,
LS.ID2
FROM SYS.DBA_OBJECTS O,
(SELECT S.OSUSER,
S.USERNAME,
L.TYPE,
L.LMODE,
S.SID,
S.SERIAL#,
L.ID1,
L.ID2
FROM V$SESSION S, V$LOCK L
WHERE S.SID = L.SID) LS
WHERE O.OBJECT_ID = LS.ID1
AND O.OWNER <> 'SYS'
ORDER BY O.OWNER, O.OBJECT_NAME;
查看等待锁
-- Blocker / blockee pairs: session a holds a blocking lock (block=1),
-- session b is waiting (request>0) on the same resource (id1, id2).
select /*+no_merge(a) no_merge(b) */
(select username from v$session where sid=a.sid) blocker,
a.sid, 'is blocking',
(select username from v$session where sid=b.sid) blockee,
b.sid
from v$lock a,v$lock b
where a.block=1 and b.request>0
and a.id1=b.id1
and a.id2=b.id2
order by a.sid;
表空间的使用情况
-- Tablespace usage in GB: total size from data files vs free space from
-- dba_free_space.  The outer join keeps tablespaces that have no free
-- extents at all; result is ordered by percentage free (column 5).
SELECT A.TABLESPACE_NAME "表空间名",
A.TOTAL_SPACE "总空间(G)",
NVL(B.FREE_SPACE, 0) "剩余空间(GB)",
A.TOTAL_SPACE - NVL(B.FREE_SPACE, 0) "使用空间(GB)",
CASE WHEN A.TOTAL_SPACE=0 THEN 0 ELSE trunc(NVL(B.FREE_SPACE, 0) / A.TOTAL_SPACE * 100, 2) END "剩余百分比%" --guard against a zero divisor
FROM (SELECT TABLESPACE_NAME, trunc(SUM(BYTES) / 1024 / 1024/1024 ,2) TOTAL_SPACE
FROM DBA_DATA_FILES
GROUP BY TABLESPACE_NAME) A,
(SELECT TABLESPACE_NAME, trunc(SUM(BYTES / 1024 / 1024/1024 ),2) FREE_SPACE
FROM DBA_FREE_SPACE
GROUP BY TABLESPACE_NAME) B
WHERE A.TABLESPACE_NAME = B.TABLESPACE_NAME(+)
ORDER BY 5;
监控提交过于频繁的语句
我们知道,如果是更新语句会产生比较多redo日志,会产生一定的性能问题,同时也有很大可能是SQL写得有问题的地方,所以这也是一个监控点,下面是定位的方案。
--Sessions that have committed 10000 times or more (candidates for
--over-frequent commits).
select st.sid, st.value, sn.name
  from v$sesstat st, v$statname sn
 where sn.name like '%user commits%'
   and st.statistic# = sn.statistic#
   and st.value >= 10000
 order by st.value desc;
--Plug the SIDs found above into V$SESSION / V$SQL for further analysis
--to obtain the SQL_ID.
-- Template: replace SID in the IN-list with the session ids found above.
select t.SID, t.PROGRAM,t.EVENT,t.LOGON_TIME,t.WAIT_TIME,
t.SECONDS_IN_WAIT,t.SQL_ID,t.PREV_SQL_ID
from v$session t
where sid in(SID) ;
--Template: substitute the sql_id or prev_sql_id found above to retrieve
--the statement text and load statistics.
select t.sql_id,t.sql_text,t.EXECUTIONS,
t.FIRST_LOAD_TIME,t.LAST_LOAD_TIME
from v$sqlarea t
where sql_id in ('sql_Id');
监控日志暴增问题并定位SQL
日志暴增,会产生数据库极大的IO问题,同时会妨碍系统对请求的响应。突然的暴增日志,如果不是由于业务量的突然增加,那么就很有可能是糟糕的SQL引起的,怎么定位,请看下面例子:
1、redo大量产生必然是由于大量产生"块改变"。从awr视图中找出"块改变"最多的segments
-- Step 1: segments with the most block changes in the last hour of AWR
-- snapshots (heavy block changes are what generate redo).
select * from (
SELECT to_char(begin_interval_time, 'YYYY_MM_DD HH24:MI') snap_time,
dhsso.object_name,
SUM(db_block_changes_delta)
FROM dba_hist_seg_stat dhss,
dba_hist_seg_stat_obj dhsso,
dba_hist_snapshot dhs
WHERE dhs.snap_id = dhss. snap_id
AND dhs.instance_number = dhss. instance_number
AND dhss.obj# = dhsso. obj#
AND dhss.dataobj# = dhsso.dataobj#
AND begin_interval_time> sysdate - 60/1440
GROUP BY to_char(begin_interval_time, 'YYYY_MM_DD HH24:MI'),
dhsso.object_name
order by 3 desc)
where rownum<=5;
2、从awr视图中找出步骤1中排序靠前的对象涉及到的SQL
-- Step 2: AWR SQL whose text references the hot object found in step 1.
-- Template: replace the literal inside LIKE with the object name in
-- upper case before running.
SELECT to_char(begin_interval_time, 'YYYY_MM_DD HH24:MI'),
dbms_lob.substr(sql_text, 4000, 1),
dhss.instance_number,
dhss.sql_id,
executions_delta,
rows_processed_delta
FROM dba_hist_sqlstat dhss, dba_hist_snapshot dhs, dba_hist_sqltext dhst
WHERE UPPER(dhst.sql_text) LIKE '%这里写对象名大写%'
AND dhss.snap_id = dhs.snap_id
AND dhss.instance_Number = dhs.instance_number
AND dhss.sql_id = dhst.sql_id;
--Step 3: from the ASH views, find the session/module/machine that ran
--these statements (fill in the sql_id literals before running).
select * from dba_hist_active_sess_history WHERE sql_id = '';
select * from v$active_session_history where sql_Id = '';
监控ARCHIVELOG日志使用率
--查看ARCHIVELOG日志使用率(进而观察DB_RECOVERY_FILE_DEST_SIZE参数)
-- Fast recovery area quota vs. space used/reclaimable and file count;
-- compare "used" against the db_recovery_file_dest_size quota.
select substr(name, 1, 30) name,
space_limit as quota,
space_used as used,
space_reclaimable as reclaimable,
number_of_files as files
from v$recovery_file_dest;
-- Usage broken down by file type (archived logs, backups, flashback logs...).
select * from V$FLASH_RECOVERY_AREA_USAGE;
观察日志切换频率
--Last 50 log switches; very short gaps between consecutive first_time
--values indicate the online redo logs are switching too frequently.
select *
  from (select thread#, sequence#, to_char(first_time, 'MM/DD/RR HH24:MI:SS')
          from v$log_history
         order by first_time desc)
 where rownum <= 50;
监控近几天的日志切换量
--Log switches per hour over roughly the last 10 days: each Hnn column
--counts the switches during that hour of the day, so both day-to-day
--waves and intra-day spikes stand out at a glance.
SELECT SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,5) Day,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'00',1,0)) H00,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'01',1,0)) H01,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'02',1,0)) H02,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'03',1,0)) H03,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'04',1,0)) H04,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'05',1,0)) H05,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'06',1,0)) H06,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'07',1,0)) H07,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'08',1,0)) H08,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'09',1,0)) H09,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'10',1,0)) H10,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'11',1,0)) H11,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'12',1,0)) H12,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'13',1,0)) H13,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'14',1,0)) H14,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'15',1,0)) H15,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'16',1,0)) H16,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'17',1,0)) H17,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'18',1,0)) H18,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'19',1,0)) H19,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'20',1,0)) H20,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'21',1,0)) H21,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'22',1,0)) H22,
SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'23',1,0)) H23,
COUNT(*) TOTAL
FROM v$log_history a
-- fix: compare DATE to DATE directly; the original used to_char(sysdate-11),
-- forcing an implicit, NLS-format-dependent conversion of the filter value
where first_time >= sysdate - 11
GROUP BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,5)
ORDER BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,5) DESC;
监控过多分区的表
--Partitioned tables in the current schema with more than 100 partitions.
select table_name, partitioning_type, subpartitioning_type
  from user_part_tables
 where partition_count > 100;
监控分区数据不均匀的表
--当前用户下,某个分区的记录数是平均记录数的2倍以上
-- SQL*Plus display settings for the report below.
set linesize 266
col table_name format a20
-- Partitioned tables in the current schema where the biggest partition
-- holds more than twice the average partition's rows (skewed data).
select table_name,
max(num_rows),
trunc(avg(num_rows),0),
sum(num_rows),
trunc(max(num_rows) / sum(num_rows),2),
count(*)
from user_tab_partitions
group by table_name
-- max/sum > 2/count  <=>  max(num_rows) > 2 * avg(num_rows)
having max(num_rows) / sum(num_rows) > 2 / count(*);
监控被收集了统计信息的临时表。
我们知道全局临时表在会话结束后会被清空,如果收集了统计信息,相当于收集的是空表的统计信息。在接下来的操作中临时表被灌入大量数据,这时数据库会因为收集了错误的统计信息而产生错误的执行计划。
--Global temporary tables that have optimizer statistics gathered on them.
--GTTs are empty outside the loading session, so stats collected on them
--usually describe an empty table and lead to bad plans once data arrives.
--fix: USER_TABLES has no OWNER column (that would raise ORA-00904);
--use the USER function, which is the schema for all USER_* views.
select user owner,
       table_name,
       t.last_analyzed,
       t.num_rows,
       t.blocks
  from user_tables t
 where t.temporary = 'Y'
   and t.last_analyzed is not null;
监控外键不建立索引的表
--Foreign keys in the current schema whose columns are not covered by any
--index (unindexed FKs make parent-side DML lock and scan the child table).
--Handles constraints of up to 8 columns.
--fix: the statement was missing its terminating semicolon, so in a script
--it would swallow whatever text followed it.
select table_name,
       constraint_name,
       cname1 || nvl2(cname2, ',' || cname2, null) ||
       nvl2(cname3, ',' || cname3, null) ||
       nvl2(cname4, ',' || cname4, null) ||
       nvl2(cname5, ',' || cname5, null) ||
       nvl2(cname6, ',' || cname6, null) ||
       nvl2(cname7, ',' || cname7, null) ||
       nvl2(cname8, ',' || cname8, null) columns
  from (select b.table_name,
               b.constraint_name,
               -- pivot the constraint columns into one row per constraint
               max(decode(position, 1, column_name, null)) cname1,
               max(decode(position, 2, column_name, null)) cname2,
               max(decode(position, 3, column_name, null)) cname3,
               max(decode(position, 4, column_name, null)) cname4,
               max(decode(position, 5, column_name, null)) cname5,
               max(decode(position, 6, column_name, null)) cname6,
               max(decode(position, 7, column_name, null)) cname7,
               max(decode(position, 8, column_name, null)) cname8,
               count(*) col_cnt
          from (select substr(table_name, 1, 30) table_name,
                       substr(constraint_name, 1, 30) constraint_name,
                       substr(column_name, 1, 30) column_name,
                       position
                  from user_cons_columns) a,
               user_constraints b
         where a.constraint_name = b.constraint_name
           and b.constraint_type = 'R'
         group by b.table_name, b.constraint_name) cons
 -- an index covers the FK when it indexes at least col_cnt of the FK's
 -- columns within its leading col_cnt positions
 where col_cnt > ALL
       (select count(*)
          from user_ind_columns i
         where i.table_name = cons.table_name
           and i.column_name in (cname1, cname2, cname3, cname4, cname5,
                                 cname6, cname7, cname8)
           and i.column_position <= cons.col_cnt
         group by i.index_name);
监控失效的普通索引
--Broken (non-partitioned) indexes in the current schema.
-- NOTE(review): user_indexes.status is normally 'VALID' or 'UNUSABLE'
-- ('N/A' for partitioned indexes); confirm 'INVALID' is the value this
-- database actually reports before relying on this filter.
select t.index_name,
t.table_name,
blevel,
t.num_rows,
t.leaf_blocks,
t.distinct_keys
from user_indexes t
where status = 'INVALID';
监控失效的分区索引
--Unusable local index partitions in the current schema.
select t1.blevel,
       t1.leaf_blocks,
       t1.index_name,
       t2.table_name,
       t1.partition_name,
       t1.status
  from user_ind_partitions t1, user_indexes t2
 where t2.index_name = t1.index_name
   and t1.status = 'UNUSABLE';
监控大表未建立分区
--当前用户下,表大小超过10个GB未建分区的
--Non-partitioned tables over 10 GB in the current schema
--(segment_type = 'TABLE' already excludes partitioned tables, whose
--segments are of type 'TABLE PARTITION').
--fix: USER_SEGMENTS has no OWNER column (selecting/grouping by it raises
--ORA-00904); the view is scoped to the current user anyway, so it is dropped.
select segment_name,
       segment_type,
       sum(bytes) / 1024 / 1024 / 1024 object_size
  from user_segments
 WHERE segment_type = 'TABLE'
 group by segment_name, segment_type
having sum(bytes) / 1024 / 1024 / 1024 >= 10
 order by object_size desc;
监控是否有过时的字段
--Columns still declared with legacy types: LONG is deprecated and
--blank-padded CHAR is usually unintended.
select table_name,
       column_name,
       data_type
  from user_tab_columns
 where data_type in ('LONG', 'CHAR');
是否存在单列索引和组合索引有重叠
-- Tables whose indexed columns overlap across indexes: the ratio of
-- distinct indexed columns to total index-column entries.  A ratio below 1
-- means some column appears in more than one index — a candidate for a
-- redundant single-column / composite index pair.
select table_name, trunc(count(distinct(column_name)) / count(*),2) cross_idx_rate
from user_ind_columns
group by table_name
having count(distinct(column_name)) / count(*) < 1
order by cross_idx_rate ;
监控从没有使用过的索引
--Indexes under usage monitoring whose monitoring window ended and that
--started monitoring at least 30 days ago without ever being used.
select *
  from v$object_usage
 where USED = 'NO'
      -- fix: START_MONITORING is VARCHAR2 ('MM/DD/YYYY HH24:MI:SS');
      -- comparing it to a DATE relied on implicit NLS-dependent
      -- conversion, which can raise ORA-01861 — convert explicitly
   and to_date(START_MONITORING, 'mm/dd/yyyy hh24:mi:ss') <= sysdate - 30
   and END_MONITORING is not null;
监控索引的二元高度
过高的索引会带来查询性能的降低
--当前用户下,哪些索引的高度比较高,达到5层及以上(BLEVEL>=4)
--Indexes in the current schema whose B-tree height is 5 levels or more
--(blevel >= 4); overly tall indexes slow every key lookup down.
select table_name,
       index_name,
       blevel,
       leaf_blocks,
       num_rows,
       last_analyzed,
       degree,
       status
  from user_indexes
 where blevel >= 4;
监控很久没被收集统计信息的表
--Per-schema count of tables never analyzed, or not analyzed for more
--than 100 days (recycle-bin objects excluded).
select owner, count(*)
  from dba_tab_statistics t
 where (t.last_analyzed is null or t.last_analyzed < sysdate - 100)
   and table_name not like 'BIN$%'
 group by owner
 order by owner;
监控被设置了并行度的索引
一般在OLTP场景下如果被设置了并行度,高并发的场景会有严重的块争用现象
--Indexes in the current schema with a parallel degree set above 1.
--fix: user_indexes.degree is a VARCHAR2 that can hold 'DEFAULT', so the
--original numeric predicate (degree > 1) can fail with ORA-01722 on
--implicit conversion; compare as strings instead.  Rows with degree
--'DEFAULT' are skipped here — review those separately if relevant.
select table_name,
       index_name,
       blevel,
       leaf_blocks,
       num_rows,
       last_analyzed,
       degree,
       status
  from user_indexes
 where trim(degree) not in ('0', '1', 'DEFAULT');
监控过大的索引聚簇因子
聚簇因子越大,说明通过索引回表时访问的表块越分散,回表会消耗更多IO
-- Indexes in the current schema whose clustering factor approaches the
-- table's row count (ratio > 0.9): table rows are poorly ordered relative
-- to the index, so index range scans touch many scattered table blocks.
select a.table_name,
a.index_name,
a.blevel,
a.leaf_blocks,
b.num_rows,
b.blocks,
a.clustering_factor,
trunc(a.clustering_factor / b.num_rows,2) cluster_rate
from user_indexes a, user_tables b
where a.table_name = b.table_name
and a.clustering_factor is not null
and a.clustering_factor / b.num_rows>0.9
order by cluster_rate desc ;
监控过多索引的表
--Tables in the current schema carrying 5 or more indexes.
select table_name, count(*) cnt
  from user_indexes
 group by table_name
having count(*) >= 5
 order by cnt desc;
监控索引组合列过多
--Composite indexes in the current schema with 4 or more columns.
--fix: the GROUP BY clause was written as "group table_name, index_name",
--missing the BY keyword — a syntax error (ORA-00933).
select table_name, index_name, count(*)
  from user_ind_columns
 group by table_name, index_name
having count(*) >= 4
 order by count(*) desc;
监控是否存在特殊索引
位图索引,函数索引,全文索引,反向索引皆为特殊索引,很容易因为产生使用不当造成性能瓶颈
--Bitmap indexes in the current schema: owning table, column, position
--within the key, and index status.
select t1.table_name,
       t1.index_name,
       t2.column_name,
       t2.column_position,
       t1.status
  from user_indexes t1, user_ind_columns t2
 where t2.index_name = t1.index_name
   and t1.index_type = 'BITMAP';
--Domain (e.g. Oracle Text full-text) indexes in the current schema:
--table, column, index parameters, and both status columns.
select t1.table_name,
       t1.index_name,
       t1.parameters,
       t2.column_name,
       t2.column_position,
       t1.status,
       t1.domidx_status
  from user_indexes t1, user_ind_columns t2
 where t2.index_name = t1.index_name
   and t1.index_type = 'DOMAIN';
--Function-based indexes in the current schema: table, column, position,
--index status and function status.
select t1.table_name,
       t1.index_name,
       t2.column_name,
       t2.column_position,
       t1.status,
       t1.funcidx_status
  from user_indexes t1, user_ind_columns t2
 where t2.index_name = t1.index_name
   and t1.index_type = 'FUNCTION-BASED NORMAL';
--Reverse-key indexes in the current schema: table, column, position
--within the key, and index status.
select t1.table_name,
       t1.index_name,
       t2.column_name,
       t2.column_position,
       t1.status
  from user_indexes t1, user_ind_columns t2
 where t2.index_name = t1.index_name
   and t1.index_type = 'NORMAL/REV';
查看是否存在触发器
--Triggers owned by the schema supplied via the &S_SCHEMA substitution
--variable, with their table and enabled/disabled status.
select owner, trigger_name, table_name, status
  from dba_triggers
 where owner = '&S_SCHEMA';