Thursday, December 15, 2011
Part 2: Identify Corrupted Database Objects
After collecting the corruption report in V$DATABASE_BLOCK_CORRUPTION, I ran the following query:
SQL > select * from v$database_block_corruption;
FILE#     BLOCK#     BLOCKS CORRUPTION_CHANGE# CORRUPTION_TYPE
----- ---------- ---------- ------------------ ---------------
   64      57306          1        42451322513 CORRUPT
   64      57148          1        41767312306 CORRUPT
   64      57132          1        41767312306 CORRUPT
Step 1 : Create base table
create table pradipta.extent_report
as
select owner,segment_name,partition_name,segment_type,tablespace_name,file_id,block_id
from
dba_extents where rownum < 0;
Step 2 : Run Segment Identifier
BEGIN
  for c1 in (select file#, block# from v$database_block_corruption)
  LOOP
    insert into pradipta.extent_report
      (select owner, segment_name, partition_name, segment_type, tablespace_name, file_id, block_id
       from dba_extents
       where file_id = c1.file#
         and c1.block# between block_id and block_id + blocks - 1);
  END LOOP;
  commit;
END;
/
Step 3: Check corrupted segments
SQL> select * from pradipta.extent_report;

OWNER SEGMENT_NAME PARTITION_NAME SEGMENT_TYPE    TABLESPACE_NAME FILE_ID BLOCK_ID
----- ------------ -------------- --------------- --------------- ------- --------
PDD   IDX_PAT_01   ORG0330        INDEX PARTITION IDX_PDD              64    57093
PDD   IDX_PAT_01   ORG0330        INDEX PARTITION IDX_PDD              64    57093
PDD   IDX_PAT_01   ORG0330        INDEX PARTITION IDX_PDD              64    57093
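The same mapping can also be done in a single pass, which is handy for a quick check. This is just a sketch of the logic used in Step 2 above; it assumes SELECT access on DBA_EXTENTS and can be slow on databases with a very large number of extents:

-- Map each corrupted block directly to its owning segment
SQL> select e.owner, e.segment_name, e.partition_name, e.segment_type,
            e.tablespace_name, e.file_id, e.block_id
     from   dba_extents e,
            v$database_block_corruption c
     where  e.file_id = c.file#
     and    c.block# between e.block_id and e.block_id + e.blocks - 1;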
Tuesday, November 8, 2011
Part 1: Logical validation of the database
After we restored our production database from tape backup, we found several logical corruptions, e.g.:
ORA-01578: ORACLE data block corrupted (file # 11, block # 4262)
ORA-01110: data file 11: '/db01/oracle/PROD/datafile//data01.dbf'
So I decided to run a full database validation in my own way.
Step 1: Create a status table for the datafiles
SQL> create table pradipta.rman_data_file_chk as
     select file_id, cast(null as varchar2(10)) status from dba_data_files;
(This table is used to record and monitor the status of each datafile check.)
Step 2 : run_data_chk.sh (Pilot Script)
#!/bin/sh
. ~oracle/.oenv
ORACLE_SID=mdd11g
export ORACLE_SID
ORACLE_HOME=$ORACLE_HOME
export ORACLE_HOME
ORACLE_SID=$ORACLE_SID
export ORACLE_SID
sqlplus -s / as sysdba << !EOF
DECLARE
v_input varchar2(4000);
BEGIN
for c1 in (select file_id from pradipta.rman_data_file_chk order by file_id)
LOOP
v_input := '/u02/bin/RMAN/script/logical_chk.sh ' || c1.file_id;
sys.host(v_input); -- (my magic OS setup to run a host command from PL/SQL)
END LOOP;
END LOOP;
END;
/
exit;
!EOF
exit
Step 3: Unix Script : logical_chk.sh (Slave Script)
#!/bin/sh
. ~oracle/.oenv
ORACLE_SID=mdd11g
export ORACLE_SID
datafile=$1
ORACLE_HOME=$ORACLE_HOME
export ORACLE_HOME
ORACLE_SID=$ORACLE_SID
export ORACLE_SID
rman target / nocatalog << EOF
RUN
{
allocate channel d1 type disk;
backup check logical validate datafile $datafile;
release channel d1;
}
EOF
RSTAT=$?
if [ "$RSTAT" = "0" ]
then
LOGMSG="SUCESS"
else
LOGMSG="FAILED"
fi
sqlplus -s / as sysdba << !EOF
update pradipta.rman_data_file_chk set status='${LOGMSG}' where file_id=${datafile};
commit;
exit;
!EOF
exit $RSTAT
Step 4: Run pilot script to identify logical corruption
$ ./run_data_chk.sh &
Step 5: Check Status
SQL> select * from pradipta.rman_data_file_chk;

FILE_ID    STATUS
---------- ----------
        64 FAILED
        68 SUCCESS
        69 SUCCESS
        79 SUCCESS
        81 SUCCESS
        82 SUCCESS
        84 SUCCESS
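For any datafile that comes back FAILED, the individual corrupted blocks found by BACKUP ... VALIDATE CHECK LOGICAL are recorded in V$DATABASE_BLOCK_CORRUPTION, which is exactly the report Part 2 above starts from. A quick follow-up query (file 64 is the datafile flagged FAILED in the output above):

-- List the corrupted blocks detected in the failed datafile
SQL> select file#, block#, blocks, corruption_change#, corruption_type
     from   v$database_block_corruption
     where  file# = 64;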
Wednesday, October 12, 2011
ORA-15196 After ASM Was Upgraded From 10gR2 To 11gR2
After upgrading a 10gR2 ASM instance to 11.2, ASM is able to mount the disk groups; however, they quickly dismount as soon as v$asm_file is queried:
SUCCESS: diskgroup DATA_DG01 was mounted
SUCCESS: ALTER DISKGROUP DATA_DG01 MOUNT /* asm agent */
Thu Jun 24 15:13:05 2010
NOTE: diskgroup resource ora.DATA_DG01.dg is online
Thu Jun 24 15:18:31 2010
WARNING: cache read a corrupted block group=DATA_DG01 fn=1 blk=512 from disk 0
NOTE: a corrupted block from group DATA_DG01 was dumped to
/u000/app/grid/diag/asm/+asm/+ASM/trace/+ASM_ora_348464.trc
WARNING: cache read(retry) a corrupted block group=DATA_DG01 fn=1 blk=512
from disk 0
ERROR: cache failed to read group=DATA_DG01 fn=1 blk=512 from disk(s): 0
DATA_DG01_0000
ORA-15196: invalid ASM block header [kfc.c:23925] [hard_kfbh] [1] [512] [0 !=130]
Solution
-----------
1) Run the following from the 11.2 ASM instance on each disk group a few times (say 3 times):
SQL> ALTER DISKGROUP DATA_DG01 CHECK ALL REPAIR;
The first time this command is executed, the ASM alert log will show entries like:
"ERROR: file 1 extent 0: blocks XX to XX are unformatted"
On subsequent runs, the ASM alert log will show something like:
"ERROR: file 1 extent 0: blocks XX to XX are unformatted"
"SUCCESS: file 1 extent 0 repaired"
2) Check whether the 11.2 ASM instance still dismounts the disk group when v$asm_file is queried (see the example query after these steps).
3) If the disk group continues to be dismounted with the above errors, restore the 10gR2 environment (i.e., the state prior to the ASM upgrade) and apply Patch 5100163 to the 10.2.0.4 ASM home.
4) After the patch is installed, run ALTER DISKGROUP ... CHECK ALL REPAIR on all the disk groups, then retry the ASM upgrade.
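To reproduce the check in step 2, query v$asm_file for the affected disk group from the 11.2 ASM instance; if the metadata is still damaged, the disk group will dismount again. A sketch (the group name DATA_DG01 is taken from the alert log above):

-- Touch v$asm_file for the affected disk group only
SQL> select g.name diskgroup, count(*) files
     from   v$asm_file f, v$asm_diskgroup g
     where  f.group_number = g.group_number
     and    g.name = 'DATA_DG01'
     group  by g.name;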
Thursday, September 22, 2011
Add a New Table/New Schema to an Existing Streams Setup
Step 1: Stop Apply process in Target
SQL> conn strmadmin/strmadmin
-- Check apply process name
select apply_name from dba_apply where apply_user=USER;
-- Stop apply process
SQL>
begin
dbms_apply_adm.stop_apply('TARGET_APPLY');
end;
/
Step 2: Creating Schema Apply rule in Target system
SQL> conn strmadmin/strmadmin
SQL>
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_RULES(
schema_name => 'STUDENT',
streams_type => 'APPLY',
streams_name => 'TARGET_APPLY',
queue_name => 'STRMADMIN.TARGET_Q',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'MDDRCAT.MDDATACOR.ORG');
END;
/
Step 3: Creating propagation process on Downstream for new schema
SQL> conn strmadmin/strmadmin
SQL>
begin
dbms_streams_adm.add_schema_propagation_rules (
schema_name => 'STUDENT',
streams_name => 'DOWNSTREAM_PROPAGATE',
source_queue_name => 'STRMADMIN.DOWNSTREAM_Q',
destination_queue_name=> 'STRMADMIN.TARGET_Q@MDDPROD.MDDATACOR.ORG',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'MDDRCAT.MDDATACOR.ORG',
inclusion_rule => TRUE,
queue_to_queue => TRUE);
end;
/
Step 4: Creating Schema CAPTURE rules on Downstream for new schema
SQL > conn strmadmin/strmadmin
SQL>
BEGIN
DBMS_STREAMS_ADM.ADD_SCHEMA_RULES(
schema_name => 'STUDENT',
streams_type => 'CAPTURE',
streams_name => 'DOWNSTREAM_CAPTURE',
queue_name => 'STRMADMIN.DOWNSTREAM_Q',
include_dml => TRUE,
include_ddl => TRUE,
include_tagged_lcr => FALSE,
source_database => 'MDDRCAT.MDDATACOR.ORG',
inclusion_rule => TRUE);
end;
/
Step 5: Instantiate the Schema at Target
SQL > conn strmadmin/strmadmin
SQL>
DECLARE
iscn NUMBER; -- Variable to hold instantiation SCN value
BEGIN
iscn := DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER@MDDRCAT.MDDATACOR.ORG ();
DBMS_APPLY_ADM.SET_SCHEMA_INSTANTIATION_SCN(
source_schema_name => 'STUDENT',
source_database_name => 'MDDRCAT.MDDATACOR.ORG',
instantiation_scn => iscn,
recursive => true);
END;
/
-- Check Schema INSTANTIATION
SQL> select * from DBA_APPLY_INSTANTIATED_SCHEMAS;
Step 6: Start Apply process in Target
SQL> conn strmadmin/strmadmin
-- Check apply process name
select apply_name from dba_apply where apply_user=USER;
-- Start apply process
SQL>
begin
dbms_apply_adm.start_apply('TARGET_APPLY');
end;
/
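Optionally, verify that the new schema rules and the instantiation SCN are in place. A sketch (the schema name STUDENT comes from the steps above; run the first query on both the downstream and target sites):

-- Confirm the capture/propagation/apply schema rules exist for the new schema
SQL> select streams_name, streams_type, schema_name, rule_name
     from   dba_streams_schema_rules
     where  schema_name = 'STUDENT';

-- Confirm the instantiation SCN recorded in Step 5
SQL> select source_schema, instantiation_scn
     from   dba_apply_instantiated_schemas
     where  source_schema = 'STUDENT';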
Wednesday, June 8, 2011
ASM foot notes
- ASM does NOT perform I/O on behalf of the RDBMS
- I/O is performed by the RDBMS processes as it does with other storage types
- Thus, ASM is not an intermediary for I/O (would be a bottleneck)
- I/O can occur synchronously or asynchronously depending on the value of the DISK_ASYNCH_IO parameter
- Disks are RAW devices to ASM
- Files that can be stored in ASM: typical database data files, control files, redo logs, archive logs, flashback logs, spfiles, RMAN backups, incremental tracking bitmaps, and Data Pump dump sets
- The smallest unit of storage written to disk is called an "allocation unit" (AU) and is usually 1MB (4MB recommended for Exadata)
- Very simply, ASM is organized around storing files
- Files are divided into pieces called "extents"
- Extent sizes are typically equal to 1 AU, except in 11g where it will use variable extent sizes that can be 1, 8, or 64 AUs
- File extent locations are maintained by ASM using file extent maps.
- ASM maintains file metadata in headers on the disks rather than in a data dictionary
- The file extent maps are cached in the RDBMS shared pool; these are consulted when an RDBMS process does I/O
- ASM is very crash resilient since it uses instance / crash recovery similar to a normal RDBMS (similar to using undo and redo logging)
- A DG has a name like "DATA" in ASM which is visible to the RDBMS as a file name beginning with "+DATA"; when tablespaces are created, they refer to a DG for storage such as "+DATA/.../..."
- Beneath a diskgroup are one or more failure groups (FGs)
- FGs are defined over a set of "disks"
- "Disks" can be based on raw physical volumes, a disk partition, a LUN presenting a disk array, or even an LVM or NAS device
- FGs should have disks defined that have a common failure component, otherwise ASM redundancy will not be effective
- ASM can perform mirroring to recover from device failures
- You have a choice of EXTERNAL, NORMAL, OR HIGH redundancy mirroring
- EXTERNAL means the underlying physical disk array does the mirroring
- NORMAL means ASM will create one additional copy of an extent for redundancy
- HIGH means ASM will create two additional copies of an extent for redundancy
- Mirroring is implemented via "failure groups" and extent partnering; ASM can tolerate the complete loss of all disks in a failure group when NORMAL or HIGH redundancy is implemented
- Mirroring is not implemented like RAID 1 arrays (where a disk is partnered with another disk)
- Mirroring occurs at the file extent level and these extents are distributed among several disks known as "partners"
- Partner disks will reside in one or more separate failure groups (otherwise mirror copies would be vulnerable)
- ASM automatically chooses partners and limits the number of them to less than 10 (varies by RDBMS version) in order to contain the overall impact of multiple disk failures
- If a disk fails, then ASM updates its extent mapping such that reads will now occur on the surviving partners
- This is one example when ASM and the RDBMS communicate with each other
Rebalancing
- "Rebalancing" is the process of moving file extents onto or off of disks for the purpose of evenly distributing the I/O load of the diskgroup
- It occurs asynchronously in the background and can be monitored
- In a clustered environment, rebalancing for a disk group is done within a single ASM instance only and cannot be distributed across multiple cluster nodes to speed it up
- ASM will automatically rebalance data on disks when disks are added or removed
- The speed and effort placed on rebalancing can be controlled via a POWER LIMIT setting (see the example after these notes)
- POWER LIMIT controls the number of background processes involved in the rebalancing effort and is limited to 11. Level 0 means no rebalancing will occur
- I/O performance is impacted during rebalancing, but the amount of impact varies depending on which disks are being rebalanced and how much they contribute to the I/O workload. The default power limit was chosen so as not to impact application performance
- ASM will maximize the available bandwidth of disks by striping file extents across all disks in a DG
- Two stripe widths are available: coarse which has a stripe size of 1 AU, and fine with stripe size of 128K
- Fine striping still uses normally-sized file extents, but the striping occurs in small pieces across these extents in a round-robin fashion
- ASM does not read from alternating mirror copies since disks contain primary and mirror extents and I/O is already balanced
- By default the RDBMS will read from a primary extent; in 11.1 this can be changed via the PREFERRED_READ_FAILURE_GROUP parameter setting for cases where reading extents from a local node results in lower latency. Note: This is a special case applicable to "stretch clusters" and not applicable in the general usage of ASM
- ASM can work for RAC and non-RAC databases
- One ASM instance on a node will service any number of instances on that node
- If using ASM for RAC, ASM must also be clustered to allow instances to update each other when file mapping changes occur
- In 11.2 onwards, ASM is installed in a grid home along with the clusterware as opposed to an RDBMS home in prior versions.
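As a small illustration of the POWER LIMIT notes above, a rebalance can be started with an explicit power level and then monitored from the ASM instance. A sketch (the disk group name DATA and the power value 4 are just example values):

-- Kick off (or re-prioritize) a rebalance with an explicit power level
SQL> alter diskgroup DATA rebalance power 4;

-- Monitor rebalance progress from the ASM instance
SQL> select group_number, operation, state, power, sofar, est_work, est_minutes
     from   v$asm_operation;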
Thursday, May 19, 2011
This script calculates the size of every database listed in the oratab file when no argument is passed, or the size of a specific database when one is passed. The first argument ($1) is taken as the instance name; all other arguments are ignored.
#! /bin/ksh
PROGRAM_NAME=`basename $0`
PROG_BIN_DIR=/users/oracle/local/prod/sh
PROG_LOG_DIR=/users/oracle/local/prod/logs
PATH=${PROG_BIN_DIR}:/users/oracle/product/10.2.0/db/bin:/usr/bin:/users/oracle/bin:/usr/local/bin:/sbin:/usr/sbin:/usr/ccs/bin:/usr/ucb:/usr/bin/X11:/usr/bin:/usr/proc/bin:/usr/ucb:/usr/local/bin
ORATABFILE=/etc/oratab
# If the first argument to this script is null, then calculate the size all databases in the oratab file
if [[ -z $1 ]]; then
# Get all SIDs from oratab file
for dbname in `sed -e '/^[ ]*$/d' -e '/^[ ]*\#/d' -e '/^[ ]*\*/d' ${ORATABFILE} |
               cut -d":" -f1 |
               sed 's/ //' |
               grep -v '\+ASM'` ; do
RUNDATE=`date '+%m%d%Y_%H%M%S'`
# Continue with next SID when instance is not running
if [[ `ps -ef |
       grep "ora_smon_${dbname}" |
       grep -v 'grep' |
       wc -l |
       awk '{ print $1 }'` -eq 0 ]]; then
echo "Database Name ${dbname} is not up and running"
continue
fi
# Change the Oracle environment for the current dbname
ORACLE_SID=${dbname}
ORAENV_ASK=NO
. oraenv
ORAENV_ASK=YES
PATH=${ORACLE_HOME}/bin:${PATH}
LIBPATH=${ORACLE_HOME}/lib
echo "Database Size of ${dbname} = \c"
sqlplus -s / <<-EOF
WHENEVER SQLERROR EXIT FAILURE
SET PAGES 0 HEA OFF FEED OFF VER OFF
--
SELECT TO_CHAR(ROUND(SUM(bytes)/1024/1024/1024, 2)) || ' GB'
FROM
(
SELECT SUM(bytes) bytes
FROM dba_data_files
UNION
SELECT SUM(bytes) bytes
FROM dba_temp_files
)
/
EXIT
EOF
RETCODE=$?
if [[ ${RETCODE} != 0 ]]; then
echo "FAILURE : Calculating Database Size of Database ${dbname} On ${RUNDATE}"
continue
fi
done
# If the first argument to this script is not null, then calculate the size of the database specified as the argument
else
dbname=`echo $1 | tr "[A-Z]" "[a-z]"`
RUNDATE=`date '+%m%d%Y_%H%M%S'`
# Exit when dbname does not exist in oratab file
# This is required because oraenv file is being called later
# and if dbname is missing from oratab file, the oraenv script
# will prompt for ORACLE_HOME
if [[ `grep "^${dbname}:" ${ORATABFILE} |
       wc -l |
       awk '{ print $1 }'` -eq 0 ]]; then
echo "Database Name ${dbname} does not exist in ${ORATABFILE} file"
exit 16
fi
# Exit when instance is not running
if [[ `ps -ef |
       grep "ora_smon_${dbname}" |
       grep -v 'grep' |
       wc -l |
       awk '{ print $1 }'` -eq 0 ]]; then
echo "Database Name ${dbname} is not up and running"
exit 16
fi
# Change the Oracle environment for the current dbname
ORACLE_SID=${dbname}
ORAENV_ASK=NO
. oraenv
ORAENV_ASK=YES
PATH=${ORACLE_HOME}/bin:${PATH}
LIBPATH=${ORACLE_HOME}/lib
echo "Database Size of ${dbname} = \c"
sqlplus -s / <<-EOF
WHENEVER SQLERROR EXIT FAILURE
SET PAGES 0 HEA OFF FEED OFF VER OFF
--
SELECT TO_CHAR(ROUND(SUM(bytes)/1024/1024/1024, 2)) || ' GB'
FROM
(
SELECT SUM(bytes) bytes
FROM dba_data_files
UNION
SELECT SUM(bytes) bytes
FROM dba_temp_files
)
/
EXIT
EOF
RETCODE=$?
if [[ ${RETCODE} != 0 ]]; then
echo "FAILURE : Calculating Database Size of Database ${dbname} On ${RUNDATE}"
fi
fi
Thursday, May 12, 2011
Re-execute or Delete Apply Errors and Restart the Streams Apply Process
Step 1: Check Apply status
select apply_name,status from dba_apply;
APPLY_NAME STATUS
------------------------------ --------
TARGET_APPLY ABORTED
If the status is ABORTED, check the DBA_APPLY_ERROR view.
Step 2: Check Apply error
select local_transaction_id,error_number,error_message from dba_apply_error;
LOCAL_TRANSACTION_ID ERROR_NUMBER ERROR_MESSAGE
-------------------- ------------- ------------------------
9.59.1031 959 ORA-00959: tablespace 'CATTBS' does not exist
Step 3: Print Apply error statement and LCRs
SQL> select local_transaction_id from dba_apply_error;
If the local transaction identifier is 1.17.2485, then enter the following:
SQL> set serveroutput on size unlimited
SQL> spool error.log
SQL> EXEC print_transaction('1.17.2485');
SQL> spool off
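Note that print_transaction is not a built-in procedure; this post assumes the helper from the Oracle Streams documentation is already created in the strmadmin schema. A minimal sketch of such a helper is shown below; it only prints the command type and target object of each row LCR in the failed transaction (the documented version prints every LCR attribute):

CREATE OR REPLACE PROCEDURE print_transaction(ltxnid IN VARCHAR2) IS
  msgcnt NUMBER;
  anydat ANYDATA;
  rowlcr SYS.LCR$_ROW_RECORD;
  res    PLS_INTEGER;
BEGIN
  -- Number of LCRs queued for this failed transaction
  SELECT message_count INTO msgcnt
  FROM   dba_apply_error
  WHERE  local_transaction_id = ltxnid;

  FOR i IN 1 .. msgcnt LOOP
    -- Fetch the i-th LCR of the error transaction
    anydat := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, ltxnid);
    IF anydat.GETTYPENAME = 'SYS.LCR$_ROW_RECORD' THEN
      res := anydat.GETOBJECT(rowlcr);
      DBMS_OUTPUT.PUT_LINE('LCR ' || i || ': ' || rowlcr.GET_COMMAND_TYPE ||
                           ' on ' || rowlcr.GET_OBJECT_OWNER || '.' ||
                           rowlcr.GET_OBJECT_NAME);
    ELSE
      DBMS_OUTPUT.PUT_LINE('LCR ' || i || ': ' || anydat.GETTYPENAME);
    END IF;
  END LOOP;
END;
/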
Step 4: Re-execute apply error
-- To re-execute a specific transaction:
exec DBMS_APPLY_ADM.EXECUTE_ERROR('<local_transaction_id>');
-- To Execute all errors:
exec dbms_apply_Adm.execute_all_errors;
Step 5: Delete the apply errors
-- Check the errors in the error queue:
select apply_name, LOCAL_TRANSACTION_ID, ERROR_MESSAGE from dba_apply_error;
-- To delete a specific transaction:
exec DBMS_APPLY_ADM.DELETE_ERROR('<local_transaction_id>');
-- To delete all errors:
exec DBMS_APPLY_ADM.DELETE_ALL_ERRORS('<apply_name>');
Step 6: Check Apply status
select apply_name,status from dba_apply;
APPLY_NAME STATUS
------------------------------ --------
TARGET_APPLY ABORTED
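If the apply process is still ABORTED once the errors have been re-executed or deleted, restart it and re-check the status (the apply name TARGET_APPLY comes from the output above):

-- Restart the apply process and verify its status
SQL> begin
       dbms_apply_adm.start_apply('TARGET_APPLY');
     end;
     /

SQL> select apply_name, status from dba_apply;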
Tuesday, March 1, 2011
Streams Administration
Handy queries for administering Oracle Streams.
--- Info about apply process
select * from dba_apply;
SELECT APPLY_NAME,QUEUE_NAME,RULE_SET_NAME,DECODE(APPLY_CAPTURED,'YES', 'Captured','NO', 'User-Enqueued') APPLY_CAPTURED,STATUS
FROM DBA_APPLY;
--- Find out apply process status
select apply_name,status from dba_apply;
exec dbms_apply_adm.start_apply(apply_name=>'&apply_name');
exec dbms_apply_adm.stop_apply(apply_name=>'&apply_name');
--- Apply parameter
SELECT PARAMETER,VALUE,SET_BY_USER FROM DBA_APPLY_PARAMETERS
WHERE APPLY_NAME = '&apply_parameter';
BEGIN
DBMS_APPLY_ADM.SET_PARAMETER(apply_name => '&apply_parameter',parameter => 'commit_serialization',value => 'none');
END;
/
-- Determine any instantiation
-- The instantiation SCN is the SCN at or below which the apply process discards changes; changes with a higher SCN are applied.
SELECT SOURCE_DATABASE,SOURCE_OBJECT_OWNER,SOURCE_OBJECT_NAME,
INSTANTIATION_SCN
FROM DBA_APPLY_INSTANTIATED_OBJECTS;
--- Find out about any handler -- Any DML handler
SELECT OBJECT_OWNER,OBJECT_NAME,OPERATION_NAME,USER_PROCEDURE,
DECODE(ERROR_HANDLER,'Y', 'Error','N', 'DML') ERROR_HANDLER
FROM DBA_APPLY_DML_HANDLERS
WHERE APPLY_DATABASE_LINK IS NULL
ORDER BY OBJECT_OWNER, OBJECT_NAME, ERROR_HANDLER;
-- Any DDL/Message handler
SELECT APPLY_NAME, DDL_HANDLER, MESSAGE_HANDLER FROM DBA_APPLY;
-- Using key substitution for any table
-- Key substitution is useful when there is no PK for distinguishing rows.
SELECT OBJECT_OWNER, OBJECT_NAME, COLUMN_NAME, APPLY_DATABASE_LINK
FROM DBA_APPLY_KEY_COLUMNS
ORDER BY APPLY_DATABASE_LINK, OBJECT_OWNER, OBJECT_NAME;
-- Update conflict handler
SELECT OBJECT_OWNER,OBJECT_NAME,METHOD_NAME,RESOLUTION_COLUMN,
COLUMN_NAME
FROM DBA_APPLY_CONFLICT_COLUMNS
ORDER BY OBJECT_OWNER, OBJECT_NAME, RESOLUTION_COLUMN;
-- Reader servers
-- With parallel apply servers, the reader server dequeues messages, assembles them into transactions, checks dependencies, and delivers everything to the coordinator
SELECT DECODE(ap.APPLY_CAPTURED,'YES','Captured LCRS','NO','User-enqueued messages','UNKNOWN') APPLY_CAPTURED,SUBSTR(s.PROGRAM,INSTR(S.PROGRAM,'(')+1,4) PROCESS_NAME,r.STATE,r.TOTAL_MESSAGES_DEQUEUED
FROM V$STREAMS_APPLY_READER r, V$SESSION s, DBA_APPLY ap
WHERE r.APPLY_NAME = ap.APPLY_NAME AND r.SID = s.SID AND r.SERIAL# = s.SERIAL# AND r.APPLY_NAME = ap.APPLY_NAME;
-- Check out latency
-- Capture to dequeue latency
-- Latency : =(Dequeue_time in dest database - event creation in source DB)
-- Creation: = Time when redo log generated in source database / time user-message enqueued
-- Message number = MSG number which was dequeued.
SELECT (DEQUEUE_TIME - DEQUEUED_MESSAGE_CREATE_TIME) * 86400 LATENCY,
TO_CHAR (DEQUEUED_MESSAGE_CREATE_TIME, 'HH24:MI:SS MM/DD/YY') CREATION,
TO_CHAR (DEQUEUE_TIME, 'HH24:MI:SS MM/DD/YY') LAST_DEQUEUE,
DEQUEUED_MESSAGE_NUMBER FROM V$STREAMS_APPLY_READER a,dba_apply b
WHERE a.APPLY_NAME = b.apply_name;
--- Capture to apply latency
SELECT (HWM_TIME-HWM_MESSAGE_CREATE_TIME)*86400 "Latency in Seconds",TO_CHAR(HWM_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY') "Event Creation",TO_CHAR(HWM_TIME,'HH24:MI:SS MM/DD/YY') "Apply Time",HWM_MESSAGE_NUMBER "Applied Message Number"
FROM V$STREAMS_APPLY_COORDINATOR
WHERE APPLY_NAME = '&apply_name';
OR
SELECT (APPLY_TIME-APPLIED_MESSAGE_CREATE_TIME)*86400 "Latency in Seconds",TO_CHAR(APPLIED_MESSAGE_CREATE_TIME,'HH24:MI:SS MM/DD/YY')"Event Creation",TO_CHAR(APPLY_TIME,'HH24:MI:SS MM/DD/YY') "Apply Time",APPLIED_MESSAGE_NUMBER "Applied Message Number"
FROM DBA_APPLY_PROGRESS WHERE APPLY_NAME = '&apply_name';
--- Apply coordinator
select * from v$streams_apply_coordinator;
------ Effectiveness of parallelism in apply process.
SELECT COUNT(SERVER_ID) "Effective Parallelism"
FROM V$STREAMS_APPLY_SERVER
WHERE APPLY_NAME = 'STRMADMIN_ORA9IPR_US_ORAC' AND TOTAL_MESSAGES_APPLIED > 0;
-- How many message applied by each apply process
SELECT SERVER_ID, TOTAL_MESSAGES_APPLIED
FROM V$STREAMS_APPLY_SERVER
WHERE APPLY_NAME = 'STRMADMIN_ORA9IPR_US_ORAC' ORDER BY SERVER_ID;
--- Find apply errors -- find the reason for each apply error; two options: re-execute the transaction or delete it.
select * from dba_apply_error;
-- Find the apply rule set
SELECT RULE_SET_OWNER, RULE_SET_NAME FROM DBA_APPLY WHERE APPLY_NAME = '&apply_name';
select STREAMS_NAME,STREAMS_TYPE,RULE_TYPE,RULE_NAME,
TABLE_OWNER||'.'||TABLE_NAME,RULE_OWNER,RULE_CONDITION
from "DBA_STREAMS_TABLE_RULES"
where streams_type='APPLY' and rule_name in (select rule_name from DBA_RULE_SET_RULES a, dba_apply b where a.rule_set_name=b.rule_set_name)
union all
select STREAMS_NAME,STREAMS_TYPE,RULE_TYPE,RULE_NAME,
SCHEMA_NAME,RULE_OWNER,RULE_CONDITION
from "DBA_STREAMS_SCHEMA_RULES"
where streams_type='APPLY' and rule_name in (select rule_name from DBA_RULE_SET_RULES a, dba_apply b where a.rule_set_name=b.rule_set_name)
union all
select STREAMS_NAME,STREAMS_TYPE,RULE_TYPE,RULE_NAME,
null,RULE_OWNER,RULE_CONDITION
from "DBA_STREAMS_GLOBAL_RULES"
where streams_type='APPLY' and rule_name in (select rule_name from DBA_RULE_SET_RULES a, dba_apply b where a.rule_set_name=b.rule_set_name);
-- Which DML/DDL table rules the apply process is applying
select * from "DBA_STREAMS_TABLE_RULES"
where streams_type='APPLY' and rule_name in (select rule_name from DBA_RULE_SET_RULES a, dba_apply b where a.rule_set_name=b.rule_set_name and apply_name='&apply_name');
-- Which schema rules the apply process is applying
select * from "DBA_STREAMS_SCHEMA_RULES"
where streams_type='APPLY' and rule_name in (select rule_name from DBA_RULE_SET_RULES a, dba_apply b where a.rule_set_name=b.rule_set_name and apply_name='&apply_name');
-- Which global rules the apply process is applying
select * from "DBA_STREAMS_GLOBAL_RULES"
where streams_type='APPLY' and rule_name in (select rule_name from DBA_RULE_SET_RULES a, dba_apply b where a.rule_set_name=b.rule_set_name and apply_name='&apply_name');