set serveroutput on
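-- Database health-check snapshot (run from SQL*Plus): prints database version,
-- host name, SGA and database size, instance start time, session counts,
-- top wait events, and the top physical I/O / logical I/O / memory / CPU sessions via DBMS_OUTPUT.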
declare
cursor c1 is select version
from v$instance;
cursor c2 is
select
host_name
, instance_name
, to_char(sysdate, 'HH24:MI:SS DD-MON-YY') currtime
, to_char(startup_time, 'HH24:MI:SS DD-MON-YY') starttime
from v$instance;
cursor c4 is
select * from (SELECT count(*) cnt, substr(event,1,50) event
FROM v$session_wait
WHERE wait_time = 0
AND event NOT IN ('smon timer','pipe get','wakeup time manager','pmon timer','rdbms ipc message',
'SQL*Net message from client')
GROUP BY event
ORDER BY 1 DESC) where rownum <6;
cursor c5 is
select round(sum(value)/1048576) as sgasize from v$sga;
cursor c6 is select round(sum(bytes)/1048576) as dbsize
from v$datafile;
cursor c7 is select 'top physical i/o process' category, sid,
username, total_user_io amt_used,
round(100 * total_user_io/total_io,2) pct_used
from (select b.sid sid, nvl(b.username, p.name) username,
sum(value) total_user_io
from v$statname c, v$sesstat a,
v$session b, v$bgprocess p
where a.statistic# = c.statistic#
and p.paddr (+) = b.paddr
and b.sid = a.sid
and c.name in ('physical reads', 'physical writes',
'physical reads direct',
'physical reads direct (lob)',
'physical writes direct',
'physical writes direct (lob)')
and b.username not in ('SYS', 'SYSTEM', 'SYSMAN', 'DBSNMP')
group by b.sid, nvl(b.username, p.name)
order by 3 desc),
(select sum(value) total_io
from v$statname c, v$sesstat a
where a.statistic# = c.statistic#
and c.name in ('physical reads', 'physical writes',
'physical reads direct',
'physical reads direct (lob)',
'physical writes direct',
'physical writes direct (lob)'))
where rownum < 2
union all
select 'top logical i/o process', sid, username,
total_user_io amt_used,
round(100 * total_user_io/total_io,2) pct_used
from (select b.sid sid, nvl(b.username, p.name) username,
sum(value) total_user_io
from v$statname c, v$sesstat a,
v$session b, v$bgprocess p
where a.statistic# = c.statistic#
and p.paddr (+) = b.paddr
and b.sid = a.sid
and c.name in ('consistent gets', 'db block gets')
and b.username not in ('SYS', 'SYSTEM', 'SYSMAN', 'DBSNMP')
group by b.sid, nvl(b.username, p.name)
order by 3 desc),
(select sum(value) total_io
from v$statname c, v$sesstat a,
v$session b, v$bgprocess p
where a.statistic# = c.statistic#
and p.paddr (+) = b.paddr
and b.sid = a.sid
and c.name in ('consistent gets', 'db block gets'))
where rownum < 2
union all
select 'top memory process', sid,
username, total_user_mem,
round(100 * total_user_mem/total_mem,2)
from (select b.sid sid, nvl(b.username, p.name) username,
sum(value) total_user_mem
from v$statname c, v$sesstat a,
v$session b, v$bgprocess p
where a.statistic# = c.statistic#
and p.paddr (+) = b.paddr
and b.sid = a.sid
and c.name in ('session pga memory', 'session uga memory')
and b.username not in ('SYS', 'SYSTEM', 'SYSMAN', 'DBSNMP')
group by b.sid, nvl(b.username, p.name)
order by 3 desc),
(select sum(value) total_mem
from v$statname c, v$sesstat a
where a.statistic# = c.statistic#
and c.name in ('session pga memory', 'session uga memory'))
where rownum < 2
union all
select 'top cpu process', sid, username,
total_user_cpu,
round(100 * total_user_cpu/greatest(total_cpu,1),2)
from (select b.sid sid, nvl(b.username, p.name) username,
sum(value) total_user_cpu
from v$statname c, v$sesstat a,
v$session b, v$bgprocess p
where a.statistic# = c.statistic#
and p.paddr (+) = b.paddr
and b.sid = a.sid
and c.name = 'CPU used by this session'
and b.username not in ('SYS', 'SYSTEM', 'SYSMAN', 'DBSNMP')
group by b.sid, nvl(b.username, p.name)
order by 3 desc),
(select sum(value) total_cpu
from v$statname c, v$sesstat a,
v$session b, v$bgprocess p
where a.statistic# = c.statistic#
and p.paddr (+) = b.paddr
and b.sid = a.sid
and c.name = 'CPU used by this session')
where rownum < 2;
cursor c8 is select username, sum(VALUE/100) cpu_usage_sec
from v$session ss, v$sesstat se, v$statname sn
where se.statistic# = sn.statistic#
and name like '%CPU used by this session%'
and se.sid = ss.sid
and username is not null
and username not in ('SYS', 'SYSTEM', 'SYSMAN', 'DBSNMP')
group by username
order by 2 desc;
begin
dbms_output.put_line ('Database Version');
dbms_output.put_line ('-----------------');
for rec in c1
loop
dbms_output.put_line(rec.version);
end loop;
dbms_output.put_line( chr(13) );
dbms_output.put_line('Hostname');
dbms_output.put_line ('----------');
for rec in c2
loop
dbms_output.put_line(rec.host_name);
end loop;
dbms_output.put_line( chr(13) );
dbms_output.put_line('SGA Size (MB)');
dbms_output.put_line ('-------------');
for rec in c5
loop
dbms_output.put_line(rec.sgasize);
end loop;
dbms_output.put_line( chr(13) );
dbms_output.put_line('Database Size (MB)');
dbms_output.put_line ('-----------------');
for rec in c6
loop
dbms_output.put_line(rec.dbsize);
end loop;
dbms_output.put_line( chr(13) );
dbms_output.put_line('Instance start-up time');
dbms_output.put_line ('-----------------------');
for rec in c2 loop
dbms_output.put_line( rec.starttime );
end loop;
dbms_output.put_line( chr(13) );
for b in
(select total, active, inactive, system, killed
from
(select count(*) total from v$session)
, (select count(*) system from v$session where username is null)
, (select count(*) active from v$session where status = 'ACTIVE' and username is not null)
, (select count(*) inactive from v$session where status = 'INACTIVE')
, (select count(*) killed from v$session where status = 'KILLED')) loop
dbms_output.put_line('Active Sessions');
dbms_output.put_line ('---------------');
dbms_output.put_line(b.total || ' sessions: ' || b.inactive || ' inactive,' || b.active || ' active, ' || b.system || ' system, ' || b.killed || ' killed ');
end loop;
dbms_output.put_line( chr(13) );
dbms_output.put_line( 'Sessions Waiting' );
dbms_output.put_line( chr(13) );
dbms_output.put_line('Count Event Name');
dbms_output.put_line('----- -----------------------------------------------------');
for rec in c4
loop
dbms_output.put_line(rec.cnt||' '||rec.event);
end loop;
dbms_output.put_line( chr(13) );
dbms_output.put_line('----- -----------------------------------------------------');
dbms_output.put_line('TOP Physical i/o, logical i/o, memory and CPU processes');
dbms_output.put_line ('---------------');
for rec in c7
loop
dbms_output.put_line (rec.category||': SID '||rec.sid||' User : '||rec.username||': Amount used : '||rec.amt_used||': Percent used: '||rec.pct_used);
end loop;
dbms_output.put_line('------------------------------------------------------------------');
dbms_output.put_line('TOP CPU users by usage');
dbms_output.put_line ('---------------');
for rec in c8
loop
dbms_output.put_line (rec.username||'--'||rec.cpu_usage_sec);
dbms_output.put_line ('---------------');
end loop;
end;
/
check the general health of the database
SELECT file_id,
tablespace_name,
file_name,
status
FROM sys.dba_data_files;
SELECT file#,
name,
status,
enabled
FROM v$datafile;
SELECT *
FROM v$backup;
SELECT *
FROM v$recovery_status;
SELECT *
FROM v$recover_file;
SELECT *
FROM v$recovery_file_status;
SELECT *
FROM v$recovery_log;
SELECT username,
command,
status,
module
FROM v$session;
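These checks can also be run non-interactively; a minimal shell wrapper is sketched below (the script name, connect string and formatting are illustrative assumptions, not part of the original queries):
#!/bin/sh
# healthcheck.sh -- run a few of the data file / backup checks above via SQL*Plus
sqlplus -s "/ as sysdba" <<'EOF'
set pagesize 200 linesize 200
SELECT file_id, tablespace_name, file_name, status FROM sys.dba_data_files;
SELECT file#, name, status, enabled FROM v$datafile;
SELECT * FROM v$backup;
SELECT * FROM v$recover_file;
EOF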
RAC - Issues & Troubleshooting
####################################
Whenever a node has issues rejoining the cluster after a reboot, here is a quick checklist I would suggest (a small collection sketch follows the list):
/var/log/messages
ifconfig
ip route
/etc/hosts
/etc/sysconfig/network-scripts/ifcfg-eth*
ethtool
mii-tool
cluvfy
$ORA_CRS_HOME/log
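As a convenience, most of the items above can be gathered in one pass with something like the following sketch (node-local, run as root; the output location is an assumption):
#!/bin/sh
# collect_node_diag.sh -- gather the checklist items above into one file (illustrative)
OUT=/tmp/node_diag_`hostname -s`.txt
{
  echo "== /var/log/messages (tail) =="; tail -200 /var/log/messages
  echo "== ifconfig ==";                ifconfig -a
  echo "== ip route ==";                ip route
  echo "== /etc/hosts ==";              cat /etc/hosts
  echo "== ifcfg-eth* ==";              cat /etc/sysconfig/network-scripts/ifcfg-eth*
} > $OUT 2>&1
echo "Diagnostics written to $OUT"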
Let us now take a closer look at specific issues, with examples and the steps taken for their resolution.
These were all tested on an Oracle 10.2.0.4 database on RHEL4 U8 x86-64.
1. srvctl not able to start Oracle Instance but sqlplus able to start
a. Check racg log for actual error message.
% more $ORACLE_HOME/log/`hostname -s`/racg/ora.{DBNAME}.{INSTANCENAME}.inst.log
b. Check if srvctl is configured to use correct parameter file(pfile/spfile)
% srvctl config database -d {DBNAME} -a
You can also validate parameter file by using sqlplus to see the exact error message.
c. Check ownership for $ORACLE_HOME/log
If this is owned by root, srvctl won't be able to start instance as oracle user.
# chown -R oracle:dba $ORACLE_HOME/log
2. VIP has failed over to another node but is not coming back to the original node
Fix: On the node where the VIP has failed over, bring it down manually as root.
Example: ifconfig eth0:2 down
PS: Be careful to bring down only VIP. A small typo may bring down your public interface:)
3. Moving OCR to a different location
PS: This can be done while CRS is up as root.
While trying to move the OCR mirror or the OCR itself to a new location, ocrconfig complains.
The fix is to touch the new file.
Example:
# ocrconfig -replace ocrmirror /crs_new/cludata/ocrfile
PROT-21: Invalid parameter
# touch /crs_new/cludata/ocrfile
# chown root:dba /crs_new/cludata/ocrfile
# ocrconfig -replace ocrmirror /crs_new/cludata/ocrfile
Verify:
a. Validate using "ocrcheck". Device/File Name should point to the new one with integrity check succeeded.
b. Ensure OCR inventory is updated correctly
# cat /etc/oracle/ocr.loc
ocrconfig_loc and ocrmirrorconfig_loc should point to correct locations.
4. Moving Voting Disk to a different location
PS: CRS must be down while moving the voting disk.
The idea is to add new voting disks and delete the older ones.
Find below sample errors and their fix.
# crsctl add css votedisk /crs_new/cludata/cssfile_new
Cluster is not in a ready state for online disk addition
We need to use the force option. However, before using it, ensure CRS is down.
If CRS is up, DO NOT use force option else it may corrupt your OCR.
# crsctl add css votedisk /crs_new/cludata/cssfile_new -force
Now formatting voting disk: /crs_new/cludata/cssfile_new
successful addition of votedisk /crs_new/cludata/cssfile_new.
Verify using "crsctl query css votedisk" and then delete the old votedisks.
While deleting too, you'll need to use the force option.
Also verify the permissions of the voting disk files; they should be owned by oracle:dba.
If the voting disks were added as root, change the ownership to oracle:dba.
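Putting the query, add, delete and ownership steps together, the sequence looks roughly like this (paths are placeholders; run only with CRS down, as noted above):
# crsctl query css votedisk
# crsctl add css votedisk /crs_new/cludata/cssfile_new -force
# crsctl delete css votedisk /crs_old/cludata/cssfile_old -force
# chown oracle:dba /crs_new/cludata/cssfile_new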
5. Manually registering listener resource to OCR
Listener was registered manually with OCR but srvctl was unable to bring up the listener
Let us first see an example of how to do this manually.
From an existing available node, print the listener resource
% crs_stat -p ora.test-server2.LISTENER_TEST-SERVER2.lsnr > /tmp/res
% cat /tmp/res
NAME=ora.test-server2.LISTENER_TEST-SERVER2.lsnr
TYPE=application
ACTION_SCRIPT=/orahome/ora10g/product/10.2.0/db_1/bin/racgwrap
ACTIVE_PLACEMENT=0
AUTO_START=1
CHECK_INTERVAL=600
DESCRIPTION=CRS application for listener on node
FAILOVER_DELAY=0
FAILURE_INTERVAL=0
FAILURE_THRESHOLD=0
HOSTING_MEMBERS=test-server2
OPTIONAL_RESOURCES=
PLACEMENT=restricted
REQUIRED_RESOURCES=ora.test-server2.vip
RESTART_ATTEMPTS=5
SCRIPT_TIMEOUT=600
START_TIMEOUT=0
STOP_TIMEOUT=0
UPTIME_THRESHOLD=7d
USR_ORA_ALERT_NAME=
USR_ORA_CHECK_TIMEOUT=0
USR_ORA_CONNECT_STR=/ as sysdba
USR_ORA_DEBUG=0
USR_ORA_DISCONNECT=false
USR_ORA_FLAGS=
USR_ORA_IF=
USR_ORA_INST_NOT_SHUTDOWN=
USR_ORA_LANG=
USR_ORA_NETMASK=
USR_ORA_OPEN_MODE=
USR_ORA_OPI=false
USR_ORA_PFILE=
USR_ORA_PRECONNECT=none
USR_ORA_SRV=
USR_ORA_START_TIMEOUT=0
USR_ORA_STOP_MODE=immediate
USR_ORA_STOP_TIMEOUT=0
USR_ORA_VIP=
Modify relevant parameters in the resource file to point to correct instance.
Rename as resourcename.cap
% mv /tmp/res /tmp/ora.test-server1.LISTENER_TEST-SERVER1.lsnr.cap
Register with OCR
% crs_register ora.test-server1.LISTENER_TEST-SERVER1.lsnr -dir /tmp/
Start listener
% srvctl start listener -d testdb -n test-server1
While trying to start the listener, srvctl throws errors like "Unable to read from listener log file", even though the listener log file exists.
If the resource is registered as root, srvctl won't be able to start it as the oracle user.
So all the aforementioned operations for registering the listener manually should be done as the oracle user.
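For the "modify relevant parameters" step above, the edits to the resource profile (.cap file) can also be scripted; a sketch using the names from the example (the sed expressions are illustrative):
% sed -i -e 's/test-server2/test-server1/g' \
         -e 's/TEST-SERVER2/TEST-SERVER1/g' /tmp/res
% mv /tmp/res /tmp/ora.test-server1.LISTENER_TEST-SERVER1.lsnr.cap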
6. Services
While checking status of a service, it says "not running"
If we try to start it using srvctl, the error message is "No such service exists" or "already running"
If we try to add service with same name, it says "already exists"
This happens because the service is in an "Unknown" state in the OCR
Using crs_stat, check if any related resource for the service (resource names ending in .srv and .cs) is still lying around.
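A quick way to spot such leftovers (a simple filter on crs_stat output; the resource-name pattern is an assumption):
% crs_stat | grep -E '\.(srv|cs)$'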
srvctl remove service -f has been tried and the issue persists.
Here is the fix:
# crs_stop -f {resourcename}
# crs_unregister {resourcename}
Now service can be added and started correctly.
7. Post host reboot, CRS is not starting
After host reboot, CRS was not coming up. No CRS logs in $ORA_CRS_HOME
Check /var/log/messages
"Cluster Ready Services waiting on dependencies. Diagnostics in /tmp/crsctl.9559"
No logs seen in /tmp/crsctl.*
Run cluvfy to identify the issue
$ORA_CRS_HOME/bin/cluvfy stage -post crsinst -n {nodename}
In this case /tmp was not writable; /etc/fstab was incorrect and was fixed to make /tmp available again.
If you see messages like "Shutdown CacheLocal. my hash ids don't match" in the CRS log, then
check whether /etc/oracle/ocr.loc is the same across all nodes of the cluster.
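A quick way to compare ocr.loc across nodes (node names are placeholders; assumes ssh user equivalence is set up):
% for n in node1 node2 node3; do echo "== $n =="; ssh $n cat /etc/oracle/ocr.loc; done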
8. CRS binary restored by copying from existing node in the cluster
CRS does not start, with the following messages in /var/log/messages:
"Id "h1" respawning too fast: disabled for 5 minutes"
CRSD log showing "no listener"
If the CRS binary is restored by copying from an existing node in the cluster, you need to ensure that:
a. Hostnames are modified correctly in $ORA_CRS_HOME/log
b. You may need to clean up socket files from /var/tmp/.oracle
PS: Exercise caution while working with the socket files. If CRS is up, you should never touch those files, otherwise a reboot may be inevitable.
9. CRS rebooting frequently by oprocd
Check /etc/oracle/oprocd/ and grep for "Rebooting".
Check /var/log/messages and grep for "restart"
If the timestamps match, this confirms the reboots are being initiated by the oprocd process.
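For example (the oprocd log file name is an assumption; adjust to whatever exists under /etc/oracle/oprocd):
# grep -i Rebooting /etc/oracle/oprocd/*.log*
# grep -i restart /var/log/messages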
% ps -ef | grep oprocd
root 10409 9937 0 Feb27 ? 00:00:00 /oracle/product/crs/bin/oprocd.bin run -t 1000 -m 500 -f
-t 1000 means oprocd would wake up every 1000ms
-m 500 means allow up to a 500ms margin of error
Basically with these options if oprocd wakes up after > 1.5 secs it’s going to force a reboot.
This is conceptually analogous to what hangcheck timer used to do pre 10.2.0.4 Oracle releases on Linux.
The fix is to set CSS diagwait to 13:
#crsctl set css diagwait 13 -force
# /oracle/product/crs/bin/crsctl get css diagwait
13
This actually changes the parameters oprocd runs with:
% ps -ef | grep oprocd
root 10409 9937 0 Feb27 ? 00:00:00 /oracle/product/crs/bin/oprocd.bin run -t 1000 -m 10000 -hsi 5:10:50:75:90 -f
Note that the margin has now changed to 10000ms, i.e. 10 seconds, in place of the default 0.5 seconds.
PS: Setting diagwait requires a full shutdown of Oracle Clusterware on ALL nodes.
10. Cluster hung. All SQL queries on GV$ views are hanging.
The alert logs from all instances have messages like the ones below:
INST1: IPC Send timeout detected. Receiver ospid 1650
INST2:IPC Send timeout detected.Sender: ospid 24692
Receiver: inst 1 binc 150 ospid 1650
INST3: IPC Send timeout detected.Sender: ospid 12955
Receiver: inst 1 binc 150 ospid 1650
The ospids on all instances belong to LCK0, the lock process.
In case of inter-instance lock issues, it's important to identify the instance where the problem originates.
As seen from the above, INST1 is the one that needs to be fixed.
Identify the process that is causing the row cache lock and kill it; otherwise, reboot node 1.
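As a starting point on instance 1 (GV$ queries hang in this state, so use the local V$ views from node 1), a sketch like the following lists the waiters, from which the holding process can then be tracked down; this is not from the original post:
% sqlplus -s "/ as sysdba" <<'EOF'
select sid, serial#, username, event, p1, seconds_in_wait
from   v$session
where  event = 'row cache lock';
EOF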
11. Inconsistent OCR with invalid permissions
% srvctl add db -d testdb -o /oracle/product/10.2
PRKR-1005 : adding of cluster database testdb configuration failed, PROC-5: User does not have permission to perform a cluster registry operation on this key. Authentication error [User does not have permission to perform this operation] [0]
crs_stat doesn't have any trace of it so utilities like crs_setperm/crs_unregister/crs_stop won't work in this case.
ocrdump shows:
[DATABASE.LOG.testdb]
UNDEF :
SECURITY : {USER_PERMISSION : PROCR_ALL_ACCESS, GROUP_PERMISSION : PROCR_ALL_ACCESS, OTHER_PERMISSION : PROCR_READ, USER_NAME : root, GROUP_NAME : root}
[DATABASE.LOG.testdb.INSTANCE]
UNDEF :
SECURITY : {USER_PERMISSION : PROCR_ALL_ACCESS, GROUP_PERMISSION : PROCR_ALL_ACCESS, OTHER_PERMISSION : PROCR_READ, USER_NAME : root, GROUP_NAME : root}
These logs are owned by root and that's the problem.
This means that the resource was probably added to the OCR as root.
Though it has since been removed by root, it cannot now be added by the oracle user unless we get rid of the entries above.
Shut down the entire cluster and either restore from a previous good backup of the OCR using:
ocrconfig -restore backupfilename
You can get list of backups using:
ocrconfig -showbackup
If you are not sure of the last good backup, you can also do the following:
Take export backup of OCR using:
ocrconfig -export /tmp/export -s online
Edit /tmp/export and remove those 2 lines pointing to DATABASE.LOG.testdb and DATABASE.LOG.testdb.INSTANCE owned by root
Import it back now
ocrconfig -import /tmp/export
After starting the cluster, verify using ocrdump.
The OCRDUMPFILE should not have any trace of those leftover log entries owned by root.
DB,ASM Autostart after OS Reboots Oracle 11g, RHEL5
#########################################################
DB,ASM Autostart after OS Reboots:
Steps:
i) create a script dbora and place in /etc/init.d
[root@devstg01 init.d]# cat dbora
#!/bin/sh
# chkconfig: 345 99 10
# description: Oracle auto start-stop script.
#
# Set ORA_HOME to be equivalent to the $ORACLE_HOME
# from which you wish to execute dbstart and dbshut;
#
# Set ORA_OWNER to the user id of the owner of the
# Oracle database in ORA_HOME.
ORA_HOME=/app/oracle/product/DB/11.1.0
LD_LIBRARY_PATH=$ORA_HOME/lib
#ORA_HOME=/u01/app/oracle/product/11.1.0/db_1
ORA_OWNER=oracle
if [ ! -f $ORA_HOME/bin/dbstart ]
then
echo "Oracle startup: cannot start"
exit
fi
case "$1" in
'start')
# Start the Oracle databases:
# The following command assumes that the oracle login
# will not prompt the user for any values
su - $ORA_OWNER -c "$ORA_HOME/bin/dbstart $ORA_HOME"
touch /var/lock/subsys/dbora
;;
'stop')
# Stop the Oracle databases:
# The following command assumes that the oracle login
# will not prompt the user for any values
su - $ORA_OWNER -c "$ORA_HOME/bin/dbshut $ORA_HOME"
rm -f /var/lock/subsys/dbora
;;
esac
ii) Create symbolic links in the /etc/rc3.d and /etc/rc5.d
/etc/rc3.d
ln -s /etc/init.d/dbora S98dbora
/etc/rc5.d
ln -s /etc/init.d/dbora S97dbora
ln -s /etc/init.d/dbora K99dbora
Make sure these db startup scripts are fired before the Grid Control startup script.
iii) chkconfig --add dbora
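You can confirm the registration with chkconfig; per the "# chkconfig: 345 99 10" header, dbora should show as on for runlevels 3, 4 and 5:
# chkconfig --list dbora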
iv) There is a bug with the cssd and DB startup scripts: the DB startup can conflict with the ASM startup via the cssd process.
Edit the /etc/inittab file and move the respawn of cssd between runlevels 2 and 3.
ex:
l2:2:wait:/etc/rc.d/rc 2
h1:35:respawn:/etc/init.d/init.cssd run >/dev/null 2>&1
snippet from dbstart script:
$ORACLE_HOME_LISTNER/bin/lsnrctl start LISTENER_EMDEVSTG >> $LOG 2>&1 &
VER10LIST=`$ORACLE_HOME_LISTNER/bin/lsnrctl version | grep "LSNRCTL for " | cut -d' ' -f5 | cut -d'.' -f1`
export VER10LIST
else
echo "Failed to auto-start Oracle Net Listener using $ORACLE_HOME_LISTNER/bin/tnslsnr"
fi
fi
snippet from dbstop script:
# Stop Oracle Net Listener
if [ -f $ORACLE_HOME_LISTNER/bin/tnslsnr ] ; then
echo "$0: Stoping Oracle Net Listener" >> $LOG 2>&1
$ORACLE_HOME_LISTNER/bin/lsnrctl stop LISTENER_EMDEVSTG>> $LOG 2>&1 &
else
echo "Failed to auto-stop Oracle Net Listener using $ORACLE_HOME_LISTNER/bin/tnslsnr"
fi
fi
vii) Reboot the server; the DB and ASM should start automatically.
viii) Verify the logs in /var/log/messages and DB alert logs.
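For example (the alert log path is an assumption for an 11g ADR layout; adjust diagnostic_dest and names to your environment):
# tail -50 /var/log/messages
% tail -100 $ORACLE_BASE/diag/rdbms/<dbname>/<sid>/trace/alert_<sid>.log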
srvctl commands in Oracle RAC
SRVCTL: (Server Control utility)
srvctl command target [options]
commands: enable|disable|start|stop|relocate|status|add|remove|modify|getenv|setenv|unsetenv|config
targets: database/db|instance/inst|service/serv|nodeapps|asm|listener
targets: database/db|instance/inst|service/serv|nodeapps|asm|listener |diskgroup|home|ons|eons|filesystem|gns|oc4j|scan|scan_listener |srvpool|server|VIP -- From Oracle 11g R2
srvctl -help or srvctl -v
srvctl -V -- prints version
srvctl version: 10.2.0.0.0 (or) srvctl version: 11.2.0.1.0
srvctl -h -- print usage
srvctl status service -h
Database:
srvctl add database -d db_name -o ORACLE_HOME [-m domain_name][-p spfile] [-A name|ip/netmask]
[-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}]
[-s start_options] [-n db_name] [-y {AUTOMATIC|MANUAL}]
srvctl add database -d prod -o /u01/oracle/product/102/prod
srvctl remove database -d db_name [-f]
srvctl remove database -d prod
srvctl start database -d db_name [-o start_options] [-c connect_str|-q]
srvctl start database -d db_name [-o open]
srvctl start database -d db_name -o nomount
srvctl start database -d db_name -o mount
srvctl start db -d prod
srvctl start database -d apps -o open
srvctl stop database -d db_name [-o stop_options] [-c connect_str|-q]
srvctl stop database -d db_name [-o normal]
srvctl stop database -d db_name -o transactional
srvctl stop database -d db_name -o immediate
srvctl stop database -d db_name -o abort
srvctl stop db -d crm -o immediate
srvctl status database -d db_name [-f] [-v] [-S level]
srvctl status database -d db_name -v service_name
srvctl status database -d hrms
srvctl enable database -d db_name
srvctl enable database -d vis
srvctl disable database -d db_name
srvctl disable db -d vis
srvctl config database
srvctl config database -d db_name [-a] [-t]
srvctl config database
srvctl config database -d HYD -a
srvctl modify database -d db_name [-n db_name] [-o ORACLE_HOME] [-m domain_name] [-p spfile]
[-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}] [-s start_options] [-y {AUTOMATIC|MANUAL}]
srvctl modify database -d hrms -r physical_standby
srvctl modify db -d RAC -p /u03/oradata/RAC/spfileRAC.ora -- points the database to a new spfile
srvctl modify database -d HYD -o /u01/app/oracle/product/11.1/db -s open
srvctl getenv database -d db_name [-t name_list]
srvctl getenv database -d prod
srvctl setenv database -d db_name {-t name=val[,name=val,...]|-T name=val}
srvctl setenv database -d HYD -t "TNS_ADMIN=/u01/app/oracle/product/11.1/asm/network/admin"
srvctl setenv db -d prod -t LANG=en
srvctl unsetenv database -d db_name [-t name_list]
srvctl unsetenv database -d prod -t CLASSPATH
In 11g Release 2, the syntax of some commands has changed:
srvctl add database -d db_unique_name -o ORACLE_HOME [-x node_name] [-m domain_name] [-p spfile] [-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}] [-s start_options] [-t stop_options] [-n db_name] [-y {AUTOMATIC|MANUAL}] [-g server_pool_list] [-a "diskgroup_list"]
srvctl add database -d prod -o /u01/oracle/product/112/prod -m foo.com -p +dg1/prod/spfileprod.ora -r PRIMARY -s open -t normal -n db2 -y AUTOMATIC -g svrpool1,svrpool2 -a "dg1,dg2"
srvctl remove database -d db_unique_name [-f] [-y] [-v]
srvctl remove database -d prod -y
srvctl stop database -d db_unique_name [-o stop_options] [-f]
srvctl stop database -d dev -f
srvctl status database -d db_unique_name [-f] [-v]
srvctl status db -d sat -v
srvctl enable database -d db_unique_name [-n node_name]
srvctl enable database -d vis -n lnx01
srvctl disable database -d db_unique_name [-n node_name]
srvctl disable db -d vis -n lnx03
srvctl config database [-d db_unique_name [-a]]
srvctl config db -d db_erp -a
srvctl modify database -d db_unique_name [-n db_name] [-o ORACLE_HOME] [-u oracle_user] [-m domain] [-p spfile] [-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}] [-s start_options] [-t stop_options] [-y {AUTOMATIC|MANUAL}] [-g "server_pool_list"] [-a "diskgroup_list"|-z]
srvctl modify db -d prod -r logical_standby
srvctl modify database -d racTest -a "SYSFILES,LOGS,OLTP"
Instance:
srvctl add instance -d db_name -i inst_name -n node_name
srvctl add instance -d prod -i prod01 -n linux01
srvctl remove instance -d db_name -i inst_name [-f]
srvctl remove instance -d prod -i prod01
srvctl start instance -d db_name -i inst_names [-o start_options] [-c connect_str|-q]
srvctl start instance -d db_name -i inst_names [-o open]
srvctl start instance -d db_name -i inst_names -o nomount
srvctl start instance -d db_name -i inst_names -o mount
srvctl start instance -d dev -i dev2
srvctl stop instance -d db_name -i inst_names [-o stop_options] [-c connect_str|-q]
srvctl stop instance -d db_name -i inst_names [-o normal]
srvctl stop instance -d db_name -i inst_names -o transactional
srvctl stop instance -d db_name -i inst_names -o immediate
srvctl stop instance -d db_name -i inst_names -o abort
srvctl stop inst -d vis -i vis
srvctl status instance -d db_name -i inst_names [-f] [-v] [-S level]
srvctl status inst -d racdb -i racdb2
srvctl enable instance -d db_name -i inst_names
srvctl enable instance -d prod -i "prod1,prod2"
srvctl disable instance -d db_name -i inst_names
srvctl disable inst -d prod -i "prod1,prod3"
srvctl modify instance -d db_name -i inst_name {-s asm_inst_name|-r} -- set dependency of instance to ASM
srvctl modify instance -d db_name -i inst_name -n node_name -- move the instance
srvctl modify instance -d db_name -i inst_name -r -- remove the instance
srvctl getenv instance -d db_name -i inst_name [-t name_list]
srvctl setenv instance -d db_name [-i inst_name] {-t "name=val[,name=val,...]" | -T "name=val"}
srvctl unsetenv instance -d db_name [-i inst_name] [-t name_list]
In 11g Release 2, the syntax of some commands has changed:
srvctl start instance -d db_unique_name {-n node_name -i "instance_name_list"} [-o start_options]
srvctl start instance -d prod -n node2
srvctl start inst -d prod -i "prod2,prod3"
srvctl stop instance -d db_unique_name {[-n node_name]|[-i "instance_name_list"]} [-o stop_options] [-f]
srvctl stop inst -d prod -n node1
srvctl stop instance -d prod -i prod1
srvctl status instance -d db_unique_name {-n node_name | -i "instance_name_list"} [-f] [-v]
srvctl status instance -d prod -i "prod1,prod2" -v
srvctl modify instance -d db_unique_name -i instance_name {-n node_name|-z}
srvctl modify instance -d prod -i prod1 -n mynode
srvctl modify inst -d prod -i prod1 -z
Service:
srvctl add service -d db_name -s service_name -r pref_insts [-a avail_insts] [-P TAF_policy]
srvctl add service -d db_name -s service_name -u {-r "new_pref_inst" | -a "new_avail_inst"}
srvctl add service -d RAC -s PRD -r RAC01,RAC02 -a RAC03,RAC04
srvctl add serv -d CRM -s CRM -r CRM1 -a CRM3 -P basic
srvctl remove service -d db_name -s service_name [-i inst_name] [-f]
srvctl remove serv -d dev -s sales
srvctl remove service -d dev -s sales -i dev01,dev02
srvctl start service -d db_name [-s service_names [-i inst_name]] [-o start_options]
srvctl start service -d db_name -s service_names [-o open]
srvctl start service -d db_name -s service_names -o nomount
srvctl start service -d db_name -s service_names -o mount
srvctl start serv -d dwh -s dwh
srvctl stop service -d db_name [-s service_names [-i inst_name]] [-f]
srvctl stop serv -d dwh -s dwh
srvctl status service -d db_name [-s service_names] [-f] [-v] [-S level]
srvctl status service -d dev -s dev
srvctl enable service -d db_name -s service_names [-i inst_name]
srvctl enable service -d apps -s apps1
srvctl disable service -d db_name -s service_names [-i inst_name]
srvctl disable serv -d dev -s dev -i dev1
srvctl config service -d db_name [-s service_name] [-a] [-S level]
srvctl config service -d db_name -a -- -a shows TAF configuration
srvctl config service -d TEST -s test PREF:TST1 AVAIL:TST2
srvctl modify service -d db_name -s service_name -i old_inst_name -t new_inst_name [-f]
srvctl modify service -d db_name -s service_name -i avail_inst_name -r [-f]
srvctl modify service -d db_name -s service_name -n -i preferred_list [-a available_list] [-f]
srvctl modify service -d db_name -s service_name -i old_inst_name -a avail_inst -P TAF_policy
srvctl modify serv -d PROD -s DWH -n -i I1,I2,I3,I4 -a I5,I6
srvctl relocate service -d db_name -s service_name -i old_inst_name -t target_inst [-f]
srvctl getenv service -d db_name -s service_name -t name_list
srvctl setenv service -d db_name [-s service_name] {-t "name=val[,name=val,...]" | -T "name=val"}
srvctl unsetenv service -d db_name -s service_name -t name_list
In 11g Release 2, the syntax of some commands has changed:
srvctl add service -d db_unique_name -s service_name [-l [PRIMARY][,PHYSICAL_STANDBY][,LOGICAL_STANDBY][,SNAPSHOT_STANDBY]] [-y {AUTOMATIC|MANUAL}] [-q {true|false}] [-j {SHORT|LONG}] [-B {NONE|SERVICE_TIME|THROUGHPUT}][-e {NONE|SESSION|SELECT}] [-m {NONE|BASIC}][-z failover_retries] [-w failover_delay]
srvctl add service -d rac -s rac1 -q TRUE -m BASIC -e SELECT -z 180 -w 5 -j LONG
srvctl add service -d db_unique_name -s service_name -u {-r preferred_list | -a available_list}
srvctl add service -d db_unique_name -s service_name
-g server_pool [-c {UNIFORM|SINGLETON}] [-k network_number]
[-l [PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY]
[-y {AUTOMATIC|MANUAL}] [-q {TRUE|FALSE}] [-j {SHORT|LONG}]
[-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}]
[-m {NONE|BASIC}] [-P {BASIC|NONE|PRECONNECT}] [-x {TRUE|FALSE}]
[-z failover_retries] [-w failover_delay]
srvctl add service -d db_unique_name -s service_name -r preferred_list [-a available_list] [-P {BASIC|NONE|PRECONNECT}]
[-l [PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY]
[-y {AUTOMATIC|MANUAL}] [-q {TRUE|FALSE}] [-j {SHORT|LONG}]
[-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}]
[-m {NONE|BASIC}] [-x {TRUE|FALSE}] [-z failover_retries] [-w failover_delay]
srvctl add serv -d dev -s sales -r dev01,dev02 -a dev03 -P PRECONNECT
srvctl start service -d db_unique_name [-s "service_name_list" [-n node_name | -i instance_name]] [-o start_options]
srvctl start serv -d dev -s dev
srvctl start service -d dev -s dev -i dev2
srvctl stop service -d db_unique_name [-s "service_name_list"] [-n node_name | -i instance_name] [-f]
srvctl stop service -d dev -s dev
srvctl stop serv -d dev -s dev -i dev2
srvctl status service -d db_unique_name [-s "service_name_list"] [-f] [-v]
srvctl status service -d dev -s dev -v
srvctl enable service -d db_unique_name -s "service_name_list" [-i instance_name | -n node_name]
srvctl enable service -d dev -s dev
srvctl enable serv -d dev -s dev -i dev1
srvctl disable service -d db_unique_name -s "service_name_list" [-i instance_name | -n node_name]
srvctl disable service -d dev -s "dev,marketing"
srvctl disable serv -d dev -s dev -i dev1
srvctl config service -d db_unique_name [-s service_name] [-a]
srvctl config service -d dev -s dev
srvctl modify service -d db_unique_name -s service_name
[-c {UNIFORM|SINGLETON}] [-P {BASIC|PRECONNECT|NONE}]
[-l {[PRIMARY]|[PHYSICAL_STANDBY]|[LOGICAL_STANDBY]|[SNAPSHOT_STANDBY]} [-q {TRUE|FALSE}] [-x {TRUE|FALSE}] [-j {SHORT|LONG}] [-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}] [-m {NONE|BASIC}] [-z failover_retries] [-w failover_delay] [-y {AUTOMATIC|MANUAL}]
srvctl modify service -d db_unique_name -s service_name -i old_instance_name -t new_instance_name [-f]
srvctl modify service -d db_unique_name -s service_name -i avail_inst_name -r [-f]
srvctl modify service -d db_unique_name -s service_name -n -i preferred_list [-a available_list] [-f]
srvctl modify service -d dev -s dev -i dev1 -t dev2
srvctl modify serv -d dev -s dev -i dev1 -r
srvctl modify service -d dev -s dev -n -i dev1 -a dev2
srvctl relocate service -d db_unique_name -s service_name {-c source_node -n target_node|-i old_instance_name -t new_instance_name} [-f]
srvctl relocate service -d dev -s dev -i dev1 -t dev3
Nodeapps:
#srvctl add nodeapps -n node_name -o ORACLE_HOME -A name|ip/netmask[/if1[|if2|...]]
#srvctl add nodeapps -n lnx02 -o $ORACLE_HOME -A 192.168.0.151/255.255.0.0/eth0
#srvctl remove nodeapps -n node_names [-f]
#srvctl start nodeapps -n node_name -- Starts GSD, VIP, listener & ONS
#srvctl stop nodeapps -n node_name [-r] -- Stops GSD, VIP, listener & ONS
#srvctl status nodeapps -n node_name
#srvctl config nodeapps -n node_name [-a] [-g] [-o] [-s] [-l]
-a Display VIP configuration
-g Display GSD configuration
-s Display ONS daemon configuration
-l Display listener configuration
#srvctl modify nodeapps -n node_name [-A new_vip_address]
#srvctl modify nodeapps -n lnx06 -A 10.50.99.43/255.255.252.0/eth0
#srvctl getenv nodeapps -n node_name [-t name_list]
#srvctl setenv nodeapps -n node_name {-t "name=val[,name=val,...]"|-T "name=val"}
#srvctl setenv nodeapps -n adcracdbq3 -t "TNS_ADMIN=/u01/app/oracle/product/11.1/asm/network/admin"
#srvctl unsetenv nodeapps -n node_name [-t name_list]
In 11g Release 2, the syntax of some commands has changed:
srvctl add nodeapps -n node_name -A {name|ip}/netmask[/if1[|if2|...]] [-m multicast_ip_address] [-p multicast_port_number] [-l ons_local_port] [-r ons_remote-port] [-t host[:port][,host[:port],...]] [-v]
srvctl add nodeapps -S subnet/netmask[/if1[|if2|...]] [-d dhcp_server_type] [-m multicast_ip_address] [-p multicast_port_number] [-l ons_local_port] [-r ons_remote-port] [-t host[:port][,host[:port],...]] [-v]
#srvctl add nodeapps -n devnode1 -A 1.2.3.4/255.255.255.0
srvctl remove nodeapps [-f] [-y] [-v]
srvctl remove nodeapps
srvctl start nodeapps [-n node_name] [-v]
srvctl start nodeapps
srvctl stop nodeapps [-n node_name] [-r] [-v]
srvctl stop nodeapps
srvctl status nodeapps
srvctl enable nodeapps [-g] [-v]
srvctl enable nodeapps -g -v
srvctl disable nodeapps [-g] [-v]
srvctl disable nodeapps -g -v
srvctl config nodeapps [-a] [-g] [-s] [-e]
srvctl config nodeapps -a -g -s -e
srvctl modify nodeapps [-n node_name -A new_vip_address] [-S subnet/netmask[/if1[|if2|...]] [-m multicast_ip_address] [-p multicast_port_number] [-e eons_listen_port] [-l ons_local_port] [-r ons_remote_port] [-t host[:port][,host:port,...]] [-v]
srvctl modify nodeapps -n mynode1 -A 100.200.300.40/255.255.255.0/eth0
srvctl getenv nodeapps [-a] [-g] [-s] [-e] [-t "name_list"] [-v]
srvctl getenv nodeapps -a
srvctl setenv nodeapps {-t "name=val[,name=val][...]" | -T "name=val"} [-v]
srvctl setenv nodeapps -T "CLASSPATH=/usr/local/jdk/jre/rt.jar" -v
srvctl unsetenv nodeapps -t "name_list" [-v]
srvctl unsetenv nodeapps -t "test_var1,test_var2"
ASM:
srvctl add asm -n node_name -i asminstance -o ORACLE_HOME [-p spfile]
srvctl remove asm -n node_name [-i asminstance] [-f]
srvctl remove asm -n db6
srvctl start asm -n node_name [-i asminstance] [-o start_options] [-c connect_str|-q]
srvctl start asm -n node_name [-i asminstance] [-o open]
srvctl start asm -n node_name [-i asminstance] -o nomount
srvctl start asm -n node_name [-i asminstance] -o mount
srvctl start asm -n linux01
srvctl stop asm -n node_name [-i asminstance] [-o stop_options] [-c connect_str|-q]
srvctl stop asm -n node_name [-i asminstance] [-o normal]
srvctl stop asm -n node_name [-i asminstance] -o transactional
srvctl stop asm -n node_name [-i asminstance] -o immediate
srvctl stop asm -n node_name [-i asminstance]-o abort
srvctl stop asm -n racnode1
srvctl stop asm -n devnode1 -i +asm1
srvctl status asm -n node_name
srvctl status asm -n racnode1
srvctl enable asm -n node_name [-i asminstance]
srvctl enable asm -n lnx03 -i +asm3
srvctl disable asm -n node_name [-i asminstance]
srvctl disable asm -n lnx02 -i +asm2
srvctl config asm -n node_name
srvctl config asm -n lnx08
srvctl modify asm -n node_name -i asminstance [-o ORACLE_HOME] [-p spfile]
srvctl modify asm -n rac6 -i +asm6 -o /u01/app/oracle/product/11.1/asm
In 11g Release 2, the syntax of some commands has changed:
srvctl add asm [-l lsnr_name] [-p spfile] [-d asm_diskstring]
srvctl add asm
srvctl add asm -l LISTENERASM -p +dg_data/spfile.ora
srvctl remove asm [-f]
srvctl remove asm -f
srvctl start asm [-n node_name] [-o start_options]
srvctl start asm -n devnode1
srvctl stop asm [-n node_name] [-o stop_options] [-f]
srvctl stop asm -n devnode1 -f
srvctl status asm [-n node_name] [-a]
srvctl status asm -n devnode1 -a
srvctl enable asm [-n node_name]
srvctl enable asm -n devnode1
srvctl disable asm [-n node_name]
srvctl disable asm -n devnode1
srvctl config asm [-a]
srvctl config asm -a
srvctl modify asm [-l lsnr_name] [-p spfile] [-d asm_diskstring]
srvctl modify asm [-n node_name] [-l listener_name] [-d asm_diskstring] [-p spfile_path_name]
srvctl modify asm -l lsnr1
srvctl getenv asm [-t name[, ...]]
srvctl getenv asm
srvctl setenv asm {-t "name=val [,...]" | -T "name=value"}
srvctl setenv asm -t LANG=en
srvctl unsetenv asm -t "name[, ...]"
srvctl unsetenv asm -t CLASSPATH
Listener:
srvctl add listener -n node_name -o ORACLE_HOME [-l listener_name] -- 11g R1 command
srvctl remove listener -n node_name [-l listener_name] -- 11g R1 command
srvctl start listener -n node_name [-l listener_names]
srvctl start listener -n node1
srvctl stop listener -n node_name [-l listener_names]
srvctl stop listener -n node1
srvctl status listener [-n node_name] [-l listener_names] -- 11g R1 command
srvctl status listener -n node2
srvctl config listener -n node_name
srvctl modify listener -n node_name [-l listener_names] -o ORACLE_HOME -- 11g R1 command
srvctl modify listener -n racdb4 -o /u01/app/oracle/product/11.1/asm -l "LISTENER_RACDB4"
In 11g Release 2, the syntax of some commands has changed:
srvctl add listener [-l lsnr_name] [-s] [-p "[TCP:]port[, ...][/IPC:key][/NMP:pipe_name][/TCPS:s_port] [/SDP:port]"] [-k network_number] [-o ORACLE_HOME]
srvctl add listener -l LISTENERASM -p "TCP:1522" -o $ORACLE_HOME
srvctl add listener -l listener112 -p 1341 -o /ora/ora112
srvctl remove listener [-l lsnr_name|-a] [-f]
srvctl remove listener -l lsnr01
srvctl stop listener [-n node_name] [-l lsnr_name] [-f]
srvctl enable listener [-l lsnr_name] [-n node_name]
srvctl enable listener -l listener_dev -n node5
srvctl disable listener [-l lsnr_name] [-n node_name]
srvctl disable listener -l listener_dev -n node5
srvctl config listener [-l lsnr_name] [-a]
srvctl config listener
srvctl modify listener [-l listener_name] [-o oracle_home] [-u user_name] [-p "[TCP:]port_list[/IPC:key][/NMP:pipe_name][/TCPS:s_port][/SDP:port]"] [-k network_number]
srvctl modify listener -n node1 -p "TCP:1521,1522"
srvctl getenv listener [-l lsnr_name] [-t name[, ...]]
srvctl getenv listener
srvctl setenv listener [-l lsnr_name] {-t "name=val [,...]" | -T "name=value"}
srvctl setenv listener -t LANG=en
srvctl unsetenv listener [-l lsnr_name] -t "name[, ...]"
srvctl unsetenv listener -t "TNS_ADMIN"
New srvctl commands in 11g Release 2
Diskgroup:
srvctl remove diskgroup -g diskgroup_name [-n node_list] [-f]
srvctl remove diskgroup -g DG1 -f
srvctl start diskgroup -g diskgroup_name [-n node_list]
srvctl start diskgroup -g diskgroup1 -n node1,node2
srvctl stop diskgroup -g diskgroup_name [-n node_list] [-f]
srvctl stop diskgroup -g ASM_FRA_DG
srvctl stop diskgroup -g dg1 -n node1,node2 -f
srvctl status diskgroup -g diskgroup_name [-n node_list] [-a]
srvctl status diskgroup -g dg_data -n node1,node2 -a
srvctl enable diskgroup -g diskgroup_name [-n node_list]
srvctl enable diskgroup -g diskgroup1 -n node1,node2
srvctl disable diskgroup -g diskgroup_name [-n node_list]
srvctl disable diskgroup -g dg_fra -n node1, node2
Home:
srvctl start home -o ORACLE_HOME -s state_file [-n node_name]
srvctl start home -o /u01/app/oracle/product/11.2.0/db_1 -s ~/state.txt
srvctl stop home -o ORACLE_HOME -s state_file [-t stop_options] [-n node_name] [-f]
srvctl stop home -o /u01/app/oracle/product/11.2.0/db_1 -s ~/state.txt
srvctl status home -o ORACLE_HOME -s state_file [-n node_name]
srvctl status home -o /u01/app/oracle/product/11.2.0/db_1 -s ~/state.txt
ONS (Oracle Notification Service) :
srvctl add ons [-l ons-local-port] [-r ons-remote-port] [-t host[:port][,host[:port]...]] [-v]
srvctl add ons -l 6200
srvctl remove ons [-f] [-v]
srvctl remove ons -f
srvctl start ons [-v]
srvctl start ons -v
srvctl stop ons [-v]
srvctl stop ons -v
srvctl status ons
srvctl enable ons [-v]
srvctl enable ons
srvctl disable ons [-v]
srvctl disable ons
srvctl config ons
srvctl modify ons [-l ons-local-port] [-r ons-remote-port] [-t host[:port][,host[:port]...]] [-v]
srvctl modify ons
EONS:
srvctl add eons [-p portnum] [-m multicast-ip-address] [-e eons-listen-port] [-v]
#srvctl add eons -p 2018
srvctl remove eons [-f] [-v]
srvctl remove eons -f
srvctl start eons [-v]
srvctl start eons
srvctl stop eons [-f] [-v]
srvctl stop eons -f
srvctl status eons
srvctl enable eons [-v]
srvctl enable eons
srvctl disable eons [-v]
srvctl disable eons
srvctl config eons
srvctl modify eons [-m multicast_ip_address] [-p multicast_port_number] [-e eons_listen_port] [-v]
srvctl modify eons -p 2018
FileSystem:
srvctl add filesystem -d volume_device -v volume_name -g diskgroup_name [-m mountpoint_path] [-u user_name]
srvctl add filesystem -d /dev/asm/d1volume1 -v VOLUME1 -g RAC_DATA -m /oracle/cluster1/acfs1
srvctl remove filesystem -d volume_device_name [-f]
srvctl remove filesystem -d /dev/asm/racvol1
srvctl start filesystem -d volume_device_name [-n node_name]
srvctl start filesystem -d /dev/asm/racvol3
srvctl stop filesystem -d volume_device_name [-n node_name] [-f]
srvctl stop filesystem -d /dev/asm/racvol1 -f
srvctl status filesystem -d volume_device_name
srvctl status filesystem -d /dev/asm/racvol2
srvctl enable filesystem -d volume_device_name
srvctl enable filesystem -d /dev/asm/racvol9
srvctl disable filesystem -d volume_device_name
srvctl disable filesystem -d /dev/asm/racvol1
srvctl config filesystem -d volume_device_path
srvctl modify filesystem -d volume_device_name -u user_name
srvctl modify filesystem -d /dev/asm/racvol1 -u sysadmin
SrvPool:
srvctl add srvpool -g server_pool [-i importance] [-l min_size] [-u max_size] [-n node_list] [-f]
srvctl add srvpool -g SP1 -i 1 -l 3 -u 7 -n node1,node2
srvctl remove srvpool -g server_pool
srvctl remove srvpool -g srvpool1
srvctl status srvpool [-g server_pool] [-a]
srvctl status srvpool -g srvpool2 -a
srvctl config srvpool [-g server_pool]
srvctl config srvpool -g dbpool
srvctl modify srvpool -g server_pool [-i importance] [-l min_size] [-u max_size] [-n node_name_list] [-f]
srvctl modify srvpool -g srvpool4 -i 0 -l 2 -u 4 -n node3, node4
Server:
srvctl status server -n "server_name_list" [-a]
srvctl status server -n server11 -a
srvctl relocate server -n "server_name_list" -g server_pool_name [-f]
srvctl relocate server -n "linux1, linux2" -g sp2
Scan (Single Client Access Name):
srvctl add scan -n scan_name [-k network_number] [-S subnet/netmask[/if1[|if2|...]]]
#srvctl add scan -n scan.mycluster.example.com
srvctl remove scan [-f]
srvctl remove scan -f
srvctl start scan [-i ordinal_number] [-n node_name]
srvctl start scan -i 1 -n node1
srvctl stop scan [-i ordinal_number] [-f]
srvctl stop scan -i 1
srvctl status scan [-i ordinal_number]
srvctl status scan -i 1
srvctl enable scan [-i ordinal_number]
srvctl enable scan -i 1
srvctl disable scan [-i ordinal_number]
srvctl disable scan -i 3
srvctl config scan [-i ordinal_number]
srvctl config scan -i 2
srvctl modify scan -n scan_name
srvctl modify scan -n scan1
srvctl relocate scan -i ordinal_number [-n node_name]
srvctl relocate scan -i 2 -n node2
ordinal_number=1,2,3
Scan_listener:
srvctl add scan_listener [-l lsnr_name_prefix] [-s] [-p "[TCP:]port_list[/IPC:key][/NMP:pipe_name][/TCPS:s_port] [/SDP:port]"]
#srvctl add scan_listener -l myscanlistener
srvctl remove scan_listener [-f]
srvctl remove scan_listener -f
srvctl start scan_listener [-n node_name] [-i ordinal_number]
srvctl start scan_listener -i 1
srvctl stop scan_listener [-i ordinal_number] [-f]
srvctl stop scan_listener -i 3
srvctl status scan_listener [-i ordinal_number]
srvctl status scan_listener -i 1
srvctl enable scan_listener [-i ordinal_number]
srvctl enable scan_listener -i 2
srvctl disable scan_listener [-i ordinal_number]
srvctl disable scan_listener -i 1
srvctl config scan_listener [-i ordinal_number]
srvctl config scan_listener -i 3
srvctl modify scan_listener {-p [TCP:]port[/IPC:key][/NMP:pipe_name] [/TCPS:s_port][/SDP:port] | -u }
srvctl modify scan_listener -u
srvctl relocate scan_listener -i ordinal_number [-n node_name]
srvctl relocate scan_listener -i 1
ordinal_number=1,2,3
GNS (Grid Naming Service):
srvctl add gns -i ip_address -d domain
srvctl add gns -i 192.124.16.96 -d cluster.mycompany.com
srvctl remove gns [-f]
srvctl remove gns
srvctl start gns [-l log_level] [-n node_name]
srvctl start gns
srvctl stop gns [-n node_name [-v] [-f]
srvctl stop gns
srvctl status gns [-n node_name]
srvctl status gns
srvctl enable gns [-n node_name]
srvctl enable gns
srvctl disable gns [-n node_name]
srvctl disable gns -n devnode2
srvctl config gns [-a] [-d] [-k] [-m] [-n node_name] [-p] [-s] [-V] [-q name] [-l] [-v]
srvctl config gns -n lnx03
srvctl modify gns [-i ip_address] [-d domain]
srvctl modify gns -i 192.000.000.007
srvctl relocate gns [-n node_name]
srvctl relocate gns -n node2
VIP (Virtual Internet Protocol):
srvctl add vip -n node_name -A {name|ip}/netmask[/if1[if2|...]] [-k network_number] [-v]
#srvctl add vip -n node96 -A 192.124.16.96/255.255.255.0 -k 2
srvctl remove vip -i "vip_name_list" [-f] [-y] [-v]
srvctl remove vip -i "vip1,vip2,vip3" -f -y -v
srvctl start vip {-n node_name|-i vip_name} [-v]
srvctl start vip -i dev1-vip -v
srvctl stop vip {-n node_name|-i vip_name} [-r] [-v]
srvctl stop vip -n node1 -v
srvctl status vip {-n node_name|-i vip_name}
srvctl status vip -i node1-vip
srvctl enable vip -i vip_name [-v]
srvctl enable vip -i prod-vip -v
srvctl disable vip -i vip_name [-v]
srvctl disable vip -i vip3 -v
srvctl config vip {-n node_name|-i vip_name}
srvctl config vip -n devnode2
srvctl getenv vip -i vip_name [-t "name_list"] [-v]
srvctl getenv vip -i node1-vip
srvctl setenv vip -i vip_name {-t "name=val[,name=val,...]" | -T "name=val"}
srvctl setenv vip -i dev1-vip -t LANG=en
srvctl unsetenv vip -i vip_name -t "name_list" [-v]
srvctl unsetenv vip -i myvip -t CLASSPATH
OC4J (Oracle Container for Java):
srvctl add oc4j [-v]
srvctl add oc4j
srvctl remove oc4j [-f] [-v]
srvctl remove oc4j
srvctl start ocj4 [-v]
srvctl start ocj4 -v
srvctl stop oc4j [-f] [-v]
srvctl stop oc4j -f -v
srvctl status oc4j [-n node_name]
srvctl status oc4j -n lnx01
srvctl enable oc4j [-n node_name] [-v]
srvctl enable oc4j -n dev3
srvctl disable oc4j [-n node_name] [-v]
srvctl disable oc4j -n dev1
srvctl config oc4j
srvctl modify oc4j -p oc4j_rmi_port [-v]
srvctl modify oc4j -p 5385
srvctl relocate oc4j [-n node_name] [-v]
srvctl relocate oc4j -n lxn06 -v
SRVCTL: (Server Control utility)
srvctl command target [options]
commands: enable|disable|start|stop|relocate|status|add|remove|modify|getenv|setenv|unsetenv|config
targets: database/db|instance/inst|service/serv|nodeapps|asm|listener
targets: database/db|instance/inst|service/serv|nodeapps|asm|listener|diskgroup|home|ons|eons|filesystem|gns|oc4j|scan|scan_listener|srvpool|server|VIP -- From Oracle 11g R2
srvctl -help or srvctl -v
srvctl -V -- prints version
srvctl version: 10.2.0.0.0 (or) srvctl version: 11.2.0.1.0
srvctl -h -- print usage
srvctl status service -h
Database:
srvctl add database -d db_name -o ORACLE_HOME [-m domain_name][-p spfile] [-A name|ip/netmask]
[-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}]
[-s start_options] [-n db_name] [-y {AUTOMATIC|MANUAL}]
srvctl add database -d prod -o /u01/oracle/product/102/prod
srvctl remove database -d db_name [-f]
srvctl remove database -d prod
srvctl start database -d db_name [-o start_options] [-c connect_str|-q]
srvctl start database -d db_name [-o open]
srvctl start database -d db_name -o nomount
srvctl start database -d db_name -o mount
srvctl start db -d prod
srvctl start database -d apps -o open
srvctl stop database -d db_name [-o stop_options] [-c connect_str|-q]
srvctl stop database -d db_name [-o normal]
srvctl stop database -d db_name -o transactional
srvctl stop database -d db_name -o immediate
srvctl stop database -d db_name -o abort
srvctl stop db -d crm -o immediate
srvctl status database -d db_name [-f] [-v] [-S level]
srvctl status database -d db_name -v service_name
srvctl status database -d hrms
srvctl enable database -d db_name
srvctl enable database -d vis
srvctl disable database -d db_name
srvctl disable db -d vis
srvctl config database
srvctl config database -d db_name [-a] [-t]
srvctl config database
srvctl config database -d HYD -a
srvctl modify database -d db_name [-n db_name] [-o ORACLE_HOME] [-m domain_name] [-p spfile]
[-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}] [-s start_options] [-y {AUTOMATIC|MANUAL}]
srvctl modify database -d hrms -r physical_standby
srvctl modify db -d RAC -p /u03/oradata/RAC/spfileRAC.ora -- registers a new spfile location
srvctl modify database -d HYD -o /u01/app/oracle/product/11.1/db -s open
srvctl getenv database -d db_name [-t name_list]
srvctl getenv database -d prod
srvctl setenv database -d db_name {-t name=val[,name=val,...]|-T name=val}
srvctl setenv database -d HYD -t "TNS_ADMIN=/u01/app/oracle/product/11.1/asm/network/admin"
srvctl setenv db -d prod -t LANG=en
srvctl unsetenv database -d db_name [-t name_list]
srvctl unsetenv database -d prod -t CLASSPATH
In 11g Release 2, some command's syntax has been changed:
srvctl add database -d db_unique_name -o ORACLE_HOME [-x node_name] [-m domain_name] [-p spfile] [-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}] [-s start_options] [-t stop_options] [-n db_name] [-y {AUTOMATIC|MANUAL}] [-g server_pool_list] [-a "diskgroup_list"]
srvctl add database -d prod -o /u01/oracle/product/112/prod -m foo.com -p +dg1/prod/spfileprod.ora -r PRIMARY -s open -t normal -n db2 -y AUTOMATIC -g svrpool1,svrpool2 -a "dg1,dg2"
srvctl remove database -d db_unique_name [-f] [-y] [-v]
srvctl remove database -d prod -y
srvctl stop database -d db_unique_name [-o stop_options] [-f]
srvctl stop database -d dev -f
srvctl status database -d db_unique_name [-f] [-v]
srvctl status db -d sat -v
srvctl enable database -d db_unique_name [-n node_name]
srvctl enable database -d vis -n lnx01
srvctl disable database -d db_unique_name [-n node_name]
srvctl disable db -d vis -n lnx03
srvctl config database [-d db_unique_name [-a]]
srvctl config db -d db_erp -a
srvctl modify database -d db_unique_name [-n db_name] [-o ORACLE_HOME] [-u oracle_user] [-m domain] [-p spfile] [-r {PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY}] [-s start_options] [-t stop_options] [-y {AUTOMATIC|MANUAL}] [-g "server_pool_list"] [-a "diskgroup_list"|-z]
srvctl modify db -d prod -r logical_standby
srvctl modify database -d racTest -a "SYSFILES,LOGS,OLTP"
Instance:
srvctl add instance -d db_name -i inst_name -n node_name
srvctl add instance -d prod -i prod01 -n linux01
srvctl remove instance -d db_name -i inst_name [-f]
srvctl remove instance -d prod -i prod01
srvctl start instance -d db_name -i inst_names [-o start_options] [-c connect_str|-q]
srvctl start instance -d db_name -i inst_names [-o open]
srvctl start instance -d db_name -i inst_names -o nomount
srvctl start instance -d db_name -i inst_names -o mount
srvctl start instance -d dev -i dev2
srvctl stop instance -d db_name -i inst_names [-o stop_options] [-c connect_str|-q]
srvctl stop instance -d db_name -i inst_names [-o normal]
srvctl stop instance -d db_name -i inst_names -o transactional
srvctl stop instance -d db_name -i inst_names -o immediate
srvctl stop instance -d db_name -i inst_names -o abort
srvctl stop inst -d vis -i vis
srvctl status instance -d db_name -i inst_names [-f] [-v] [-S level]
srvctl status inst -d racdb -i racdb2
srvctl enable instance -d db_name -i inst_names
srvctl enable instance -d prod -i "prod1,prod2"
srvctl disable instance -d db_name -i inst_names
srvctl disable inst -d prod -i "prod1,prod3"
srvctl modify instance -d db_name -i inst_name {-s asm_inst_name|-r} -- set dependency of instance to ASM
srvctl modify instance -d db_name -i inst_name -n node_name -- move the instance
srvctl modify instance -d db_name -i inst_name -r -- remove the instance
srvctl getenv instance -d db_name -i inst_name [-t name_list]
srvctl setenv instance -d db_name [-i inst_name] {-t "name=val[,name=val,...]" | -T "name=val"}
srvctl unsetenv instance -d db_name [-i inst_name] [-t name_list]
In 11g Release 2, some command's syntax has been changed:
srvctl start instance -d db_unique_name {-n node_name | -i "instance_name_list"} [-o start_options]
srvctl start instance -d prod -n node2
srvctl start inst -d prod -i "prod2,prod3"
srvctl stop instance -d db_unique_name {[-n node_name]|[-i "instance_name_list"]} [-o stop_options] [-f]
srvctl stop inst -d prod -n node1
srvctl stop instance -d prod -i prod1
srvctl status instance -d db_unique_name {-n node_name | -i "instance_name_list"} [-f] [-v]
srvctl status instance -d prod -i "prod1,prod2" -v
srvctl modify instance -d db_unique_name -i instance_name {-n node_name|-z}
srvctl modify instance -d prod -i prod1 -n mynode
srvctl modify inst -d prod -i prod1 -z
Service:
srvctl add service -d db_name -s service_name -r pref_insts [-a avail_insts] [-P TAF_policy]
srvctl add service -d db_name -s service_name -u {-r "new_pref_inst" | -a "new_avail_inst"}
srvctl add service -d RAC -s PRD -r RAC01,RAC02 -a RAC03,RAC04
srvctl add serv -d CRM -s CRM -r CRM1 -a CRM3 -P basic
srvctl remove service -d db_name -s service_name [-i inst_name] [-f]
srvctl remove serv -d dev -s sales
srvctl remove service -d dev -s sales -i dev01,dev02
srvctl start service -d db_name [-s service_names [-i inst_name]] [-o start_options]
srvctl start service -d db_name -s service_names [-o open]
srvctl start service -d db_name -s service_names -o nomount
srvctl start service -d db_name -s service_names -o mount
srvctl start serv -d dwh -s dwh
srvctl stop service -d db_name [-s service_names [-i inst_name]] [-f]
srvctl stop serv -d dwh -s dwh
srvctl status service -d db_name [-s service_names] [-f] [-v] [-S level]
srvctl status service -d dev -s dev
srvctl enable service -d db_name -s service_names [-i inst_name]
srvctl enable service -d apps -s apps1
srvctl disable service -d db_name -s service_names [-i inst_name]
srvctl disable serv -d dev -s dev -i dev1
srvctl config service -d db_name [-s service_name] [-a] [-S level]
srvctl config service -d db_name -a -- -a shows TAF configuration
srvctl config service -d TEST -s test PREF:TST1 AVAIL:TST2
srvctl modify service -d db_name -s service_name -i old_inst_name -t new_inst_name [-f]
srvctl modify service -d db_name -s service_name -i avail_inst_name -r [-f]
srvctl modify service -d db_name -s service_name -n -i preferred_list [-a available_list] [-f]
srvctl modify service -d db_name -s service_name -i old_inst_name -a avail_inst -P TAF_policy
srvctl modify serv -d PROD -s DWH -n -i I1,I2,I3,I4 -a I5,I6
srvctl relocate service -d db_name -s service_name -i old_inst_name -t target_inst [-f]
srvctl getenv service -d db_name -s service_name -t name_list
srvctl setenv service -d db_name [-s service_name] {-t "name=val[,name=val,...]" | -T "name=val"}
srvctl unsetenv service -d db_name -s service_name -t name_list
In 11g Release 2, some command's syntax has been changed:
srvctl add service -d db_unique_name -s service_name [-l [PRIMARY][,PHYSICAL_STANDBY][,LOGICAL_STANDBY][,SNAPSHOT_STANDBY]] [-y {AUTOMATIC|MANUAL}] [-q {true|false}] [-j {SHORT|LONG}] [-B {NONE|SERVICE_TIME|THROUGHPUT}][-e {NONE|SESSION|SELECT}] [-m {NONE|BASIC}][-z failover_retries] [-w failover_delay]
srvctl add service -d rac -s rac1 -q TRUE -m BASIC -e SELECT -z 180 -w 5 -j LONG
srvctl add service -d db_unique_name -s service_name -u {-r preferred_list | -a available_list}
srvctl add service -d db_unique_name -s service_name
-g server_pool [-c {UNIFORM|SINGLETON}] [-k network_number]
[-l [PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY]
[-y {AUTOMATIC|MANUAL}] [-q {TRUE|FALSE}] [-j {SHORT|LONG}]
[-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}]
[-m {NONE|BASIC}] [-P {BASIC|NONE|PRECONNECT}] [-x {TRUE|FALSE}]
[-z failover_retries] [-w failover_delay]
srvctl add service -d db_unique_name -s service_name -r preferred_list [-a available_list] [-P {BASIC|NONE|PRECONNECT}]
[-l [PRIMARY|PHYSICAL_STANDBY|LOGICAL_STANDBY|SNAPSHOT_STANDBY]
[-y {AUTOMATIC|MANUAL}] [-q {TRUE|FALSE}] [-j {SHORT|LONG}]
[-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}]
[-m {NONE|BASIC}] [-x {TRUE|FALSE}] [-z failover_retries] [-w failover_delay]
srvctl add serv -d dev -s sales -r dev01,dev02 -a dev03 -P PRECONNECT
srvctl start service -d db_unique_name [-s "service_name_list" [-n node_name | -i instance_name]] [-o start_options]
srvctl start serv -d dev -s dev
srvctl start service -d dev -s dev -i dev2
srvctl stop service -d db_unique_name [-s "service_name_list"] [-n node_name | -i instance_name] [-f]
srvctl stop service -d dev -s dev
srvctl stop serv -d dev -s dev -i dev2
srvctl status service -d db_unique_name [-s "service_name_list"] [-f] [-v]
srvctl status service -d dev -s dev -v
srvctl enable service -d db_unique_name -s "service_name_list" [-i instance_name | -n node_name]
srvctl enable service -d dev -s dev
srvctl enable serv -d dev -s dev -i dev1
srvctl disable service -d db_unique_name -s "service_name_list" [-i instance_name | -n node_name]
srvctl disable service -d dev -s "dev,marketing"
srvctl disable serv -d dev -s dev -i dev1
srvctl config service -d db_unique_name [-s service_name] [-a]
srvctl config service -d dev -s dev
srvctl modify service -d db_unique_name -s service_name
[-c {UNIFORM|SINGLETON}] [-P {BASIC|PRECONNECT|NONE}]
[-l {[PRIMARY]|[PHYSICAL_STANDBY]|[LOGICAL_STANDBY]|[SNAPSHOT_STANDBY]} [-q {TRUE|FALSE}] [-x {TRUE|FALSE}] [-j {SHORT|LONG}] [-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}] [-m {NONE|BASIC}] [-z failover_retries] [-w failover_delay] [-y {AUTOMATIC|MANUAL}]
srvctl modify service -d db_unique_name -s service_name -i old_instance_name -t new_instance_name [-f]
srvctl modify service -d db_unique_name -s service_name -i avail_inst_name -r [-f]
srvctl modify service -d db_unique_name -s service_name -n -i preferred_list [-a available_list] [-f]
srvctl modify service -d dev -s dev -i dev1 -t dev2
srvctl modify serv -d dev -s dev -i dev1 -r
srvctl modify service -d dev -s dev -n -i dev1 -a dev2
srvctl relocate service -d db_unique_name -s service_name {-c source_node -n target_node|-i old_instance_name -t new_instance_name} [-f]
srvctl relocate service -d dev -s dev -i dev1 -t dev3
Nodeapps:
#srvctl add nodeapps -n node_name -o ORACLE_HOME -A name|ip/netmask[/if1[|if2|...]]
#srvctl add nodeapps -n lnx02 -o $ORACLE_HOME -A 192.168.0.151/255.255.0.0/eth0
#srvctl remove nodeapps -n node_names [-f]
#srvctl start nodeapps -n node_name -- Starts GSD, VIP, listener & ONS
#srvctl stop nodeapps -n node_name [-r] -- Stops GSD, VIP, listener & ONS
#srvctl status nodeapps -n node_name
#srvctl config nodeapps -n node_name [-a] [-g] [-o] [-s] [-l]
-a Display VIP configuration
-g Display GSD configuration
-s Display ONS daemon configuration
-l Display listener configuration
#srvctl modify nodeapps -n node_name [-A new_vip_address]
#srvctl modify nodeapps -n lnx06 -A 10.50.99.43/255.255.252.0/eth0
#srvctl getenv nodeapps -n node_name [-t name_list]
#srvctl setenv nodeapps -n node_name {-t "name=val[,name=val,...]"|-T "name=val"}
#srvctl setenv nodeapps -n adcracdbq3 -t "TNS_ADMIN=/u01/app/oracle/product/11.1/asm/network/admin"
#srvctl unsetenv nodeapps -n node_name [-t name_list]
In 11g Release 2, some command's syntax has been changed:
srvctl add nodeapps -n node_name -A {name|ip}/netmask[/if1[|if2|...]] [-m multicast_ip_address] [-p multicast_port_number] [-l ons_local_port] [-r ons_remote-port] [-t host[:port][,host[:port],...]] [-v]
srvctl add nodeapps -S subnet/netmask[/if1[|if2|...]] [-d dhcp_server_type] [-m multicast_ip_address] [-p multicast_port_number] [-l ons_local_port] [-r ons_remote-port] [-t host[:port][,host[:port],...]] [-v]
#srvctl add nodeapps -n devnode1 -A 1.2.3.4/255.255.255.0
srvctl remove nodeapps [-f] [-y] [-v]
srvctl remove nodeapps
srvctl start nodeapps [-n node_name] [-v]
srvctl start nodeapps
srvctl stop nodeapps [-n node_name] [-r] [-v]
srvctl stop nodeapps
srvctl status nodeapps
srvctl enable nodeapps [-g] [-v]
srvctl enable nodeapps -g -v
srvctl disable nodeapps [-g] [-v]
srvctl disable nodeapps -g -v
srvctl config nodeapps [-a] [-g] [-s] [-e]
srvctl config nodeapps -a -g -s -e
srvctl modify nodeapps [-n node_name -A new_vip_address] [-S subnet/netmask[/if1[|if2|...]] [-m multicast_ip_address] [-p multicast_port_number] [-e eons_listen_port] [-l ons_local_port] [-r ons_remote_port] [-t host[:port][,host:port,...]] [-v]
srvctl modify nodeapps -n mynode1 -A 100.200.300.40/255.255.255.0/eth0
srvctl getenv nodeapps [-a] [-g] [-s] [-e] [-t "name_list"] [-v]
srvctl getenv nodeapps -a
srvctl setenv nodeapps {-t "name=val[,name=val][...]" | -T "name=val"} [-v]
srvctl setenv nodeapps -T "CLASSPATH=/usr/local/jdk/jre/rt.jar" -v
srvctl unsetenv nodeapps -t "name_list" [-v]
srvctl unsetenv nodeapps -t "test_var1,test_var2"
ASM:
srvctl add asm -n node_name -i asminstance -o ORACLE_HOME [-p spfile]
srvctl remove asm -n node_name [-i asminstance] [-f]
srvctl remove asm -n db6
srvctl start asm -n node_name [-i asminstance] [-o start_options] [-c connect_str|-q]
srvctl start asm -n node_name [-i asminstance] [-o open]
srvctl start asm -n node_name [-i asminstance] -o nomount
srvctl start asm -n node_name [-i asminstance] -o mount
srvctl start asm -n linux01
srvctl stop asm -n node_name [-i asminstance] [-o stop_options] [-c connect_str|-q]
srvctl stop asm -n node_name [-i asminstance] [-o normal]
srvctl stop asm -n node_name [-i asminstance] -o transactional
srvctl stop asm -n node_name [-i asminstance] -o immediate
srvctl stop asm -n node_name [-i asminstance]-o abort
srvctl stop asm -n racnode1
srvctl stop asm -n devnode1 -i +asm1
srvctl status asm -n node_name
srvctl status asm -n racnode1
srvctl enable asm -n node_name [-i asminstance]
srvctl enable asm -n lnx03 -i +asm3
srvctl disable asm -n node_name [-i asminstance]
srvctl disable asm -n lnx02 -i +asm2
srvctl config asm -n node_name
srvctl config asm -n lnx08
srvctl modify asm -n node_name -i asminstance [-o ORACLE_HOME] [-p spfile]
srvctl modify asm -n rac6 -i +asm6 -o /u01/app/oracle/product/11.1/asm
In 11g Release 2, some command's syntax has been changed:
srvctl add asm [-l lsnr_name] [-p spfile] [-d asm_diskstring]
srvctl add asm
srvctl add asm -l LISTENERASM -p +dg_data/spfile.ora
srvctl remove asm [-f]
srvctl remove asm -f
srvctl start asm [-n node_name] [-o start_options]
srvctl start asm -n devnode1
srvctl stop asm [-n node_name] [-o stop_options] [-f]
srvctl stop asm -n devnode1 -f
srvctl status asm [-n node_name] [-a]
srvctl status asm -n devnode1 -a
srvctl enable asm [-n node_name]
srvctl enable asm -n devnode1
srvctl disable asm [-n node_name]
srvctl disable asm -n devnode1
srvctl config asm [-a]
srvctl config asm -a
srvctl modify asm [-l lsnr_name] [-p spfile] [-d asm_diskstring]
srvctl modify asm [-n node_name] [-l listener_name] [-d asm_diskstring] [-p spfile_path_name]
srvctl modify asm -l lsnr1
srvctl getenv asm [-t name[, ...]]
srvctl getenv asm
srvctl setenv asm {-t "name=val [,...]" | -T "name=value"}
srvctl setenv asm -t LANG=en
srvctl unsetenv asm -t "name[, ...]"
srvctl unsetenv asm -t CLASSPATH
Listener:
srvctl add listener -n node_name -o ORACLE_HOME [-l listener_name] -- 11g R1 command
srvctl remove listener -n node_name [-l listener_name] -- 11g R1 command
srvctl start listener -n node_name [-l listener_names]
srvctl start listener -n node1
srvctl stop listener -n node_name [-l listener_names]
srvctl stop listener -n node1
srvctl status listener [-n node_name] [-l listener_names] -- 11g R1 command
srvctl status listener -n node2
srvctl config listener -n node_name
srvctl modify listener -n node_name [-l listener_names] -o ORACLE_HOME -- 11g R1 command
srvctl modify listener -n racdb4 -o /u01/app/oracle/product/11.1/asm -l "LISTENER_RACDB4"
In 11g Release 2, some command's syntax has been changed:
srvctl add listener [-l lsnr_name] [-s] [-p "[TCP:]port[, ...][/IPC:key][/NMP:pipe_name][/TCPS:s_port] [/SDP:port]"] [-k network_number] [-o ORACLE_HOME]
srvctl add listener -l LISTENERASM -p "TCP:1522" -o $ORACLE_HOME
srvctl add listener -l listener112 -p 1341 -o /ora/ora112
srvctl remove listener [-l lsnr_name|-a] [-f]
srvctl remove listener -l lsnr01
srvctl stop listener [-n node_name] [-l lsnr_name] [-f]
srvctl enable listener [-l lsnr_name] [-n node_name]
srvctl enable listener -l listener_dev -n node5
srvctl disable listener [-l lsnr_name] [-n node_name]
srvctl disable listener -l listener_dev -n node5
srvctl config listener [-l lsnr_name] [-a]
srvctl config listener
srvctl modify listener [-l listener_name] [-o oracle_home] [-u user_name] [-p "[TCP:]port_list[/IPC:key][/NMP:pipe_name][/TCPS:s_port][/SDP:port]"] [-k network_number]
srvctl modify listener -n node1 -p "TCP:1521,1522"
srvctl getenv listener [-l lsnr_name] [-t name[, ...]]
srvctl getenv listener
srvctl setenv listener [-l lsnr_name] {-t "name=val [,...]" | -T "name=value"}
srvctl setenv listener -t LANG=en
srvctl unsetenv listener [-l lsnr_name] -t "name[, ...]"
srvctl unsetenv listener -t "TNS_ADMIN"
New srvctl commands in 11g Release 2
Diskgroup:
srvctl remove diskgroup -g diskgroup_name [-n node_list] [-f]
srvctl remove diskgroup -g DG1 -f
srvctl start diskgroup -g diskgroup_name [-n node_list]
srvctl start diskgroup -g diskgroup1 -n node1,node2
srvctl stop diskgroup -g diskgroup_name [-n node_list] [-f]
srvctl stop diskgroup -g ASM_FRA_DG
srvctl stop diskgroup -g dg1 -n node1,node2 -f
srvctl status diskgroup -g diskgroup_name [-n node_list] [-a]
srvctl status diskgroup -g dg_data -n node1,node2 -a
srvctl enable diskgroup -g diskgroup_name [-n node_list]
srvctl enable diskgroup -g diskgroup1 -n node1,node2
srvctl disable diskgroup -g diskgroup_name [-n node_list]
srvctl disable diskgroup -g dg_fra -n node1, node2
Home:
srvctl start home -o ORACLE_HOME -s state_file [-n node_name]
srvctl start home -o /u01/app/oracle/product/11.2.0/db_1 -s ~/state.txt
srvctl stop home -o ORACLE_HOME -s state_file [-t stop_options] [-n node_name] [-f]
srvctl stop home -o /u01/app/oracle/product/11.2.0/db_1 -s ~/state.txt
srvctl status home -o ORACLE_HOME -s state_file [-n node_name]
srvctl status home -o /u01/app/oracle/product/11.2.0/db_1 -s ~/state.txt
ONS (Oracle Notification Service) :
srvctl add ons [-l ons-local-port] [-r ons-remote-port] [-t host[:port][,host[:port]...]] [-v]
srvctl add ons -l 6200
srvctl remove ons [-f] [-v]
srvctl remove ons -f
srvctl start ons [-v]
srvctl start ons -v
srvctl stop ons [-v]
srvctl stop ons -v
srvctl status ons
srvctl enable ons [-v]
srvctl enable ons
srvctl disable ons [-v]
srvctl disable ons
srvctl config ons
srvctl modify ons [-l ons-local-port] [-r ons-remote-port] [-t host[:port][,host[:port]...]] [-v]
srvctl modify ons
EONS:
srvctl add eons [-p portnum] [-m multicast-ip-address] [-e eons-listen-port] [-v]
#srvctl add eons -p 2018
srvctl remove eons [-f] [-v]
srvctl remove eons -f
srvctl start eons [-v]
srvctl start eons
srvctl stop eons [-f] [-v]
srvctl stop eons -f
srvctl status eons
srvctl enable eons [-v]
srvctl enable eons
srvctl disable eons [-v]
srvctl disable eons
srvctl config eons
srvctl modify eons [-m multicast_ip_address] [-p multicast_port_number] [-e eons_listen_port] [-v]
srvctl modify eons -p 2018
FileSystem:
srvctl add filesystem -d volume_device -v volume_name -g diskgroup_name [-m mountpoint_path] [-u user_name]
srvctl add filesystem -d /dev/asm/d1volume1 -v VOLUME1 -d RAC_DATA -m /oracle/cluster1/acfs1
srvctl remove filesystem -d volume_device_name [-f]
srvctl remove filesystem -d /dev/asm/racvol1
srvctl start filesystem -d volume_device_name [-n node_name]
srvctl start filesystem -d /dev/asm/racvol3
srvctl stop filesystem -d volume_device_name [-n node_name] [-f]
srvctl stop filesystem -d /dev/asm/racvol1 -f
srvctl status filesystem -d volume_device_name
srvctl status filesystem -d /dev/asm/racvol2
srvctl enable filesystem -d volume_device_name
srvctl enable filesystem -d /dev/asm/racvol9
srvctl disable filesystem -d volume_device_name
srvctl disable filesystem -d /dev/asm/racvol1
srvctl config filesystem -d volume_device_path
srvctl modify filesystem -d volume_device_name -u user_name
srvctl modify filesystem -d /dev/asm/racvol1 -u sysadmin
SrvPool:
srvctl add srvpool -g server_pool [-i importance] [-l min_size] [-u max_size] [-n node_list] [-f]
srvctl add srvpool -g SP1 -i 1 -l 3 -u 7 -n node1,node2
srvctl remove srvpool -g server_pool
srvctl remove srvpool -g srvpool1
srvctl status srvpool [-g server_pool] [-a]
srvctl status srvpool -g srvpool2 -a
srvctl config srvpool [-g server_pool]
srvctl config srvpool -g dbpool
srvctl modify srvpool -g server_pool [-i importance] [-l min_size] [-u max_size] [-n node_name_list] [-f]
srvctl modify srvpool -g srvpool4 -i 0 -l 2 -u 4 -n node3, node4
Server:
srvctl status server -n "server_name_list" [-a]
srvctl status server -n server11 -a
srvctl relocate server -n "server_name_list" -g server_pool_name [-f]
srvctl relocate server -n "linux1, linux2" -g sp2
Scan (Single Client Access Name):
srvctl add scan -n scan_name [-k network_number] [-S subnet/netmask[/if1[|if2|...]]]
#srvctl add scan -n scan.mycluster.example.com
srvctl remove scan [-f]
srvctl remove scan -f
srvctl start scan [-i ordinal_number] [-n node_name]
srvctl start scan -i 1 -n node1
srvctl stop scan [-i ordinal_number] [-f]
srvctl stop scan -i 1
srvctl status scan [-i ordinal_number]
srvctl status scan -i 1
srvctl enable scan [-i ordinal_number]
srvctl enable scan -i 1
srvctl disable scan [-i ordinal_number]
srvctl disable scan -i 3
srvctl config scan [-i ordinal_number]
srvctl config scan -i 2
srvctl modify scan -n scan_name
srvctl modify scan -n scan1
srvctl relocate scan -i ordinal_number [-n node_name]
srvctl relocate scan -i 2 -n node2
ordinal_number=1,2,3
Scan_listener:
srvctl add scan_listener [-l lsnr_name_prefix] [-s] [-p "[TCP:]port_list[/IPC:key][/NMP:pipe_name][/TCPS:s_port] [/SDP:port]"]
#srvctl add scan_listener -l myscanlistener
srvctl remove scan_listener [-f]
srvctl remove scan_listener -f
srvctl start scan_listener [-n node_name] [-i ordinal_number]
srvctl start scan_listener -i 1
srvctl stop scan_listener [-i ordinal_number] [-f]
srvctl stop scan_listener -i 3
srvctl status scan_listener [-i ordinal_number]
srvctl status scan_listener -i 1
srvctl enable scan_listener [-i ordinal_number]
srvctl enable scan_listener -i 2
srvctl disable scan_listener [-i ordinal_number]
srvctl disable scan_listener -i 1
srvctl config scan_listener [-i ordinal_number]
srvctl config scan_listener -i 3
srvctl modify scan_listener {-p [TCP:]port[/IPC:key][/NMP:pipe_name] [/TCPS:s_port][/SDP:port] | -u }
srvctl modify scan_listener -u
srvctl relocate scan_listener -i ordinal_number [-n node_name]
srvctl relocate scan_listener -i 1
ordinal_number=1,2,3
GNS (Grid Naming Service):
srvctl add gns -i ip_address -d domain
srvctl add gns -i 192.124.16.96 -d cluster.mycompany.com
srvctl remove gns [-f]
srvctl remove gns
srvctl start gns [-l log_level] [-n node_name]
srvctl start gns
srvctl stop gns [-n node_name] [-v] [-f]
srvctl stop gns
srvctl status gns [-n node_name]
srvctl status gns
srvctl enable gns [-n node_name]
srvctl enable gns
srvctl disable gns [-n node_name]
srvctl disable gns -n devnode2
srvctl config gns [-a] [-d] [-k] [-m] [-n node_name] [-p] [-s] [-V] [-q name] [-l] [-v]
srvctl config gns -n lnx03
srvctl modify gns [-i ip_address] [-d domain]
srvctl modify gns -i 192.000.000.007
srvctl relocate gns [-n node_name]
srvctl relocate gns -n node2
VIP (Virtual Internet Protocol):
srvctl add vip -n node_name -A {name|ip}/netmask[/if1[|if2|...]] [-k network_number] [-v]
#srvctl add vip -n node96 -A 192.124.16.96/255.255.255.0 -k 2
srvctl remove vip -i "vip_name_list" [-f] [-y] [-v]
srvctl remove vip -i "vip1,vip2,vip3" -f -y -v
srvctl start vip {-n node_name|-i vip_name} [-v]
srvctl start vip -i dev1-vip -v
srvctl stop vip {-n node_name|-i vip_name} [-r] [-v]
srvctl stop vip -n node1 -v
srvctl status vip {-n node_name|-i vip_name}
srvctl status vip -i node1-vip
srvctl enable vip -i vip_name [-v]
srvctl enable vip -i prod-vip -v
srvctl disable vip -i vip_name [-v]
srvctl disable vip -i vip3 -v
srvctl config vip {-n node_name|-i vip_name}
srvctl config vip -n devnode2
srvctl getenv vip -i vip_name [-t "name_list"] [-v]
srvctl getenv vip -i node1-vip
srvctl setenv vip -i vip_name {-t "name=val[,name=val,...]" | -T "name=val"}
srvctl setenv vip -i dev1-vip -t LANG=en
srvctl unsetenv vip -i vip_name -t "name_list" [-v]
srvctl unsetenv vip -i myvip -t CLASSPATH
OC4J (Oracle Container for Java):
srvctl add oc4j [-v]
srvctl add oc4j
srvctl remove oc4j [-f] [-v]
srvctl remove oc4j
srvctl start oc4j [-v]
srvctl start oc4j -v
srvctl stop oc4j [-f] [-v]
srvctl stop oc4j -f -v
srvctl status oc4j [-n node_name]
srvctl status oc4j -n lnx01
srvctl enable oc4j [-n node_name] [-v]
srvctl enable oc4j -n dev3
srvctl disable oc4j [-n node_name] [-v]
srvctl disable oc4j -n dev1
srvctl config oc4j
srvctl modify oc4j -p oc4j_rmi_port [-v]
srvctl modify oc4j -p 5385
srvctl relocate oc4j [-n node_name] [-v]
srvctl relocate oc4j -n lxn06 -v
ORACLE STREAMS BIDIRECTIONAL CONFIGURATION WITH CONFLICT RESOLUTION BEST PRACTICES
This section recommends best practices for configuring Streams for Oracle Database 10g Release 2. The discussion is divided into the following sections:
• Guidelines for Preparing All Streams Configurations
• Recommendations for Downstream Capture Configurations
• Recommendations for Upstream (Local) Capture Configurations
All users should perform the instructions in the Guidelines for Preparing All Streams Configurations section of this article before branching off to either the downstream or the upstream configuration recommendations.
The examples in this article are based on a configuration that uses:
• Two databases
• Streams database administrator privileges
• Automatic Storage Management (ASM).
Guidelines for Preparing All Streams Configurations
The following list summarizes the tasks you must perform to properly prepare each Streams database:
Use Oracle Database 10g Release 2 (10.2.0.4)
Ensure that all Streams databases are running Oracle Database 10g Release 2 (release 10.2.0.4 is recommended) and apply any critical patch sets. Although the source database must be running Oracle Database 10g Release 2, the downstream capture database can run Release 10.2.0.4 or a later release.
See the Oracle Database Upgrade Guide [7] for database upgrade information and the "Patches and Updates" tab on Oracle Metalink for information about critical Streams patches for release 10.2 and higher releases.
Verify platform requirements for Downstream Capture
Downstream capture requires that the source and destination databases run on the same platform.
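A simple sanity check is to compare the platform reported by each database; run the following query on both the source and the destination and make sure the results match:
select platform_id, platform_name from v$database;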
Prepare the Source and Target Database Redo Logs for Streams
1. Configure the source and target databases in ARCHIVELOG mode. For one-way Streams local capture, you must configure the source database to run in ARCHIVELOG mode, but the best practice is for both the source and target databases to be in ARCHIVELOG mode in case media recovery is required for either of the databases. If replication is bi-directional, then you must configure both source and target databases in ARCHIVELOG mode.
2. Configure the local archive destination parameter, LOG_ARCHIVE_DEST_1, and do not use the flash recovery area for it. Streams local capture and downstream capture require online redo logs and archived redo logs. Do not place archived redo log files in the flash recovery area, because it is a fixed-size storage area and the files may be deleted when the amount of free space becomes low. (A brief sketch of both settings follows.)
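The following is a minimal sketch of both settings on a single instance (the archive destination path and VALID_FOR attributes are examples; in RAC, stop and start the database with srvctl instead of SHUTDOWN/STARTUP):
-- example only: set a local archive destination outside the flash recovery area
alter system set log_archive_dest_1='LOCATION=+DATA/PRODDB_TULSA/ VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE) MANDATORY' scope=spfile sid='*';
-- enable ARCHIVELOG mode (requires an instance restart)
shutdown immediate
startup mount
alter database archivelog;
alter database open;
archive log list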
Create a Tablespace Dedicated to Streams
Create a dedicated Streams tablespace to contain the Streams AQ tables and other objects related to Streams, and to allow for better space management for the Streams objects:
• For downstream capture, create the Streams tablespace on the downstream capture database only.
• For upstream capture (and bidirectional replication), create the Streams tablespace on both the source and target databases.
For example, to create a Streams tablespace of 50 MB, which is the minimum recommended size, issue the following statements:
CREATE TABLESPACE streams_data
LOGGING
EXTENT MANAGEMENT LOCAL AUTOALLOCATE
SEGMENT SPACE MANAGEMENT AUTO
DATAFILE '+DATA' SIZE 50M
AUTOEXTEND ON NEXT 50M MAXSIZE UNLIMITED ;
Create the Streams Administrator database user
Create the Streams administrator account that you will use to create and modify all Streams configurations and perform administrator functions with the DBMS_STREAMS_ADM PL/SQL package. Do this both on the Source and Destination databases.
For example, to create a new user named STRMADMIN with the default STREAMS_DATA tablespace, enter the following SQL*Plus statements as SYSDBA:
drop user STRMADMIN cascade;
CREATE USER STRMADMIN IDENTIFIED BY STRMADMIN
DEFAULT TABLESPACE STREAMS_DATA
TEMPORARY TABLESPACE TEMP
QUOTA UNLIMITED ON STREAMS_DATA;
Grant Streams authorization and DBA privileges
Use the DBMS_STREAMS_AUTH package to grant all of the privileges necessary for the Streams package procedures to function properly. Do this on both the source and destination databases:
GRANT DBA TO STRMADMIN;
BEGIN
DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE(
grantee => 'STRMADMIN',
grant_privileges => true);
END;
/
Also, set Logminer to use the Streams tablespace. Do this both on the Source and Destination databases.
exec DBMS_LOGMNR_D.SET_TABLESPACE ('STREAMS_DATA');
Check that the Streams administrator account has been created:
SELECT * FROM dba_streams_administrator;
Set key initialization parameters
On each Streams database in the configuration, specify the initialization parameters shown below:
Parameter Name - Description
AQ_TM_PROCESSES - Set at least one queue monitor process, for example:
alter system set aq_tm_processes=1 scope=spfile sid='*';
DB_NAME - The name returned by "select name from v$database". Each database should have the same DB_NAME, e.g. PRODDB.
DB_UNIQUE_NAME - Each database's unique name is different. In this example, the database on servers ldbd01/02 is PRODDB_TULSA and the database on servers ldbd10/11 is PRODDB_PLANO.
DB_DOMAIN - On each Streams database, specify the network domain where the database resides. In this example the db_domain is DOMAIN.COM, so the source database is named PRODDB_TULSA.DOMAIN.COM.
GLOBAL_NAMES=TRUE - On each Streams database, set GLOBAL_NAMES=TRUE so that the database links work correctly. For example, "select * from global_name" on the source database then returns PRODDB_TULSA.DOMAIN.COM.
JOB_QUEUE_PROCESSES=4 - Ensure that JOB_QUEUE_PROCESSES provides at least 4 job queue processes for Streams. If the parameter is already set, increase it by 4.
COMPATIBLE=10.2 - Make certain the COMPATIBLE parameter is at least 10.2.
_JOB_QUEUE_INTERVAL=1 - On each Streams database, this controls how often (in seconds) the job queue is scanned for work. Setting it to 1 second improves propagation job performance and minimizes the delay between propagation job executions. Since this is a hidden parameter, it is set as follows:
alter system set "_job_queue_interval"=1 scope=spfile sid='*';
TIMED_STATISTICS=TRUE - On each Streams database, set TIMED_STATISTICS=TRUE so that performance statistics are gathered for analysis of potential performance bottlenecks.
STATISTICS_LEVEL=TYPICAL or ALL - On each Streams database, set this parameter to TYPICAL to collect the necessary statistics, or to ALL to gather more than the bare minimum.
SHARED_POOL_SIZE=256M - On each Streams database, set this parameter to a minimum of 256 MB, because Streams uses a significant amount of PL/SQL and library cache components. Later performance analysis may recommend increasing SHARED_POOL_SIZE further, especially on the database running the Streams apply processes.
STREAMS_POOL_SIZE=256M - On each database where a capture or apply process runs, set STREAMS_POOL_SIZE to a minimum of 256M. Memory from the Streams pool is used by both the Streams processes and the buffered queue, with the LCRs in the buffered queue the largest component. Use the V$STREAMS_POOL_ADVICE view to help size the buffered queue.
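As a sketch, these parameters can be applied with ALTER SYSTEM on each Streams database (adjust sizes to your environment; COMPATIBLE is normally set at creation or upgrade time and is only verified here):
alter system set aq_tm_processes=1 scope=spfile sid='*';
alter system set global_names=true scope=both sid='*';
alter system set job_queue_processes=4 scope=both sid='*';
alter system set "_job_queue_interval"=1 scope=spfile sid='*';
alter system set timed_statistics=true scope=both sid='*';
alter system set statistics_level=typical scope=both sid='*';
alter system set shared_pool_size=256M scope=spfile sid='*';
alter system set streams_pool_size=256M scope=both sid='*';
show parameter compatible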
Create Database links between the Source and Target databases
Before creating database links, ensure that all Streams databases are reachable using Oracle Net aliases. Perform the following steps to create database links:
1. Log into the target database as the Streams administrator user and determine the global database name by issuing the following query:
select * from global_name;
You will use the name returned by this query to create the database link in the next step.
2. Create a TNSNAMES.ORA entry for each database using Streams. Below is an example of the TNSNAMES.ORA file entry:
PRODDB_PLANO.DOMAIN.COM =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = hostname0-vip)(PORT = 1521))
(ADDRESS = (PROTOCOL = TCP)(HOST = hostname1-vip)(PORT = 1521))
(LOAD_BALANCE = yes)
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = PRODDB_PLANO.DOMAIN.COM)
(FAILOVER_MODE =
(TYPE = SELECT)
(METHOD = BASIC)
(RETRIES = 180)
(DELAY = 5)
)
)
)
3. Log into the source database as the Streams administrator.
conn strmadmin/strmadmin
4. Create a database link first from the Source database to the Target database. For example, on the source database, enter the following statement to create a database link from the source database to a target database named PRODDB_TULSA.DOMAIN.COM using the TNSNAMES.ORA alias entry PRODDB_TULSA.DOMAIN.COM.
CREATE DATABASE LINK PRODDB_TULSA.DOMAIN.COM CONNECT TO
STRMADMIN IDENTIFIED BY STRMADMIN USING 'PRODDB_TULSA.DOMAIN.COM';
5. Validate that the database link is working by querying DUAL over the link from the source database:
select * from dual@PRODDB_TULSA.DOMAIN.COM;
DUM
---
X
If an error is returned, it is probably due to an incorrect TNSNAMES.ORA descriptor or to an incorrect database name. Do not proceed to the next step until the database link is working properly.
6. Log into the Streams Administrator account on the Destination database.
conn strmadmin/strmadmin@PRODDB_TULSA.DOMAIN.COM
7. Create a database link from the Destination database to the source database.
CREATE DATABASE LINK PRODDB_PLANO.DOMAIN.COM CONNECT TO
STRMADMIN IDENTIFIED BY STRMADMIN USING 'PRODDB_PLANO.DOMAIN.COM';
8. Validate that the database link is working by querying DUAL over the link from the target database:
select * from dual@PRODDB_PLANO.DOMAIN.COM;
9. If an error occurs, it is probably because of an incorrect TNSNAMES.ORA descriptor or an incorrect database name. Do not proceed to the next section until the database link is working properly.
Set up Directory Objects
In many cases, Oracle Streams instantiates the objects for which changes will be captured and uses Data Pump to initially populate the replica tables at the target database. As part of this instantiation process, Data Pump needs a file system directory location on both the source and target database servers for staging and moving all Streams objects. Streams uses Oracle directory objects that point to these file system locations.
Directory objects are needed by Streams for instantiation purposes. Ensure that the locations to which the directory objects point have sufficient storage space to support all objects that Streams instantiates. On both the source and downstream database servers, determine an appropriate location for the file system directory and create Oracle directory objects on each of the databases. For example, as the Streams administrator user, create the Oracle directory objects as follows:
create directory streams_dir as '/SSM/oracle/admin/{$ORACLE_SID}/dpdump';
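To confirm the directory object exists and points at the intended path, you can check the data dictionary on each database:
select directory_name, directory_path from dba_directories where directory_name = 'STREAMS_DIR';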
Account for object or tablespace name differences when replicating DDLs
If you choose to replicate DDLs, then you need to account for object or tablespace name differences between the source and target databases. Here are some best practices:
• Avoid system-generated naming for constraints or indexes
• Keep the same tablespace names between databases or use a DDL handler to explicitly address the naming differences
Recommendations for Bidirectional Configurations
Downstream capture is typically used to replicate a full database, to offload processing from the source database to the target database, or to reduce data loss with ASYNC or SYNC redo transport.
Specify initialization parameters on the Source and Target databases.
Note the example below. Be sure that the parameters you specify match the locations and parameter values in your Streams configuration.
Source Database:
LOG_ARCHIVE_DEST_1='LOCATION=+DATA/PRODDB_TULSA/ VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) MANDATORY'
LOG_ARCHIVE_DEST_2='SERVICE=PRODDB_PLANO.DOMAIN.COM LGWR ASYNC NOREGISTER VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=PRODDB_PLANO'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=DEFER
LOG_ARCHIVE_CONFIG='SEND, DG_CONFIG=(PRODDB_TULSA,PRODDB_PLANO)'
LOG_ARCHIVE_MAX_PROCESSES=4
Target Database:
LOG_ARCHIVE_DEST_1='LOCATION=use_db_recovery_file_dest VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE)'
LOG_ARCHIVE_DEST_2='LOCATION=+DATA/PRODDB_PLANO/ARCH_TULSA_STREAMS VALID_FOR=(STANDBY_LOGFILE,PRIMARY_ROLE)'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=ENABLE
LOG_ARCHIVE_CONFIG='RECEIVE, DG_CONFIG=(PRODDB_TULSA,PRODDB_PLANO)'
LOG_ARCHIVE_MAX_PROCESSES=4
Configure Standby Redo logs.
To enable real-time mining for capture at the downstream database, you must configure standby redo logs (SRLs) on the downstream database to accept redo from the source database. The Streams capture process mines the standby redo logs and any archived redo logs created from the standby redo logs.
You should have a minimum of (number of threads) * (online redo log groups per thread + 1) standby redo log groups. In this example there are 2 threads with 4 online redo log groups each, so at least 5 standby redo log groups per thread (10 in total) are needed. Get the maximum GROUP# from V$LOG.
SQL> select thread#, group# from v$log order by 1,2;
THREAD# GROUP#
---------- ----------
1 1
1 2
1 3
1 4
2 5
2 6
2 7
2 8
8 rows selected.
Start adding the standby redo logs at group number 9, because the highest online redo log group# is 8; groups 9 through 18 are used for the standby redo logs below.
alter system set standby_file_management=manual sid='*';
ALTER DATABASE ADD STANDBY LOGFILE THREAD 1
GROUP 9 SIZE 50M,
GROUP 10 SIZE 50M,
GROUP 11 SIZE 50M,
GROUP 12 SIZE 50M,
GROUP 13 SIZE 50M;
ALTER DATABASE ADD STANDBY LOGFILE THREAD 2
GROUP 14 SIZE 50M,
GROUP 15 SIZE 50M,
GROUP 16 SIZE 50M,
GROUP 17 SIZE 50M,
GROUP 18 SIZE 50M;
SQL> select thread#, group# from v$standby_log order by 1,2;
THREAD# GROUP#
---------- ----------
1 9
1 10
1 11
1 12
1 13
2 14
2 15
2 16
2 17
2 18
10 rows selected.
alter system set standby_file_management=auto sid='*';
Bidirectional Streams Setup
The replicated schema ("SSM" in this example) must exist in both the source and target databases.
Set up two queues, for capture and apply, in the SRC database as shown below:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.set_up_queue(
queue_table => 'APPLY_PLANOTAB',
queue_name => 'APPLY_PLANO',
queue_user => 'STRMADMIN');
end;
/
begin
dbms_streams_adm.set_up_queue(
queue_table => 'CAPTURE_PLANOTAB',
queue_name => 'CAPTURE_PLANO',
queue_user => 'STRMADMIN');
end;
/
Set up two queues, for capture and apply, in the DEST database as shown below:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.set_up_queue(
queue_table => 'APPLY_TULSATAB',
queue_name => 'APPLY_TULSA',
queue_user => 'STRMADMIN');
end;
/
begin
dbms_streams_adm.set_up_queue(
queue_table => 'CAPTURE_TULSATAB',
queue_name => 'CAPTURE_TULSA',
queue_user => 'STRMADMIN');
end;
/
Configure capture process on SRC database
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'CAPTURE',
streams_name => 'CAPTURES_PLANO',
queue_name => 'CAPTURE_PLANO',
include_dml => TRUE,
include_ddl => TRUE,
inclusion_rule => TRUE);
end;
/
Configure apply process on SRC database
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'APPLY',
streams_name => 'APPLIES_PLANO',
queue_name => 'APPLY_PLANO',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_TULSA.DOMAIN.COM');
end;
/
Configure propagation process on SRC Database:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.add_schema_propagation_rules (
schema_name => 'SSM',
streams_name => 'PROP_PLANO_TO_TULSA',
source_queue_name => 'CAPTURE_PLANO',
destination_queue_name => 'APPLY_TULSA@PRODDB_TULSA.DOMAIN.COM',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_PLANO.DOMAIN.COM',
queue_to_queue => TRUE);
end;
/
Configure Schema Supplemental logging on SRC Database:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
EXEC DBMS_CAPTURE_ADM.ABORT_SCHEMA_INSTANTIATION('SSM');
EXEC DBMS_CAPTURE_ADM.PREPARE_SCHEMA_INSTANTIATION('SSM','ALL');
Configure capture process on DEST Database:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'CAPTURE',
streams_name => 'CAPTURES_TULSA',
queue_name => 'CAPTURE_TULSA',
include_dml => TRUE,
include_ddl => TRUE);
end;
/
Set the schema instantiation SCN on SRC using the SCN of Dest database:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
SET SERVEROUTPUT ON SIZE 1000000
declare
v_scn number;
begin
v_scn := dbms_flashback.get_system_change_number();
dbms_output.put_line('Instantiation SCN is: ' || v_scn);
dbms_apply_adm.set_schema_instantiation_scn@PRODDB_PLANO.DOMAIN.COM (
source_schema_name => 'SSM',
source_database_name => 'PRODDB_TULSA.DOMAIN.COM',
instantiation_scn => v_scn,
recursive => TRUE);
end;
/
If you experience any TNS issues, reload the listener on the source and retry. If you are still receiving errors, check the SCN on each database. You may need to export the schema from PLANO, import it into TULSA, and then retry, as shown in the sketch below.
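A sketch of that export/import using Data Pump and the streams_dir directory object created earlier (the dump file names are examples, the dump file must be copied to or shared with the TULSA server, and for a consistent instantiation the export should be taken as of the instantiation SCN with FLASHBACK_SCN):
expdp strmadmin/strmadmin@PRODDB_PLANO.DOMAIN.COM schemas=SSM directory=STREAMS_DIR dumpfile=ssm_inst.dmp logfile=ssm_exp.log flashback_scn=<instantiation_scn>
impdp strmadmin/strmadmin@PRODDB_TULSA.DOMAIN.COM schemas=SSM directory=STREAMS_DIR dumpfile=ssm_inst.dmp logfile=ssm_imp.log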
Configure apply process on DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'APPLY',
streams_name => 'APPLIES_TULSA',
queue_name => 'APPLY_TULSA',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_PLANO.DOMAIN.COM');
end;
/
Configure propagation process on DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.add_schema_propagation_rules (
schema_name => 'SSM',
streams_name => 'PROP_TULSA_TO_PLANO',
source_queue_name => 'CAPTURE_TULSA',
destination_queue_name => 'APPLY_PLANO@PRODDB_PLANO.DOMAIN.COM',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_TULSA.DOMAIN.COM',
queue_to_queue => TRUE);
end;
/
Configure Schema Supplemental logging on DEST Database:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
EXEC DBMS_CAPTURE_ADM.ABORT_SCHEMA_INSTANTIATION('SSM');
EXEC DBMS_CAPTURE_ADM.PREPARE_SCHEMA_INSTANTIATION('SSM','ALL');
At this point, the databases should be replicating to one another. The next step applies if you need to restart the replication process.
You can confirm the environments are in sync by executing the following SQL on each:
select current_scn from v$database;
If the SCN numbers are identical, the Streams configuration is ready to start the replication.
Set Schema Instantiation on Target Database:
Set the schema instantiation SCN on the DEST database using the current SCN of the SRC database:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
SET SERVEROUTPUT ON SIZE 1000000
declare
v_scn number;
begin
v_scn := dbms_flashback.get_system_change_number();
dbms_output.put_line('Instantiation SCN is: ' || v_scn);
dbms_apply_adm.set_schema_instantiation_scn@PRODDB_TULSA.DOMAIN.COM(
source_schema_name => 'SSM',
source_database_name => 'PRODDB_PLANO.DOMAIN.COM',
instantiation_scn => v_scn,
recursive => TRUE);
end;
/
Add additional parameter settings to ensure the best performance, and confirm that the Streams configuration is correct.
On the Source:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
exec dbms_aqadm.ALTER_QUEUE_TABLE ('CAPTURE_PLANOTAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_aqadm.ALTER_QUEUE_TABLE ('APPLY_PLANOTAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_apply_adm.set_parameter('APPLIES_PLANO','disable_on_error','n');
exec dbms_apply_adm.set_parameter('APPLIES_PLANO', 'PARALLELISM', '6');
exec dbms_apply_adm.set_parameter('APPLIES_PLANO', '_TXN_BUFFER_SIZE', '20');
exec dbms_apply_adm.set_parameter('APPLIES_PLANO', '_HASH_TABLE_SIZE', '10000000');
exec dbms_capture_adm.set_parameter('CAPTURES_PLANO','_CHECKPOINT_FREQUENCY','500');
exec dbms_capture_adm.set_parameter('CAPTURES_PLANO','_SGA_SIZE','100');
exec dbms_capture_adm.set_parameter('CAPTURES_PLANO','_CHECKPOINT_FORCE','Y');
exec dbms_capture_adm.alter_capture(capture_name=>'CAPTURES_PLANO',CHECKPOINT_RETENTION_TIME=>3);
exec dbms_capture_adm.set_parameter ('CAPTURES_PLANO' ,'WRITE_ALERT_LOG','Y');
exec dbms_apply_adm.set_parameter ('APPLIES_PLANO','WRITE_ALERT_LOG','Y');
set pages 0
select * from dba_capture;
select * from dba_propagation;
select * from dba_apply;
select * from dba_apply_error;
Do the same on the DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
exec dbms_aqadm.ALTER_QUEUE_TABLE ('CAPTURE_TULSATAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_aqadm.ALTER_QUEUE_TABLE ('APPLY_TULSATAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_apply_adm.set_parameter('APPLIES_TULSA','disable_on_error','n');
exec dbms_apply_adm.set_parameter('APPLIES_TULSA', 'PARALLELISM', '6');
exec dbms_apply_adm.set_parameter('APPLIES_TULSA', '_TXN_BUFFER_SIZE', '20');
exec dbms_apply_adm.set_parameter('APPLIES_TULSA', '_HASH_TABLE_SIZE', '10000000');
exec dbms_capture_adm.set_parameter('CAPTURES_TULSA','_CHECKPOINT_FREQUENCY','500');
exec dbms_capture_adm.set_parameter('CAPTURES_TULSA','_SGA_SIZE','100');
exec dbms_capture_adm.set_parameter('CAPTURES_TULSA','_CHECKPOINT_FORCE','Y');
exec dbms_capture_adm.alter_capture(capture_name=>'CAPTURES_TULSA',CHECKPOINT_RETENTION_TIME=>3);
exec dbms_capture_adm.set_parameter ('CAPTURES_TULSA' ,'WRITE_ALERT_LOG','Y');
exec dbms_apply_adm.set_parameter ('APPLIES_TULSA','WRITE_ALERT_LOG','Y');
set pages 0
select * from dba_capture;
select * from dba_propagation;
select * from dba_apply;
select * from dba_apply_error;
Start the Bidirectional Streams
Start Capture and Apply processes on DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
exec dbms_apply_adm.start_apply(apply_name=> 'APPLIES_TULSA');
exec dbms_capture_adm.start_capture(capture_name=>'CAPTURES_TULSA');
Start Capture and Apply processes on SRC:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
exec dbms_apply_adm.start_apply(apply_name=> 'APPLIES_PLANO');
exec dbms_capture_adm.start_capture(capture_name=>'CAPTURES_PLANO');
Create the Heart Beat table on Source
connect SSM/SSM@PRODDB_PLANO.DOMAIN.COM
drop table SSM.SRC_DEST_HEART_BEAT;
create table SSM.SRC_DEST_HEART_BEAT(SRC_DATE timestamp);
insert into SSM.SRC_DEST_HEART_BEAT select sysdate from dual;
grant insert,update,delete,select on SRC_DEST_HEART_BEAT to STRMADMIN;
conn STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
variable jobno number
begin
dbms_job.submit(:jobno,'update ssm.src_dest_heart_beat set src_date =(select sysdate from dual);',sysdate,'sysdate+300/(60*60*24)');
commit;
end;
/
create or replace procedure strm_build_proc
is
scn NUMBER;
BEGIN
DBMS_CAPTURE_ADM.BUILD(
first_scn => scn);
DBMS_OUTPUT.PUT_LINE('First SCN Value = ' || scn);
END;
/
variable jobno number
begin
dbms_job.submit(:jobno,'strm_build_proc;',sysdate,'sysdate+ 2/24');
commit;
end;
/
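Once the heartbeat job is running, a quick way to confirm that changes are flowing is to watch SRC_DATE advance on the target (a sketch; this assumes the SSM account exists on the target with the same password):
connect SSM/SSM@PRODDB_TULSA.DOMAIN.COM
select src_date from ssm.src_dest_heart_beat;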
Repeat the Job Creation for the Heart Beat on the Target (this assumes an analogous SSM.DEST_SRC_HEART_BEAT table has been created on the target, mirroring the source table above)
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
variable jobno number
begin
dbms_job.submit(:jobno,'update ssm.dest_src_heart_beat set dest_date =(select sysdate from dual);',sysdate,'sysdate+300/(60*60*24)');
commit;
end;
/
create or replace procedure strm_build_proc
is
scn NUMBER;
BEGIN
DBMS_CAPTURE_ADM.BUILD(
first_scn => scn);
DBMS_OUTPUT.PUT_LINE('First SCN Value = ' || scn);
END;
/
variable jobno number
begin
dbms_job.submit(:jobno,'strm_build_proc;',sysdate,'sysdate+ 2/24');
commit;
end;
/
Create the Conflict Resolution Handler on the Source for KIOSK_PROFILE (OVERWRITE method)
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
-------CONFLICT RESOLUTION SCRIPT----------
DECLARE
kp_fields DBMS_UTILITY.NAME_ARRAY;
BEGIN
kp_fields(1) := 'profile_id';
kp_fields(2) := 'device_id';
kp_fields(3) := 'device_type';
kp_fields(4) := 'serial_number';
kp_fields(5) := 'airport';
kp_fields(6) := 'area';
kp_fields(7) := 'facility';
kp_fields(8) := 'location';
kp_fields(9) := 'use_actual_departure_time';
kp_fields(10) := 'late_checkin_time';
kp_fields(11) := 'early_checkin_time';
kp_fields(12) := 'baggage_late_checkin_time';
kp_fields(13) := 'nrps_early_checkin_time';
kp_fields(14) := 'nrsa_early_checkin_time';
kp_fields(15) := 'asset_id';
kp_fields(16) := 'operation_hour_start';
kp_fields(17) := 'operation_hour_end';
kp_fields(18) := 'sofware_version';
kp_fields(19) := 'bag_tag_printer_capable';
kp_fields(20) := 'after_midnight_cutoff';
kp_fields(21) := 'primary_km';
kp_fields(22) := 'backup_km';
kp_fields(23) := 'ticket_number';
kp_fields(24) := 'ticket_date';
kp_fields(25) := 'domestic_boarding_time';
kp_fields(26) := 'default_lang';
kp_fields(27) := 'environment';
kp_fields(28) := 'check_bag_only_capable';
kp_fields(29) := 'ticket_stock';
kp_fields(30) := 'ticket_stock_id';
kp_fields(31) := 'baggage_early_checkin_time';
kp_fields(32) := 'baggage_after_midnight_cutoff';
kp_fields(33) := 'international_flag';
kp_fields(34) := 'intl_late_checkin_time';
kp_fields(35) := 'virtual_bag_check';
kp_fields(36) := 'current_tkt_printer';
kp_fields(37) := 'previous_tkt_printer';
kp_fields(38) := 'barcode_2d_capable';
kp_fields(39) := 'intl_baggage_late_checkin_time';
kp_fields(40) := 'dmst_late_checkin_time';
kp_fields(41) := 'dmst_baggage_late_checkin_time';
kp_fields(42) := 'agent_takeover_allowed';
kp_fields(43) := 'client_restart_freq';
kp_fields(44) := 'use_oper_hour_end_for_restart';
kp_fields(45) := 'client_restart_time';
DBMS_APPLY_ADM.SET_UPDATE_CONFLICT_HANDLER(
object_name => 'ssm.kiosk_profile',
method_name => 'overwrite',
resolution_column => 'profile_id',
column_list => kp_fields);
END;
/
Create the Conflict Resolution Handler on the Destination for KIOSK_PROFILE (DISCARD method)
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
------CONFLICT RESOLUTION SCRIPT--------
DECLARE
kp_fields DBMS_UTILITY.NAME_ARRAY;
BEGIN
kp_fields(1) := 'profile_id';
kp_fields(2) := 'device_id';
kp_fields(3) := 'device_type';
kp_fields(4) := 'serial_number';
kp_fields(5) := 'airport';
kp_fields(6) := 'area';
kp_fields(7) := 'facility';
kp_fields(8) := 'location';
kp_fields(9) := 'use_actual_departure_time';
kp_fields(10) := 'late_checkin_time';
kp_fields(11) := 'early_checkin_time';
kp_fields(12) := 'baggage_late_checkin_time';
kp_fields(13) := 'nrps_early_checkin_time';
kp_fields(14) := 'nrsa_early_checkin_time';
kp_fields(15) := 'asset_id';
kp_fields(16) := 'operation_hour_start';
kp_fields(17) := 'operation_hour_end';
kp_fields(18) := 'sofware_version';
kp_fields(19) := 'bag_tag_printer_capable';
kp_fields(20) := 'after_midnight_cutoff';
kp_fields(21) := 'primary_km';
kp_fields(22) := 'backup_km';
kp_fields(23) := 'ticket_number';
kp_fields(24) := 'ticket_date';
kp_fields(25) := 'domestic_boarding_time';
kp_fields(26) := 'default_lang';
kp_fields(27) := 'environment';
kp_fields(28) := 'check_bag_only_capable';
kp_fields(29) := 'ticket_stock';
kp_fields(30) := 'ticket_stock_id';
kp_fields(31) := 'baggage_early_checkin_time';
kp_fields(32) := 'baggage_after_midnight_cutoff';
kp_fields(33) := 'international_flag';
kp_fields(34) := 'intl_late_checkin_time';
kp_fields(35) := 'virtual_bag_check';
kp_fields(36) := 'current_tkt_printer';
kp_fields(37) := 'previous_tkt_printer';
kp_fields(38) := 'barcode_2d_capable';
kp_fields(39) := 'intl_baggage_late_checkin_time';
kp_fields(40) := 'dmst_late_checkin_time';
kp_fields(41) := 'dmst_baggage_late_checkin_time';
kp_fields(42) := 'agent_takeover_allowed';
kp_fields(43) := 'client_restart_freq';
kp_fields(44) := 'use_oper_hour_end_for_restart';
kp_fields(45) := 'client_restart_time';
DBMS_APPLY_ADM.SET_UPDATE_CONFLICT_HANDLER(
object_name => 'ssm.kiosk_profile',
method_name => 'discard',
resolution_column => 'profile_id',
column_list => kp_fields);
END;
/
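A sketch of how to verify that the handlers were registered; run it as the Streams administrator on each database:
select distinct object_owner, object_name, method_name, resolution_column
from dba_apply_conf_handler_columns;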
Create Procedures to print out transactions that show up in DBA_APPLY_ERROR
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
CREATE OR REPLACE PROCEDURE print_any(data IN SYS.AnyData) IS
tn VARCHAR2(61);
str VARCHAR2(4000);
chr CHAR(255);
num NUMBER;
dat DATE;
rw RAW(4000);
res NUMBER;
BEGIN
IF data IS NULL THEN
DBMS_OUTPUT.PUT_LINE('NULL value');
RETURN;
END IF;
tn := data.GETTYPENAME();
IF tn = 'SYS.VARCHAR2' THEN
res := data.GETVARCHAR2(str);
DBMS_OUTPUT.PUT_LINE(str);
ELSIF tn = 'SYS.CHAR' then
res := data.GETCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.VARCHAR' THEN
res := data.GETVARCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.NUMBER' THEN
res := data.GETNUMBER(num);
DBMS_OUTPUT.PUT_LINE(num);
ELSIF tn = 'SYS.DATE' THEN
res := data.GETDATE(dat);
DBMS_OUTPUT.PUT_LINE(dat);
ELSIF tn = 'SYS.RAW' THEN
res := data.GETRAW(rw);
DBMS_OUTPUT.PUT_LINE(RAWTOHEX(rw));
ELSE
DBMS_OUTPUT.PUT_LINE('typename is ' || tn);
END IF;
END print_any;
/
CREATE OR REPLACE PROCEDURE print_lcr(lcr IN SYS.ANYDATA) IS
typenm VARCHAR2(61);
ddllcr SYS.LCR$_DDL_RECORD;
proclcr SYS.LCR$_PROCEDURE_RECORD;
rowlcr SYS.LCR$_ROW_RECORD;
res NUMBER;
newlist SYS.LCR$_ROW_LIST;
oldlist SYS.LCR$_ROW_LIST;
ddl_text CLOB;
BEGIN
typenm := lcr.GETTYPENAME();
DBMS_OUTPUT.PUT_LINE('type name: ' || typenm);
IF (typenm = 'SYS.LCR$_DDL_RECORD') THEN
res := lcr.GETOBJECT(ddllcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
ddllcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || ddllcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || ddllcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || ddllcr.IS_NULL_TAG);
DBMS_LOB.CREATETEMPORARY(ddl_text, TRUE);
ddllcr.GET_DDL_TEXT(ddl_text);
DBMS_OUTPUT.PUT_LINE('ddl: ' || ddl_text);
DBMS_LOB.FREETEMPORARY(ddl_text);
ELSIF (typenm = 'SYS.LCR$_ROW_RECORD') THEN
res := lcr.GETOBJECT(rowlcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
rowlcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || rowlcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || rowlcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || rowlcr.IS_NULL_TAG);
DBMS_OUTPUT.PUT_LINE('command_type: ' || rowlcr.GET_COMMAND_TYPE);
oldlist := rowlcr.GET_VALUES('OLD');
FOR i IN 1..oldlist.COUNT LOOP
if oldlist(i) is not null then
DBMS_OUTPUT.PUT_LINE('old(' || i || '): ' || oldlist(i).column_name);
print_any(oldlist(i).data);
END IF;
END LOOP;
newlist := rowlcr.GET_VALUES('NEW');
FOR i in 1..newlist.count LOOP
IF newlist(i) IS NOT NULL THEN
DBMS_OUTPUT.PUT_LINE('new(' || i || '): ' || newlist(i).column_name);
print_any(newlist(i).data);
END IF;
END LOOP;
ELSE
DBMS_OUTPUT.PUT_LINE('Non-LCR Message with type ' || typenm);
END IF;
END print_lcr;
/
CREATE OR REPLACE PROCEDURE print_transaction(ltxnid IN VARCHAR2) IS
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.ANYDATA;
BEGIN
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
INTO txnid, source, msgcnt, errno, errmsg
FROM DBA_APPLY_ERROR
WHERE LOCAL_TRANSACTION_ID = ltxnid;
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid); -- gets the LCR
print_lcr(lcr);
END LOOP;
END print_transaction;
/
CREATE OR REPLACE PROCEDURE print_errors IS
CURSOR c IS
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
FROM DBA_APPLY_ERROR
ORDER BY SOURCE_DATABASE, SOURCE_COMMIT_SCN;
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errnum NUMBER := 0;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.AnyData;
r NUMBER;
BEGIN
FOR r IN c LOOP
errnum := errnum + 1;
msgcnt := r.MESSAGE_COUNT;
txnid := r.LOCAL_TRANSACTION_ID;
source := r.SOURCE_DATABASE;
errmsg := r.ERROR_MESSAGE;
errno := r.ERROR_NUMBER;
DBMS_OUTPUT.PUT_LINE('*************************************************');
DBMS_OUTPUT.PUT_LINE('----- ERROR #' || errnum);
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid);
print_lcr(lcr);
END LOOP;
END LOOP;
END print_errors;
/
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
CREATE OR REPLACE PROCEDURE print_any(data IN SYS.AnyData) IS
tn VARCHAR2(61);
str VARCHAR2(4000);
chr CHAR(255);
num NUMBER;
dat DATE;
rw RAW(4000);
res NUMBER;
BEGIN
IF data IS NULL THEN
DBMS_OUTPUT.PUT_LINE('NULL value');
RETURN;
END IF;
tn := data.GETTYPENAME();
IF tn = 'SYS.VARCHAR2' THEN
res := data.GETVARCHAR2(str);
DBMS_OUTPUT.PUT_LINE(str);
ELSIF tn = 'SYS.CHAR' then
res := data.GETCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.VARCHAR' THEN
res := data.GETVARCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.NUMBER' THEN
res := data.GETNUMBER(num);
DBMS_OUTPUT.PUT_LINE(num);
ELSIF tn = 'SYS.DATE' THEN
res := data.GETDATE(dat);
DBMS_OUTPUT.PUT_LINE(dat);
ELSIF tn = 'SYS.RAW' THEN
res := data.GETRAW(rw);
DBMS_OUTPUT.PUT_LINE(RAWTOHEX(rw));
ELSE
DBMS_OUTPUT.PUT_LINE('typename is ' || tn);
END IF;
END print_any;
/
CREATE OR REPLACE PROCEDURE print_lcr(lcr IN SYS.ANYDATA) IS
typenm VARCHAR2(61);
ddllcr SYS.LCR$_DDL_RECORD;
proclcr SYS.LCR$_PROCEDURE_RECORD;
rowlcr SYS.LCR$_ROW_RECORD;
res NUMBER;
newlist SYS.LCR$_ROW_LIST;
oldlist SYS.LCR$_ROW_LIST;
ddl_text CLOB;
BEGIN
typenm := lcr.GETTYPENAME();
DBMS_OUTPUT.PUT_LINE('type name: ' || typenm);
IF (typenm = 'SYS.LCR$_DDL_RECORD') THEN
res := lcr.GETOBJECT(ddllcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
ddllcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || ddllcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || ddllcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || ddllcr.IS_NULL_TAG);
DBMS_LOB.CREATETEMPORARY(ddl_text, TRUE);
ddllcr.GET_DDL_TEXT(ddl_text);
DBMS_OUTPUT.PUT_LINE('ddl: ' || ddl_text);
DBMS_LOB.FREETEMPORARY(ddl_text);
ELSIF (typenm = 'SYS.LCR$_ROW_RECORD') THEN
res := lcr.GETOBJECT(rowlcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
rowlcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || rowlcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || rowlcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || rowlcr.IS_NULL_TAG);
DBMS_OUTPUT.PUT_LINE('command_type: ' || rowlcr.GET_COMMAND_TYPE);
oldlist := rowlcr.GET_VALUES('OLD');
FOR i IN 1..oldlist.COUNT LOOP
if oldlist(i) is not null then
DBMS_OUTPUT.PUT_LINE('old(' || i || '): ' || oldlist(i).column_name);
print_any(oldlist(i).data);
END IF;
END LOOP;
newlist := rowlcr.GET_VALUES('NEW');
FOR i in 1..newlist.count LOOP
IF newlist(i) IS NOT NULL THEN
DBMS_OUTPUT.PUT_LINE('new(' || i || '): ' || newlist(i).column_name);
print_any(newlist(i).data);
END IF;
END LOOP;
ELSE
DBMS_OUTPUT.PUT_LINE('Non-LCR Message with type ' || typenm);
END IF;
END print_lcr;
/
CREATE OR REPLACE PROCEDURE print_transaction(ltxnid IN VARCHAR2) IS
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.ANYDATA;
BEGIN
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
INTO txnid, source, msgcnt, errno, errmsg
FROM DBA_APPLY_ERROR
WHERE LOCAL_TRANSACTION_ID = ltxnid;
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid); -- gets the LCR
print_lcr(lcr);
END LOOP;
END print_transaction;
/
CREATE OR REPLACE PROCEDURE print_errors IS
CURSOR c IS
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
FROM DBA_APPLY_ERROR
ORDER BY SOURCE_DATABASE, SOURCE_COMMIT_SCN;
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errnum NUMBER := 0;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.AnyData;
r NUMBER;
BEGIN
FOR r IN c LOOP
errnum := errnum + 1;
msgcnt := r.MESSAGE_COUNT;
txnid := r.LOCAL_TRANSACTION_ID;
source := r.SOURCE_DATABASE;
errmsg := r.ERROR_MESSAGE;
errno := r.ERROR_NUMBER;
DBMS_OUTPUT.PUT_LINE('*************************************************');
DBMS_OUTPUT.PUT_LINE('----- ERROR #' || errnum);
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid);
print_lcr(lcr);
END LOOP;
END LOOP;
END print_errors;
/
Hints and Tricks: Commands Used to Look into Streams
Capture Side
alter session set nls_date_format='dd-mon-yyyy hh24:mi:ss';
select START_SCN ,FIRST_SCN , CAPTURED_SCN,APPLIED_SCN,status from dba_capture;
-- To check if the messages are spilling
select CNUM_MSGS,NUM_MSGS,SPILL_MSGS,CSPILL_MSGS from v$buffered_queues;
-- To check if there are any open transactions that are not committed
select * from v$streams_transaction;
The following query confirms whether the required logfile is registered:
select a.name, a.status from v$archived_log a, v$streams_capture s
where s.CAPTURE_MESSAGE_NUMBER
between a.FIRST_CHANGE# and a.NEXT_CHANGE#;
col CONSUMER_NAME format a20
col SOURCE_DATABASE format a20
col MODIFIED_TIME format a20
SELECT r.CONSUMER_NAME,
r.SOURCE_DATABASE,
r.SEQUENCE#,
r.NAME,
r.first_scn,
r.FIRST_TIME,
r.MODIFIED_TIME,
r.DICTIONARY_BEGIN,
r.DICTIONARY_END
FROM DBA_REGISTERED_ARCHIVED_LOG r, DBA_CAPTURE c
WHERE r.CONSUMER_NAME = c.CAPTURE_NAME
ORDER BY source_database, consumer_name, r.first_scn;
Propagation
select propagation_name,status from dba_propagation;
SELECT p.propagation_name, p.SOURCE_QUEUE_OWNER ||'.'||
p.SOURCE_QUEUE_NAME ||'@'||
g.GLOBAL_NAME "Source Queue",
p.DESTINATION_QUEUE_OWNER ||'.'||
p.DESTINATION_QUEUE_NAME ||'@'||
p.DESTINATION_DBLINK "Destination Queue",
p.queue_to_queue,
p.status, p.error_date, p.error_message
FROM DBA_PROPAGATION p, GLOBAL_NAME g;
Apply Side
alter session set nls_date_format='dd-mon-yyyy hh24:mi:ss';
Exec DBMS_APPLY_ADM.STOP_APPLY('APPLIES_TULSA');
EXEC DBMS_APPLY_ADM.START_APPLY('APPLIES_TULSA');
select apply_name, apply_captured, status from dba_apply;
-- to check if the messages are being dequeued
SELECT APPLY_NAME, STATE, TOTAL_MESSAGES_DEQUEUED, to_char (DEQUEUE_TIME ,'dd-mon-yyyy hh24:mi:ss') FROM V$STREAMS_APPLY_READER;
select CNUM_MSGS,NUM_MSGS,SPILL_MSGS,CSPILL_MSGS from v$buffered_queues;
select * from dba_apply_progress;
COLUMN PROCESS_NAME HEADING "Coordinator|Process|Name" FORMAT A11
COLUMN SID HEADING 'Session|ID' FORMAT 9999
COLUMN SERIAL# HEADING 'Session|Serial|Number' FORMAT 9999
COLUMN STATE HEADING 'State' FORMAT A21
COLUMN TOTAL_RECEIVED HEADING 'Total|Trans|Received' FORMAT 999999
COLUMN TOTAL_APPLIED HEADING 'Total|Trans|Applied' FORMAT 999999
COLUMN TOTAL_ERRORS HEADING 'Total|Apply|Errors' FORMAT 9999
select SUBSTR(s.PROGRAM,INSTR(S.PROGRAM,'(')+1,4) PROCESS_NAME,
c.SID,
c.SERIAL#,
c.STATE,
c.TOTAL_RECEIVED,
c.TOTAL_APPLIED,
c.TOTAL_ERRORS
FROM V$STREAMS_APPLY_COORDINATOR c, V$SESSION s
WHERE c.APPLY_NAME = 'APPLIES_TULSA' AND
c.SID = s.SID AND
c.SERIAL# = s.SERIAL#
/
Is an Apply Server Performing Poorly for Certain Transactions?
If an apply process is not performing well, then the reason might be that one or more apply servers used by the apply process are taking an inordinate amount of time to apply certain transactions. The following query displays information about the transactions being applied by each apply server used by an apply process named "applies_tulsa":
COLUMN SERVER_ID HEADING 'Apply Server ID' FORMAT 99999999
COLUMN STATE HEADING 'Apply Server State' FORMAT A20
COLUMN APPLIED_MESSAGE_NUMBER HEADING 'Applied Message|Number' FORMAT
99999999
COLUMN MESSAGE_SEQUENCE HEADING 'Message Sequence|Number' FORMAT 99999999
SELECT SERVER_ID, STATE, APPLIED_MESSAGE_NUMBER, MESSAGE_SEQUENCE
FROM V$STREAMS_APPLY_SERVER
WHERE APPLY_NAME = 'APPLIES_TULSA'
ORDER BY SERVER_ID;
If you run this query repeatedly, then over time the apply server state, applied message number, and message sequence number should continue to change for each apply server as it applies transactions. If these values do not change for one or more apply servers, then the apply server might not be performing well. In this case, you should make sure that, for each table to which the apply process applies changes, every key column has an index.
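To verify the indexing, you can list the indexed columns for each replicated table. The following is a minimal sketch; SSM.KIOSK_PROFILE is used purely as an example of one of the replicated tables in this configuration:
-- List the indexed columns for one replicated table (table name is an example)
SELECT ic.index_name, ic.column_name, ic.column_position
FROM dba_ind_columns ic
WHERE ic.table_owner = 'SSM'
AND ic.table_name = 'KIOSK_PROFILE'
ORDER BY ic.index_name, ic.column_position;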
If you have many such tables, then you might need to determine the specific table and DML or DDL operation that is causing an apply server to perform poorly. To do so, run the following query when an apply server is taking an inordinately long time to apply a transaction. In this example, assume that the name of the apply process is "applies_tulsa" and that apply server number two is performing poorly:
COLUMN OPERATION HEADING 'Operation' FORMAT A20
COLUMN OPTIONS HEADING 'Options' FORMAT A20
COLUMN OBJECT_OWNER HEADING 'Object|Owner' FORMAT A10
COLUMN OBJECT_NAME HEADING 'Object|Name' FORMAT A10
COLUMN COST HEADING 'Cost' FORMAT 99999999
SELECT p.OPERATION, p.OPTIONS, p.OBJECT_OWNER, p.OBJECT_NAME, p.COST
FROM V$SQL_PLAN p, V$SESSION s, V$STREAMS_APPLY_SERVER a
WHERE a.APPLY_NAME = 'APPLIES_TULSA' AND a.SERVER_ID = 2
AND s.SID = a.SID
AND p.HASH_VALUE = s.SQL_HASH_VALUE;
This query returns the operation being performed currently by the specified apply server. The query also returns the owner and name of the table on which the operation is being performed and the cost of the operation. Make sure each key column in this table has an index. If the results show FULL in the OPTIONS column, then the operation is causing full table scans, and indexing the table's key columns might solve the problem.
In addition, you can run the following query to determine the specific DML or DDL SQL statement that is causing an apply server to perform poorly, assuming that the name of the apply process is "applies_tulsa" and that apply server number two is performing poorly:
SELECT t.SQL_TEXT
FROM V$SESSION s, V$SQLTEXT t, V$STREAMS_APPLY_SERVER a
WHERE a.APPLY_NAME = 'APPLIES_TULSA' AND a.SERVER_ID = 2
AND s.SID = a.SID
AND s.SQL_ADDRESS = t.ADDRESS
AND s.SQL_HASH_VALUE = t.HASH_VALUE
ORDER BY PIECE;
This query returns the SQL statement being run currently by the specified apply server. The statement includes the name of the table to which the transaction is being applied. Make sure each key column in this table has an index.
If the SQL statement returned by the previous query is less than one thousand characters long, then you can run the following simplified query instead:
SELECT t.SQL_TEXT
FROM V$SESSION s, V$SQLAREA t, V$STREAMS_APPLY_SERVER a
WHERE a.APPLY_NAME = 'APPLIES_TULSA' AND a.SERVER_ID = 2
AND s.SID = a.SID
AND s.SQL_ADDRESS = t.ADDRESS
AND s.SQL_HASH_VALUE = t.HASH_VALUE;
To delete all errors or a specific transaction in dba_apply_error
exec dbms_apply_adm.delete_all_errors('&APPLY_NAME');
For a specific transaction
exec dbms_apply_adm.delete_error('&LOCAL_TRANSACTION_ID');
To check what SQL is in a long-running transaction, start LogMiner:
alter session set nls_date_format='dd-mon-yyyy hh24:mi:ss';
-- &commitscn is obtained from the health check
select name, sequence# from v$archived_log where &commitscn between first_change# and next_change#;
Using the NAME column from the output above, pass each logfile to the following commands:
exec dbms_logmnr.add_logfile('&1');
exec dbms_logmnr.start_logmnr(options=>dbms_logmnr.DICT_FROM_ONLINE_CATALOG);
select XIDUSN || '.' || XIDSLT || '.' || XIDSQN from v$streams_transaction ;
select timestamp,commit_timestamp,SQL_REDO from v$logmnr_contents where XIDUSN || '.' || XIDSLT || '.' || XIDSQN = '4.1.93557';
exec dbms_logmnr.end_logmnr();
To print out the transactions that have errored out in dba_apply_error
select LOCAL_TRANSACTION_ID from dba_apply_error;
SET SERVEROUTPUT ON SIZE 1000000
EXEC print_transaction('&LOCAL_TRANSACTION_ID');
To Skip the current Transaction
On the Capture side
exec dbms_capture_adm.set_parameter('&capture_name', '_ignore_transaction', '&txn_id');
The txn_id is obtained from the following command:
select XIDUSN || '.' || XIDSLT || '.' || XIDSQN from v$streams_transaction ;
exec dbms_capture_adm.stop_capture ('&capture_name');
exec dbms_capture_adm.start_capture ('&capture_name');
On the Apply side
exec dbms_apply_adm.set_parameter('&apply_name', '_ignore_transaction', '&txn_id');
exec dbms_apply_adm.stop_apply ('&apply_name');
exec dbms_apply_adm.start_apply ('&apply_name');
Periodic Maintenance
Checkpoint periodically
Periodically force capture to checkpoint. This checkpoint is not the same as a database checkpoint. To force capture to checkpoint, use the capture parameter _CHECKPOINT_FORCE and set the value to YES.
Forcing a checkpoint ensures that the DBA_CAPTURE view columns CAPTURED_SCN and APPLIED_SCN are maintained.
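For example, to force a checkpoint for the capture process configured later in this document (substitute your own capture process name):
exec dbms_capture_adm.set_parameter('CAPTURES_PLANO','_CHECKPOINT_FORCE','Y');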
Perform periodic maintenance
1. Move FIRST_SCN. Alter the capture FIRST_SCN, the lowest possible SCN available for capturing changes, so that Streams metadata tables can be purged and space in the SYSAUX tablespace reclaimed.
2. Dump a fresh copy of the dictionary to redo. Issue a DBMS_CAPTURE_ADM.BUILD command to dump a current copy of the data dictionary to the redo logs. Doing this reduces the amount of redo to be processed if an additional capture process is created or an existing capture process is rebuilt.
3. Prepare database objects for instantiation. Issue DBMS_CAPTURE_ADM.PREPARE_GLOBAL_INSTANTIATION. This is used in conjunction with the BUILD in step 2 above for new capture creation or rebuild purposes. A combined sketch of these calls follows this list.
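The following is a minimal sketch of these maintenance calls, assuming the capture name CAPTURES_PLANO used elsewhere in this document; the FIRST_SCN value is a placeholder and must be replaced with an SCN obtained from a current dictionary build:
DECLARE
build_scn NUMBER;
BEGIN
-- 1. Move FIRST_SCN forward so old Streams metadata can be purged (placeholder SCN)
DBMS_CAPTURE_ADM.ALTER_CAPTURE(capture_name => 'CAPTURES_PLANO', first_scn => 9999999999);
-- 2. Dump a fresh copy of the data dictionary to the redo logs
DBMS_CAPTURE_ADM.BUILD(first_scn => build_scn);
DBMS_OUTPUT.PUT_LINE('New dictionary build SCN: ' || build_scn);
-- 3. Prepare the database objects for instantiation
DBMS_CAPTURE_ADM.PREPARE_GLOBAL_INSTANTIATION;
END;
/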
Additional Recommendations
1. Make sure there are no network issues. If network problems are suspected, install OSWatcher from Metalink Note 301137.1 and upload the collected data to Oracle Support for further analysis.
2. Check for batch jobs that have a commit point greater than 1000. If any exist, reduce the commit point to less than 1000.
3. Do more testing with the environment.
This section recommends best practices for configuring Streams for Oracle Database 10g Release 2. The discussion is divided into the following sections:
• Guidelines for Preparing All Streams Configurations
• Recommendations for Downstream Capture Configurations
• Recommendations for Upstream (Local) Capture Configurations
All users should perform the instructions in the Guidelines for Preparing All Streams Configurations section of this white paper before branching off to either the downstream or the upstream configuration recommendations.
The examples in this article are based on a configuration that uses:
• Two databases
• Stream database administrator privileges
• Automatic Storage Management (ASM).
Guidelines for Preparing All Streams Configurations
The following list summarizes the tasks you must perform to properly prepare each Streams database:
Use Oracle Database 10g Release 2 (10.2.0.4)
Ensure that all Streams databases are running Oracle Database 10g Release 2 (release 10.2.0.4 is recommended) and apply any critical patch sets. Although the source database must be running Oracle Database 10g Release 2, the downstream capture database can run Release 10.2.0.4 or a later release.
See the Oracle Database Upgrade Guide [7] for database upgrade information and the "Patches and Updates" tab on Oracle Metalink for information about critical Streams patches for release 10.2 and higher releases.
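For example, you can confirm the release on each database with:
select banner from v$version;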
Verify platform requirements for Downstream Capture
Downstream capture requires that the source and destination databases are running on the same platform.
Prepare the Source and Target Databases' Redo Logs for Streams
1. Configure the source and target databases in ARCHIVELOG mode. For one-way Streams local capture, you must configure the source database to run in ARCHIVELOG mode, but the best practice is for both the source and target databases to be in ARCHIVELOG mode in case media recovery is required for either of the databases. If replication is bi-directional, then you must configure both source and target databases in ARCHIVELOG mode.
2. Configure the local archive destination parameter, LOG_ARCHIVE_DEST_1, and do not use a flash recovery area. Streams local capture and downstream capture require online redo logs and archived redo logs. Do not place archived redo log files in the Flash Recovery Area, because it is a fixed-size storage area and the files may be deleted if the amount of free space becomes low.
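For example, you can confirm the archiving mode and the local archive destination on each database with:
select log_mode from v$database;
select dest_name, destination, status from v$archive_dest where dest_name = 'LOG_ARCHIVE_DEST_1';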
Create a Tablespace Dedicated to Streams
Create a dedicated Streams tablespace to contain the Streams AQ tables and other objects related to Streams, and to allow for better space management for the Streams objects:
• For downstream capture, create the Streams tablespace on the downstream capture database only.
• For upstream capture (and bidirectional), create the Streams tablespace on both the source and target databases.
For example, to create a Streams tablespace of 50 MB, which is the minimum recommended size, issue the following statements:
CREATE TABLESPACE streams_data
LOGGING
EXTENT MANAGEMENT LOCAL AUTOALLOCATE
SEGMENT SPACE MANAGEMENT AUTO
DATAFILE '+DATA' SIZE 50M
AUTOEXTEND ON NEXT 50M MAXSIZE UNLIMITED ;
Create the Streams Administrator database user
Create the Streams administrator account that you will use to create and modify all Streams configurations and perform administrator functions with the DBMS_STREAMS_ADM PL/SQL package. Do this both on the Source and Destination databases.
For example, to create a new user named STRMADMIN with the default streams_data tablespace, enter the following SQL*Plus statements as SYSDBA:
drop user STRMADMIN cascade;
commit;
CREATE USER STRMADMIN IDENTIFIED BY STRMADMIN
DEFAULT TABLESPACE STREAMS_DATA
TEMPORARY TABLESPACE TEMP
QUOTA UNLIMITED ON STREAMS_DATA
QUOTA UNLIMITED ON STREAMS_INDEX;
Grant Streams authorization and DBA privileges
Use the DBMS_STREAMS_AUTH PL/SQL procedure to grant all of the privileges necessary for the Streams package procedures to function properly. Do this both on the Source and Destination databases:
GRANT DBA TO STRMADMIN;
BEGIN
DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE(
grantee => 'STRMADMIN',
grant_privileges => true);
END;
/
Also, set Logminer to use the Streams tablespace. Do this both on the Source and Destination databases.
exec DBMS_LOGMNR_D.SET_TABLESPACE ('STREAMS_DATA');
Check that the Streams administrator account is created:
SELECT * FROM dba_streams_administrator;
Set key initialization parameters
On each Streams database in the configuration, specify the initialization parameters shown below:
Parameter Name Description
AQ_TM_PROCESSES alter system set aq_tm_processes=1 scope=spfile sid='*';
DB_NAME Select name from v$database. Each database should be the same. Ex. PRODDB
DB_UNIQUE_NAME Each database name will be different. In this example, on servers ldbd01/02 the database's name is PRODDB_TULSA. On servers ldbd10/11 the database's name is PRODDB_PLANO.
DB_DOMAIN On each Streams database, specify the network domain where the database resides. In this example, the db_domain is DOMAIN.COM. Therefore the source database is named:
PRODDB_TULSA.DOMAIN.COM
GLOBAL_NAME=TRUE On each Streams database specify GLOBAL_NAME=TRUE to ensure the database links work correctly. For example, the source database is named PRODDB_TULSA.DOMAIN.COM when you select * from global_name;
JOB_QUEUE_PROCESSES=4 Ensure that JOB_QUEUE_PROCESSES specifies at least 4 job queue processes for Streams. If the parameter is already set, increase it by 4.
COMPATIBLE=10.2 Make certain the COMPATIBLE parameter is at least 10.2.
_JOB_QUEUE_INTERVAL=1 On each Streams database, specify how many seconds the job queue is scanned for work. It is recommended that you set this parameter to 1 (seconds) to improve the propagation job performance and minimize the delay for how often propagation jobs will execute. Since this is a hidden parameter, here is an example of how to set it:
alter system set "_job_queue_interval"=1 scope=spfile sid='*';
TIMED_STATISTICS=TRUE On each Streams database, set the TIMED_STATISTICS=TRUE parameter to allow performance statistics to be gathered for analysis of potential performance bottlenecks.
STATISTICS_LEVEL=TYPICAL or ALL On each Streams database, set this parameter to TYPICAL to collect the necessary statistics. Set it to ALL to gather more than the bare minimum.
SHARED_POOL_SIZE=256M On each Streams database, set this parameter to a minimum value of 256 MB because Streams uses a significant amount of PL/SQL and library cache components. Future performance analysis may provide recommendations for how much to increase the size of SHARED_POOL_SIZE, especially for the database running Streams apply processes.
STREAMS_POOL_SIZE=256M On each database where there is either a capture or apply process running, set the STREAMS_POOL_SIZE parameter to a minimum of 256M. Memory from the Streams pool is used by both the Streams processes and the buffered queue, with the LCRs in the buffered queue the largest component. Use the V$STREAMS_POOL_ADVICE view to help you size the buffered queue.
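For example, the memory- and job-related parameters above could be set as follows; the values shown are the minimums recommended in this section and should be adjusted for your environment:
alter system set streams_pool_size=256M scope=both sid='*';
alter system set shared_pool_size=256M scope=both sid='*';
alter system set job_queue_processes=4 scope=both sid='*';
alter system set timed_statistics=true scope=both sid='*';
alter system set statistics_level=typical scope=both sid='*';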
Create Database links between the Source and Target databases
Before creating database links, ensure that all Streams databases are reachable using Oracle Net aliases. Perform the following steps to create database links:
1. Log into the target database as the Streams administrator user and determine the global database name by issuing the following query:
select * from global_name;
You will use the name returned by this query to create the database link in the next step.
2. Create a TNSNAMES.ORA entry for each database using Streams. Below is an example of the TNSNAMES.ORA file entry:
PRODDB_PLANO.DOMAIN.COM =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = hostname0-vip)(PORT = 1521))
(ADDRESS = (PROTOCOL = TCP)(HOST = hostname1-vip)(PORT = 1521))
(LOAD_BALANCE = yes)
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = PRODDB_PLANO.DOMAIN.COM)
(FAILOVER_MODE =
(TYPE = SELECT)
(METHOD = BASIC)
(RETRIES = 180)
(DELAY = 5)
)
)
)
3. Log into the source database as the Streams administrator.
conn strmadmin/strmadmin
4. Create a database link first from the Source database to the Target database. For example, on the source database, enter the following statement to create a database link from the source database to a target database named PRODDB_TULSA.DOMAIN.COM using the TNSNAMES.ORA alias entry PRODDB_TULSA.DOMAIN.COM.
CREATE DATABASE LINK PRODDB_TULSA.DOMAIN.COM CONNECT TO
STRMADMIN IDENTIFIED BY STRMADMIN USING 'PRODDB_TULSA.DOMAIN.COM';
5. Validate the database link is working by issuing the following select * from dual@ query on the source database:
select * from dual@PRODDB_TULSA.DOMAIN.COM;
DUM
---
X
If an error is returned, it is probably due to an incorrect TNSNAMES.ORA descriptor or to an incorrect database name. Do not proceed to the next step until the database link is working properly.
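To review the link definition while troubleshooting, for example:
select owner, db_link, username, host from dba_db_links;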
6. Log into the Streams Administrator account on the Destination database.
conn strmadmin/strmadmin@PRODDB_TULSA.DOMAIN.COM
7. Create a database link from the Destination database to the source database.
CREATE DATABASE LINK PRODDB_PLANO.DOMAIN.COM CONNECT TO
STRMADMIN IDENTIFIED BY STRMADMIN USING 'PRODDB_PLANO.DOMAIN.COM';
8. Validate the database link is working by issuing the following select * from dual@ query on the target database:
select * from dual@PRODDB_PLANO.DOMAIN.COM;
9. If an error occurs, it is probably because of an incorrect TNSNAMES.ORA descriptor or an incorrect database name. Do not proceed to the next section until the database link is working properly.
Set up Directory Objects
In many cases, Oracle Streams instantiates the objects for which changes will be captured and uses Data Pump to initially populate the replica tables at the target database. As a part of this instantiation process, Data Pump needs a file system directory location on both the source and target database server for staging and moving all Streams objects. Streams uses Oracle Directory objects that point to these file system directory locations.
Directory objects are needed by Streams for instantiation purposes. Ensure that the locations to which the directory objects point have sufficient storage space to support all objects that Streams instantiates. On both the source and downstream database servers, determine an appropriate location for the file system directory and create Oracle directory objects on each of the databases. For example, as the Streams administrator user, create the Oracle Directory objects as follows:
create directory streams_dir as '/SSM/oracle/admin/{$ORACLE_SID}/dpdump';
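To confirm that the directory object exists and points to the intended location, for example:
select directory_name, directory_path from dba_directories where directory_name = 'STREAMS_DIR';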
Account for object or tablespace name differences when replicating DDLs
If you choose to replicate DDLs, then you need to account for object or tablespace name differences between the source and target databases. Here are some best practices:
• Avoid system-generated naming for constraints or indexes
• Keep the same tablespace names between databases or use a DDL handler to explicitly address the naming differences
Recommendations for Bidirectional Configurations
Downstream capture is typically used to replicate a full database, to offload processing from the source database to the target database, or to reduce data loss with ASYNC or SYNC redo transport.
Specify initialization parameters on the Source and Target databases.
Note the example below. Be sure that the parameters you specify match the locations and parameter values in your Streams configuration.
Source Database:
LOG_ARCHIVE_DEST_1=
'LOCATION=+DATA/PRODDB_TULSA/
VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) MANDATORY'
LOG_ARCHIVE_DEST_2=
'SERVICE=PRODDB_PLANO.DOMAIN.COM
LGWR ASYNC NOREGISTER
VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE)
DB_UNIQUE_NAME=PRODDB_PLANO'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=DEFER
LOG_ARCHIVE_CONFIG='SEND, DG_CONFIG=(PRODDB_TULSA,PRODDB_PLANO)'
LOG_ARCHIVE_MAX_PROCESSES=4
Target Database:
LOG_ARCHIVE_DEST_1=
'LOCATION=use_db_recovery_file_dest
VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE)'
LOG_ARCHIVE_DEST_2=
'LOCATION=+DATA/PRODDB_PLANO/ARCH_TULSA_STREAMS
VALID_FOR=(STANDBY_LOGFILE,PRIMARY_ROLE)'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=ENABLE
LOG_ARCHIVE_CONFIG='RECEIVE, DG_CONFIG=(PRODDB_TULSA,PRODDB_PLANO)'
LOG_ARCHIVE_MAX_PROCESSES=4
Configure Standby Redo logs.
To enable real-time mining for capture at the downstream database, you must configure standby redo logs (SRLs) on the downstream database to accept redo from the source database. The Streams capture process mines the standby redo logs and any archived redo logs created from the standby redo logs.
You should have a minimum of (number of threads) * (groups per thread + 1) standby redo log groups. In this configuration there are 2 threads, each with 4 online redo log groups, so at least 5 standby redo log groups are needed per thread (10 in total). Get the maximum GROUP# from v$log.
SQL> select thread#, group# from v$log order by 1,2;
THREAD# GROUP#
---------- ----------
1 1
1 2
1 3
1 4
2 5
2 6
2 7
2 8
8 rows selected.
Start with group number 9 when adding the standby redo logs, because the last online redo log group# is 8.
alter system set standby_file_management=manual sid='*';
ALTER DATABASE ADD STANDBY LOGFILE THREAD 1
GROUP 9 SIZE 50M,
GROUP 10 SIZE 50M,
GROUP 11 SIZE 50M,
GROUP 12 SIZE 50M,
GROUP 13 SIZE 50M;
ALTER DATABASE ADD STANDBY LOGFILE THREAD 2
GROUP 14 SIZE 50M,
GROUP 15 SIZE 50M,
GROUP 16 SIZE 50M,
GROUP 17 SIZE 50M,
GROUP 18 SIZE 50M;
SQL> select thread#, group# from v$standby_log order by 1,2;
THREAD# GROUP#
---------- ----------
1 9
1 10
1 11
1 12
1 13
2 14
2 15
2 16
2 17
2 18
10 rows selected.
alter system set standby_file_management=auto sid='*';
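If a downstream capture process is used, real-time mining of these standby redo logs is enabled with the downstream_real_time_mine capture parameter. The capture name below is only a placeholder, since the bidirectional configuration in this document uses local capture:
exec dbms_capture_adm.set_parameter('&downstream_capture_name','downstream_real_time_mine','Y');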
Bidirectional Streams Setup
You must have at least one schema (for example, SSM) in the two databases, Source and Target.
Set up two queues for Capture and Apply in the SRC Database as shown below:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.set_up_queue(
queue_table => 'APPLY_PLANOTAB',
queue_name => 'APPLY_PLANO',
queue_user => 'STRMADMIN');
end;
/
begin
dbms_streams_adm.set_up_queue(
queue_table => 'CAPTURE_PLANOTAB',
queue_name => 'CAPTURE_PLANO',
queue_user => 'STRMADMIN');
end;
/
Set up two queues for Capture and Apply in the DEST Database as shown below:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.set_up_queue(
queue_table => 'APPLY_TULSATAB',
queue_name => 'APPLY_TULSA',
queue_user => 'STRMADMIN');
end;
/
begin
dbms_streams_adm.set_up_queue(
queue_table => 'CAPTURE_TULSATAB',
queue_name => 'CAPTURE_TULSA',
queue_user => 'STRMADMIN');
end;
/
Configure capture process on SRC database
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'CAPTURE',
streams_name => 'CAPTURES_PLANO',
queue_name => 'CAPTURE_PLANO',
include_dml => TRUE,
include_ddl => TRUE,
inclusion_rule => TRUE);
end;
/
Configure apply process on SRC database
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'APPLY',
streams_name => 'APPLIES_PLANO',
queue_name => 'APPLY_PLANO',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_TULSA.DOMAIN.COM');
end;
/
Configure propagation process on SRC Database:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
begin
dbms_streams_adm.add_schema_propagation_rules (
schema_name => 'SSM',
streams_name => 'PROP_PLANO_TO_TULSA',
source_queue_name => 'CAPTURE_PLANO',
destination_queue_name => 'APPLY_TULSA@PRODDB_TULSA.DOMAIN.COM',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_PLANO.DOMAIN.COM',
queue_to_queue => TRUE);
end;
/
Configure Schema Supplemental logging on SRC Database:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
EXEC DBMS_CAPTURE_ADM.ABORT_SCHEMA_INSTANTIATION('SSM');
EXEC DBMS_CAPTURE_ADM.PREPARE_SCHEMA_INSTANTIATION('SSM','ALL');
Configure capture process on DEST Database:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'CAPTURE',
streams_name => 'CAPTURES_TULSA',
queue_name => 'CAPTURE_TULSA',
include_dml => TRUE,
include_ddl => TRUE);
end;
/
Set the schema instantiation SCN on SRC using the SCN of Dest database:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
SET SERVEROUTPUT ON SIZE 1000000
declare
v_scn number;
begin
v_scn := dbms_flashback.get_system_change_number();
dbms_output.put_line('Instantiation SCN is: ' || v_scn);
dbms_apply_adm.set_schema_instantiation_scn@PRODDB_PLANO.DOMAIN.COM (
source_schema_name => 'SSM',
source_database_name => 'PRODDB_TULSA.DOMAIN.COM',
instantiation_scn => v_scn,
recursive => TRUE);
end;
/
If you experience any TNS issues, reload the listener on the Source and retry. If you are still receiving errors, check the SCN number on each database. You will need to export the schema from PLANO, import it into TULSA, and then retry.
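A minimal Data Pump sketch for that export and import, assuming the streams_dir directory object created earlier and that the dump file is copied between the two servers (file names here are illustrative):
expdp strmadmin/strmadmin@PRODDB_PLANO.DOMAIN.COM schemas=SSM directory=STREAMS_DIR dumpfile=ssm_inst.dmp logfile=ssm_exp.log
Copy ssm_inst.dmp to the directory location on the TULSA server, then run:
impdp strmadmin/strmadmin@PRODDB_TULSA.DOMAIN.COM schemas=SSM directory=STREAMS_DIR dumpfile=ssm_inst.dmp logfile=ssm_imp.log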
Configure apply process on DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.add_schema_rules (
schema_name => 'SSM',
streams_type => 'APPLY',
streams_name => 'APPLIES_TULSA',
queue_name => 'APPLY_TULSA',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_PLANO.DOMAIN.COM');
end;
/
Configure propagation process on DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
begin
dbms_streams_adm.add_schema_propagation_rules (
schema_name => 'SSM',
streams_name => 'PROP_TULSA_TO_PLANO',
source_queue_name => 'CAPTURE_TULSA',
destination_queue_name => 'APPLY_PLANO@PRODDB_PLANO.DOMAIN.COM',
include_dml => TRUE,
include_ddl => TRUE,
source_database => 'PRODDB_TULSA.DOMAIN.COM',
queue_to_queue => TRUE);
end;
/
Configure Schema Supplemental logging on DEST Database:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
EXEC DBMS_CAPTURE_ADM.ABORT_SCHEMA_INSTANTIATION('SSM');
EXEC DBMS_CAPTURE_ADM.PREPARE_SCHEMA_INSTANTIATION('SSM','ALL');
At this point, the databases should now be replicating to one another. The next step is in case you need to restart the replication process.
You can confirm the environments are in sync by executing the following SQL on each:
select current_scn from v$database;
If the SCN numbers are identical, the Streams configuration is ready to start the replication.
Set Schema Instantiation on Target Database :
Set schema instantiation on DEST Database using the SCN of SRC database:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
SET SERVEROUTPUT ON SIZE 1000000
declare
v_scn number;
begin
v_scn := dbms_flashback.get_system_change_number();
dbms_output.put_line('Instantiation SCN is: ' || v_scn);
dbms_apply_adm.set_schema_instantiation_scn@PRODDB_TULSA.DOMAIN.COM(
source_schema_name => 'SSM',
source_database_name => 'PRODDB_PLANO.DOMAIN.COM',
instantiation_scn => v_scn,
recursive => TRUE);
end;
/
Add additional parameter settings to ensure the best performance. Also, confirm that the Streams environment is correct.
On the Source:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
exec dbms_aqadm.ALTER_QUEUE_TABLE ('CAPTURE_PLANOTAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_aqadm.ALTER_QUEUE_TABLE ('APPLY_PLANOTAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_apply_adm.set_parameter('APPLIES_PLANO','disable_on_error','n');
exec dbms_apply_adm.set_parameter('APPLIES_PLANO', 'PARALLELISM', '6');
exec dbms_apply_adm.set_parameter('APPLIES_PLANO', '_TXN_BUFFER_SIZE', '20');
exec dbms_apply_adm.set_parameter('APPLIES_PLANO', '_HASH_TABLE_SIZE', '10000000');
exec dbms_capture_adm.set_parameter('CAPTURES_PLANO','_CHECKPOINT_FREQUENCY','500');
exec dbms_capture_adm.set_parameter('CAPTURES_PLANO','_SGA_SIZE','100');
exec dbms_capture_adm.set_parameter('CAPTURES_PLANO','_CHECKPOINT_FORCE','Y');
exec dbms_capture_adm.alter_capture(capture_name=>'CAPTURES_PLANO',CHECKPOINT_RETENTION_TIME=>3);
exec dbms_capture_adm.set_parameter ('CAPTURES_PLANO' ,'WRITE_ALERT_LOG','Y');
exec dbms_apply_adm.set_parameter ('APPLIES_PLANO','WRITE_ALERT_LOG','Y');
set pages 0
select * from dba_capture;
select * from dba_propagation;
select * from dba_apply;
select * from dba_apply_error;
Do the same on the DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
exec dbms_aqadm.ALTER_QUEUE_TABLE ('CAPTURE_TULSATAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_aqadm.ALTER_QUEUE_TABLE ('APPLY_TULSATAB',PRIMARY_INSTANCE =>1,SECONDARY_INSTANCE =>2);
exec dbms_apply_adm.set_parameter('APPLIES_TULSA','disable_on_error','n');
exec dbms_apply_adm.set_parameter('APPLIES_TULSA', 'PARALLELISM', '6');
exec dbms_apply_adm.set_parameter('APPLIES_TULSA', '_TXN_BUFFER_SIZE', '20');
exec dbms_apply_adm.set_parameter('APPLIES_TULSA', '_HASH_TABLE_SIZE', '10000000');
exec dbms_capture_adm.set_parameter('CAPTURES_TULSA','_CHECKPOINT_FREQUENCY','500');
exec dbms_capture_adm.set_parameter('CAPTURES_TULSA','_SGA_SIZE','100');
exec dbms_capture_adm.set_parameter('CAPTURES_TULSA','_CHECKPOINT_FORCE','Y');
exec dbms_capture_adm.alter_capture(capture_name=>'CAPTURES_TULSA',CHECKPOINT_RETENTION_TIME=>3);
exec dbms_capture_adm.set_parameter ('CAPTURES_TULSA' ,'WRITE_ALERT_LOG','Y');
exec dbms_apply_adm.set_parameter ('APPLIES_TULSA','WRITE_ALERT_LOG','Y');
set pages 0
select * from dba_capture;
select * from dba_propagation;
select * from dba_apply;
select * from dba_apply_error;
Start the Bidirectional Streams
Start Capture and Apply processes on DEST:
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
exec dbms_apply_adm.start_apply(apply_name=> 'APPLIES_TULSA');
exec dbms_capture_adm.start_capture(capture_name=>'CAPTURES_TULSA');
Start Capture and Apply processes on SRC:
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
exec dbms_apply_adm.start_apply(apply_name=> 'APPLIES_PLANO');
exec dbms_capture_adm.start_capture(capture_name=>'CAPTURES_PLANO');
Create the Heart Beat table on Source
connect SSM/SSM@PRODDB_PLANO.DOMAIN.COM
drop table SSM.SRC_DEST_HEART_BEAT;
create table SSM.SRC_DEST_HEART_BEAT(SRC_DATE timestamp);
insert into SSM.SRC_DEST_HEART_BEAT(select sysdate from dual);
grant insert,update,delete,select on SRC_DEST_HEART_BEAT to STRMADMIN;
conn STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
variable jobno number
begin
dbms_job.submit(:jobno,'update ssm.src_dest_heart_beat set src_date =(select sysdate from dual);',sysdate,'sysdate+300/(60*60*24)');
commit;
end;
/
create or replace procedure strm_build_proc
is
scn NUMBER;
BEGIN
DBMS_CAPTURE_ADM.BUILD(
first_scn => scn);
DBMS_OUTPUT.PUT_LINE('First SCN Value = ' || scn);
END;
/
variable jobno number
begin
dbms_job.submit(:jobno,'strm_build_proc;',sysdate,'sysdate+ 2/24');
commit;
end;
/
Repeat Job Creation for Heart Beat on the Target
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
variable jobno number
begin
dbms_job.submit(:jobno,'update ssm.dest_src_heart_beat set dest_date =(select sysdate from dual);',sysdate,'sysdate+300/(60*60*24)');
commit;
end;
/
create or replace procedure strm_build_proc
is
scn NUMBER;
BEGIN
DBMS_CAPTURE_ADM.BUILD(
first_scn => scn);
DBMS_OUTPUT.PUT_LINE('First SCN Value = ' || scn);
END;
/
variable jobno number
begin
dbms_job.submit(:jobno,'strm_build_proc;',sysdate,'sysdate+ 2/24');
commit;
end;
/
Create Conflict Procedure on Source Developed for Kiosk_Profile (overwrite)
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
-------CONFLICT RESOLUTION SCRIPT----------
DECLARE
kp_fields DBMS_UTILITY.NAME_ARRAY;
BEGIN
kp_fields(1) := 'profile_id';
kp_fields(2) := 'device_id';
kp_fields(3) := 'device_type';
kp_fields(4) := 'serial_number';
kp_fields(5) := 'airport';
kp_fields(6) := 'area';
kp_fields(7) := 'facility';
kp_fields(8) := 'location';
kp_fields(9) := 'use_actual_departure_time';
kp_fields(10) := 'late_checkin_time';
kp_fields(11) := 'early_checkin_time';
kp_fields(12) := 'baggage_late_checkin_time';
kp_fields(13) := 'nrps_early_checkin_time';
kp_fields(14) := 'nrsa_early_checkin_time';
kp_fields(15) := 'asset_id';
kp_fields(16) := 'operation_hour_start';
kp_fields(17) := 'operation_hour_end';
kp_fields(18) := 'sofware_version';
kp_fields(19) := 'bag_tag_printer_capable';
kp_fields(20) := 'after_midnight_cutoff';
kp_fields(21) := 'primary_km';
kp_fields(22) := 'backup_km';
kp_fields(23) := 'ticket_number';
kp_fields(24) := 'ticket_date';
kp_fields(25) := 'domestic_boarding_time';
kp_fields(26) := 'default_lang';
kp_fields(27) := 'environment';
kp_fields(28) := 'check_bag_only_capable';
kp_fields(29) := 'ticket_stock';
kp_fields(30) := 'ticket_stock_id';
kp_fields(31) := 'baggage_early_checkin_time';
kp_fields(32) := 'baggage_after_midnight_cutoff';
kp_fields(33) := 'international_flag';
kp_fields(34) := 'intl_late_checkin_time';
kp_fields(35) := 'virtual_bag_check';
kp_fields(36) := 'current_tkt_printer';
kp_fields(37) := 'previous_tkt_printer';
kp_fields(38) := 'barcode_2d_capable';
kp_fields(39) := 'intl_baggage_late_checkin_time';
kp_fields(40) := 'dmst_late_checkin_time';
kp_fields(41) := 'dmst_baggage_late_checkin_time';
kp_fields(42) := 'agent_takeover_allowed';
kp_fields(43) := 'client_restart_freq';
kp_fields(44) := 'use_oper_hour_end_for_restart';
kp_fields(45) := 'client_restart_time';
DBMS_APPLY_ADM.SET_UPDATE_CONFLICT_HANDLER(
object_name => 'ssm.kiosk_profile',
method_name => 'overwrite',
resolution_column => 'profile_id',
column_list => kp_fields);
END;
/
Create Conflict Procedure on Destination for Kiosk_Profile (discard)
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
------CONFLICT RESOLUTION SCRIPT--------
DECLARE
kp_fields DBMS_UTILITY.NAME_ARRAY;
BEGIN
kp_fields(1) := 'profile_id';
kp_fields(2) := 'device_id';
kp_fields(3) := 'device_type';
kp_fields(4) := 'serial_number';
kp_fields(5) := 'airport';
kp_fields(6) := 'area';
kp_fields(7) := 'facility';
kp_fields(8) := 'location';
kp_fields(9) := 'use_actual_departure_time';
kp_fields(10) := 'late_checkin_time';
kp_fields(11) := 'early_checkin_time';
kp_fields(12) := 'baggage_late_checkin_time';
kp_fields(13) := 'nrps_early_checkin_time';
kp_fields(14) := 'nrsa_early_checkin_time';
kp_fields(15) := 'asset_id';
kp_fields(16) := 'operation_hour_start';
kp_fields(17) := 'operation_hour_end';
kp_fields(18) := 'sofware_version';
kp_fields(19) := 'bag_tag_printer_capable';
kp_fields(20) := 'after_midnight_cutoff';
kp_fields(21) := 'primary_km';
kp_fields(22) := 'backup_km';
kp_fields(23) := 'ticket_number';
kp_fields(24) := 'ticket_date';
kp_fields(25) := 'domestic_boarding_time';
kp_fields(26) := 'default_lang';
kp_fields(27) := 'environment';
kp_fields(28) := 'check_bag_only_capable';
kp_fields(29) := 'ticket_stock';
kp_fields(30) := 'ticket_stock_id';
kp_fields(31) := 'baggage_early_checkin_time';
kp_fields(32) := 'baggage_after_midnight_cutoff';
kp_fields(33) := 'international_flag';
kp_fields(34) := 'intl_late_checkin_time';
kp_fields(35) := 'virtual_bag_check';
kp_fields(36) := 'current_tkt_printer';
kp_fields(37) := 'previous_tkt_printer';
kp_fields(38) := 'barcode_2d_capable';
kp_fields(39) := 'intl_baggage_late_checkin_time';
kp_fields(40) := 'dmst_late_checkin_time';
kp_fields(41) := 'dmst_baggage_late_checkin_time';
kp_fields(42) := 'agent_takeover_allowed';
kp_fields(43) := 'client_restart_freq';
kp_fields(44) := 'use_oper_hour_end_for_restart';
kp_fields(45) := 'client_restart_time';
DBMS_APPLY_ADM.SET_UPDATE_CONFLICT_HANDLER(
object_name => 'ssm.kiosk_profile',
method_name => 'discard',
resolution_column => 'profile_id',
column_list => kp_fields);
END;
/
Create Procedure to print out transactions that show up in dba_apply_errors
connect STRMADMIN/STRMADMIN@PRODDB_PLANO.DOMAIN.COM
CREATE OR REPLACE PROCEDURE print_any(data IN SYS.AnyData) IS
tn VARCHAR2(61);
str VARCHAR2(4000);
chr CHAR(255);
num NUMBER;
dat DATE;
rw RAW(4000);
res NUMBER;
BEGIN
IF data IS NULL THEN
DBMS_OUTPUT.PUT_LINE('NULL value');
RETURN;
END IF;
tn := data.GETTYPENAME();
IF tn = 'SYS.VARCHAR2' THEN
res := data.GETVARCHAR2(str);
DBMS_OUTPUT.PUT_LINE(str);
ELSIF tn = 'SYS.CHAR' then
res := data.GETCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.VARCHAR' THEN
res := data.GETVARCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.NUMBER' THEN
res := data.GETNUMBER(num);
DBMS_OUTPUT.PUT_LINE(num);
ELSIF tn = 'SYS.DATE' THEN
res := data.GETDATE(dat);
DBMS_OUTPUT.PUT_LINE(dat);
ELSIF tn = 'SYS.RAW' THEN
res := data.GETRAW(rw);
DBMS_OUTPUT.PUT_LINE(RAWTOHEX(rw));
ELSE
DBMS_OUTPUT.PUT_LINE('typename is ' || tn);
END IF;
END print_any;
/
CREATE OR REPLACE PROCEDURE print_lcr(lcr IN SYS.ANYDATA) IS
typenm VARCHAR2(61);
ddllcr SYS.LCR$_DDL_RECORD;
proclcr SYS.LCR$_PROCEDURE_RECORD;
rowlcr SYS.LCR$_ROW_RECORD;
res NUMBER;
newlist SYS.LCR$_ROW_LIST;
oldlist SYS.LCR$_ROW_LIST;
ddl_text CLOB;
BEGIN
typenm := lcr.GETTYPENAME();
DBMS_OUTPUT.PUT_LINE('type name: ' || typenm);
IF (typenm = 'SYS.LCR$_DDL_RECORD') THEN
res := lcr.GETOBJECT(ddllcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
ddllcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || ddllcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || ddllcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || ddllcr.IS_NULL_TAG);
DBMS_LOB.CREATETEMPORARY(ddl_text, TRUE);
ddllcr.GET_DDL_TEXT(ddl_text);
DBMS_OUTPUT.PUT_LINE('ddl: ' || ddl_text);
DBMS_LOB.FREETEMPORARY(ddl_text);
ELSIF (typenm = 'SYS.LCR$_ROW_RECORD') THEN
res := lcr.GETOBJECT(rowlcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
rowlcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || rowlcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || rowlcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || rowlcr.IS_NULL_TAG);
DBMS_OUTPUT.PUT_LINE('command_type: ' || rowlcr.GET_COMMAND_TYPE);
oldlist := rowlcr.GET_VALUES('OLD');
FOR i IN 1..oldlist.COUNT LOOP
if oldlist(i) is not null then
DBMS_OUTPUT.PUT_LINE('old(' || i || '): ' || oldlist(i).column_name);
print_any(oldlist(i).data);
END IF;
END LOOP;
newlist := rowlcr.GET_VALUES('NEW');
FOR i in 1..newlist.count LOOP
IF newlist(i) IS NOT NULL THEN
DBMS_OUTPUT.PUT_LINE('new(' || i || '): ' || newlist(i).column_name);
print_any(newlist(i).data);
END IF;
END LOOP;
ELSE
DBMS_OUTPUT.PUT_LINE('Non-LCR Message with type ' || typenm);
END IF;
END print_lcr;
/
CREATE OR REPLACE PROCEDURE print_transaction(ltxnid IN VARCHAR2) IS
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.ANYDATA;
BEGIN
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
INTO txnid, source, msgcnt, errno, errmsg
FROM DBA_APPLY_ERROR
WHERE LOCAL_TRANSACTION_ID = ltxnid;
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid); -- gets the LCR
print_lcr(lcr);
END LOOP;
END print_transaction;
/
CREATE OR REPLACE PROCEDURE print_errors IS
CURSOR c IS
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
FROM DBA_APPLY_ERROR
ORDER BY SOURCE_DATABASE, SOURCE_COMMIT_SCN;
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errnum NUMBER := 0;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.AnyData;
r NUMBER;
BEGIN
FOR r IN c LOOP
errnum := errnum + 1;
msgcnt := r.MESSAGE_COUNT;
txnid := r.LOCAL_TRANSACTION_ID;
source := r.SOURCE_DATABASE;
errmsg := r.ERROR_MESSAGE;
errno := r.ERROR_NUMBER;
DBMS_OUTPUT.PUT_LINE('*************************************************');
DBMS_OUTPUT.PUT_LINE('----- ERROR #' || errnum);
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid);
print_lcr(lcr);
END LOOP;
END LOOP;
END print_errors;
/
connect STRMADMIN/STRMADMIN@PRODDB_TULSA.DOMAIN.COM
CREATE OR REPLACE PROCEDURE print_any(data IN SYS.AnyData) IS
tn VARCHAR2(61);
str VARCHAR2(4000);
chr CHAR(255);
num NUMBER;
dat DATE;
rw RAW(4000);
res NUMBER;
BEGIN
IF data IS NULL THEN
DBMS_OUTPUT.PUT_LINE('NULL value');
RETURN;
END IF;
tn := data.GETTYPENAME();
IF tn = 'SYS.VARCHAR2' THEN
res := data.GETVARCHAR2(str);
DBMS_OUTPUT.PUT_LINE(str);
ELSIF tn = 'SYS.CHAR' then
res := data.GETCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.VARCHAR' THEN
res := data.GETVARCHAR(chr);
DBMS_OUTPUT.PUT_LINE(chr);
ELSIF tn = 'SYS.NUMBER' THEN
res := data.GETNUMBER(num);
DBMS_OUTPUT.PUT_LINE(num);
ELSIF tn = 'SYS.DATE' THEN
res := data.GETDATE(dat);
DBMS_OUTPUT.PUT_LINE(dat);
ELSIF tn = 'SYS.RAW' THEN
res := data.GETRAW(rw);
DBMS_OUTPUT.PUT_LINE(RAWTOHEX(rw));
ELSE
DBMS_OUTPUT.PUT_LINE('typename is ' || tn);
END IF;
END print_any;
/
CREATE OR REPLACE PROCEDURE print_lcr(lcr IN SYS.ANYDATA) IS
typenm VARCHAR2(61);
ddllcr SYS.LCR$_DDL_RECORD;
proclcr SYS.LCR$_PROCEDURE_RECORD;
rowlcr SYS.LCR$_ROW_RECORD;
res NUMBER;
newlist SYS.LCR$_ROW_LIST;
oldlist SYS.LCR$_ROW_LIST;
ddl_text CLOB;
BEGIN
typenm := lcr.GETTYPENAME();
DBMS_OUTPUT.PUT_LINE('type name: ' || typenm);
IF (typenm = 'SYS.LCR$_DDL_RECORD') THEN
res := lcr.GETOBJECT(ddllcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
ddllcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || ddllcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || ddllcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || ddllcr.IS_NULL_TAG);
DBMS_LOB.CREATETEMPORARY(ddl_text, TRUE);
ddllcr.GET_DDL_TEXT(ddl_text);
DBMS_OUTPUT.PUT_LINE('ddl: ' || ddl_text);
DBMS_LOB.FREETEMPORARY(ddl_text);
ELSIF (typenm = 'SYS.LCR$_ROW_RECORD') THEN
res := lcr.GETOBJECT(rowlcr);
DBMS_OUTPUT.PUT_LINE('source database: ' ||
rowlcr.GET_SOURCE_DATABASE_NAME);
DBMS_OUTPUT.PUT_LINE('owner: ' || rowlcr.GET_OBJECT_OWNER);
DBMS_OUTPUT.PUT_LINE('object: ' || rowlcr.GET_OBJECT_NAME);
DBMS_OUTPUT.PUT_LINE('is tag null: ' || rowlcr.IS_NULL_TAG);
DBMS_OUTPUT.PUT_LINE('command_type: ' || rowlcr.GET_COMMAND_TYPE);
oldlist := rowlcr.GET_VALUES('OLD');
FOR i IN 1..oldlist.COUNT LOOP
if oldlist(i) is not null then
DBMS_OUTPUT.PUT_LINE('old(' || i || '): ' || oldlist(i).column_name);
print_any(oldlist(i).data);
END IF;
END LOOP;
newlist := rowlcr.GET_VALUES('NEW');
FOR i in 1..newlist.count LOOP
IF newlist(i) IS NOT NULL THEN
DBMS_OUTPUT.PUT_LINE('new(' || i || '): ' || newlist(i).column_name);
print_any(newlist(i).data);
END IF;
END LOOP;
ELSE
DBMS_OUTPUT.PUT_LINE('Non-LCR Message with type ' || typenm);
END IF;
END print_lcr;
/
CREATE OR REPLACE PROCEDURE print_transaction(ltxnid IN VARCHAR2) IS
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.ANYDATA;
BEGIN
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
INTO txnid, source, msgcnt, errno, errmsg
FROM DBA_APPLY_ERROR
WHERE LOCAL_TRANSACTION_ID = ltxnid;
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid); -- gets the LCR
print_lcr(lcr);
END LOOP;
END print_transaction;
/
CREATE OR REPLACE PROCEDURE print_errors IS
CURSOR c IS
SELECT LOCAL_TRANSACTION_ID,
SOURCE_DATABASE,
MESSAGE_COUNT,
ERROR_NUMBER,
ERROR_MESSAGE
FROM DBA_APPLY_ERROR
ORDER BY SOURCE_DATABASE, SOURCE_COMMIT_SCN;
i NUMBER;
txnid VARCHAR2(30);
source VARCHAR2(128);
msgcnt NUMBER;
errnum NUMBER := 0;
errno NUMBER;
errmsg VARCHAR2(128);
lcr SYS.AnyData;
r NUMBER;
BEGIN
FOR r IN c LOOP
errnum := errnum + 1;
msgcnt := r.MESSAGE_COUNT;
txnid := r.LOCAL_TRANSACTION_ID;
source := r.SOURCE_DATABASE;
errmsg := r.ERROR_MESSAGE;
errno := r.ERROR_NUMBER;
DBMS_OUTPUT.PUT_LINE('*************************************************');
DBMS_OUTPUT.PUT_LINE('----- ERROR #' || errnum);
DBMS_OUTPUT.PUT_LINE('----- Local Transaction ID: ' || txnid);
DBMS_OUTPUT.PUT_LINE('----- Source Database: ' || source);
DBMS_OUTPUT.PUT_LINE('----Error Number: '||errno);
DBMS_OUTPUT.PUT_LINE('----Message Text: '||errmsg);
FOR i IN 1..msgcnt LOOP
DBMS_OUTPUT.PUT_LINE('--message: ' || i);
lcr := DBMS_APPLY_ADM.GET_ERROR_MESSAGE(i, txnid);
print_lcr(lcr);
END LOOP;
END LOOP;
END print_errors;
/
Hints and Tricks: Commands Used to Look into Streams
Capture Side
alter session set nls_date_format='dd-mon-yyyy hh24:mi:ss';
select START_SCN ,FIRST_SCN , CAPTURED_SCN,APPLIED_SCN,status from dba_capture;
-- To check if the messages are spilling
select CNUM_MSGS,NUM_MSGS,SPILL_MSGS,CSPILL_MSGS from v$buffered_queues;
-- To check if there are any open transactions that are not committed
select * from v$streams_transaction;
The following query confirms whether the required logfile is registered:
select a.name, a.status from v$archived_log a, v$streams_capture s
where s.CAPTURE_MESSAGE_NUMBER
between a.FIRST_CHANGE# and a.NEXT_CHANGE#;
col CONSUMER_NAME format a20
col SOURCE_DATABASE format a20
col MODIFIED_TIME format a20
SELECT r.CONSUMER_NAME,
r.SOURCE_DATABASE,
r.SEQUENCE#,
r.NAME,
r.first_scn,
r.FIRST_TIME,
r.MODIFIED_TIME,
r.DICTIONARY_BEGIN,
r.DICTIONARY_END
FROM DBA_REGISTERED_ARCHIVED_LOG r, DBA_CAPTURE c
WHERE r.CONSUMER_NAME = c.CAPTURE_NAME
ORDER BY source_database, consumer_name, r.first_scn;
Propagation
select propagation_name,status from dba_propagation;
SELECT p.propagation_name, p.SOURCE_QUEUE_OWNER ||'.'||
p.SOURCE_QUEUE_NAME ||'@'||
g.GLOBAL_NAME "Source Queue",
p.DESTINATION_QUEUE_OWNER ||'.'||
p.DESTINATION_QUEUE_NAME ||'@'||
p.DESTINATION_DBLINK "Destination Queue",
p.queue_to_queue,
p.status, p.error_date, p.error_message
FROM DBA_PROPAGATION p, GLOBAL_NAME g;
Apply Side
alter session set nls_date_format='dd-mon-yyyy hh24:mi:ss';
Exec DBMS_APPLY_ADM.STOP_APPLY('APPLIES_TULSA');
EXEC DBMS_APPLY_ADM.START_APPLY('APPLIES_TULSA');
select apply_name, apply_captured, status from dba_apply;
-- to check if the messages are being dequeued
SELECT APPLY_NAME, STATE, TOTAL_MESSAGES_DEQUEUED, to_char (DEQUEUE_TIME ,'dd-mon-yyyy hh24:mi:ss') FROM V$STREAMS_APPLY_READER;
select CNUM_MSGS,NUM_MSGS,SPILL_MSGS,CSPILL_MSGS from v$buffered_queues;
select * from dba_apply_progress;
COLUMN PROCESS_NAME HEADING "Coordinator|Process|Name" FORMAT A11
COLUMN SID HEADING 'Session|ID' FORMAT 9999
COLUMN SERIAL# HEADING 'Session|Serial|Number' FORMAT 9999
COLUMN STATE HEADING 'State' FORMAT A21
COLUMN TOTAL_RECEIVED HEADING 'Total|Trans|Received' FORMAT 999999
COLUMN TOTAL_APPLIED HEADING 'Total|Trans|Applied' FORMAT 999999
COLUMN TOTAL_ERRORS HEADING 'Total|Apply|Errors' FORMAT 9999
select SUBSTR(s.PROGRAM,INSTR(S.PROGRAM,'(')+1,4) PROCESS_NAME,
c.SID,
c.SERIAL#,
c.STATE,
c.TOTAL_RECEIVED,
c.TOTAL_APPLIED,
c.TOTAL_ERRORS
FROM V$STREAMS_APPLY_COORDINATOR c, V$SESSION s
WHERE c.APPLY_NAME = 'APPLY_EQARTOU3' AND
c.SID = s.SID AND
c.SERIAL# = s.SERIAL#
/
Is an Apply Server Performing Poorly for Certain Transactions?
If an apply process is not performing well, then the reason might be that one or more apply servers used by the apply process are taking an inordinate amount of time to apply certain transactions. The following query displays information about the transactions being applied by each apply server used by an apply process named "applies_tulsa":
COLUMN SERVER_ID HEADING 'Apply Server ID' FORMAT 99999999
COLUMN STATE HEADING 'Apply Server State' FORMAT A20
COLUMN APPLIED_MESSAGE_NUMBER HEADING 'Applied Message|Number' FORMAT 99999999
COLUMN MESSAGE_SEQUENCE HEADING 'Message Sequence|Number' FORMAT 99999999
SELECT SERVER_ID, STATE, APPLIED_MESSAGE_NUMBER, MESSAGE_SEQUENCE
FROM V$STREAMS_APPLY_SERVER
WHERE APPLY_NAME = 'APPLIES_TULSA'
ORDER BY SERVER_ID;
If you run this query repeatedly, then over time the apply server state, applied message number, and message sequence number should continue to change for each apply server as it applies transactions. If these values do not change for one or more apply servers, then the apply server might not be performing well. In this case, you should make sure that, for each table to which the apply process applies changes, every key column has an index.
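As a hedged helper (not part of the original note; &TABLE_OWNER and &TABLE_NAME are placeholder substitution variables), the following query lists the primary and unique key columns of a target table together with any index whose leading column matches each key column; a NULL index name flags a key column that may need an index:
col column_name format a30
col index_name format a30
SELECT cc.column_name, ic.index_name
FROM dba_constraints c, dba_cons_columns cc, dba_ind_columns ic
WHERE c.owner = cc.owner
AND c.constraint_name = cc.constraint_name
AND c.constraint_type IN ('P','U')
AND cc.owner = upper('&TABLE_OWNER')
AND cc.table_name = upper('&TABLE_NAME')
AND ic.table_owner (+) = cc.owner
AND ic.table_name (+) = cc.table_name
AND ic.column_name (+) = cc.column_name
AND ic.column_position (+) = 1;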
If you have many such tables, then you might need to determine the specific table and DML or DDL operation that is causing an apply server to perform poorly. To do so, run the following query when an apply server is taking an inordinately long time to apply a transaction. In this example, assume that the name of the apply process is "applies_tulsa" and that apply server number two is performing poorly:
COLUMN OPERATION HEADING 'Operation' FORMAT A20
COLUMN OPTIONS HEADING 'Options' FORMAT A20
COLUMN OBJECT_OWNER HEADING 'Object|Owner' FORMAT A10
COLUMN OBJECT_NAME HEADING 'Object|Name' FORMAT A10
COLUMN COST HEADING 'Cost' FORMAT 99999999
SELECT p.OPERATION, p.OPTIONS, p.OBJECT_OWNER, p.OBJECT_NAME, p.COST
FROM V$SQL_PLAN p, V$SESSION s, V$STREAMS_APPLY_SERVER a
WHERE a.APPLY_NAME = 'APPLIES_TULSA' AND a.SERVER_ID = 2
AND s.SID = a.SID
AND p.HASH_VALUE = s.SQL_HASH_VALUE;
This query returns the operation being performed currently by the specified apply server. The query also returns the owner and name of the table on which the operation is being performed and the cost of the operation. Make sure each key column in this table has an index. If the
results show FULL for the COST column, then the operation is causing full table scans, and indexing the table's key columns might solve the problem.
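As a hedged illustration (the owner, table, and column names here are placeholders, not from the document), creating an index on the key column of the slow target table lets the apply server use indexed access instead of full table scans:
-- hr.orders and order_id stand in for the target table and its key column
create index hr.orders_key_ix on hr.orders (order_id);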
In addition, you can run the following query to determine the specific DML or DDL SQL statement that is causing an apply server to perform poorly, assuming that the name of the apply process is "applies_tulsa" and that apply server number two is performing poorly:
SELECT t.SQL_TEXT
FROM V$SESSION s, V$SQLTEXT t, V$STREAMS_APPLY_SERVER a
WHERE a.APPLY_NAME = 'APPLIES_TULSA' AND a.SERVER_ID = 2
AND s.SID = a.SID
AND s.SQL_ADDRESS = t.ADDRESS
AND s.SQL_HASH_VALUE = t.HASH_VALUE
ORDER BY PIECE;
This query returns the SQL statement being run currently by the specified apply server. The statement includes the name of the table to which the transaction is being applied. Make sure each key column in this table has an index.
If the SQL statement returned by the previous query is less than one thousand characters long, then you can run the following simplified query instead:
SELECT t.SQL_TEXT
FROM V$SESSION s, V$SQLAREA t, V$STREAMS_APPLY_SERVER a
WHERE a.APPLY_NAME = 'APPLIES_TULSA' AND a.SERVER_ID = 2
AND s.SID = a.SID
AND s.SQL_ADDRESS = t.ADDRESS
AND s.SQL_HASH_VALUE = t.HASH_VALUE;
To delete all errors, or a specific transaction, from dba_apply_error
exec dbms_apply_adm.delete_all_errors('&APPLY_NAME');
For a specific transaction
exec dbms_apply_adm.delete_error('&LOCAL_TRANSACTION_ID');
To check what SQL is in a long-running transaction, start LogMiner:
alter session set nls_date_format='dd-mon-yyyy hh24:mi:ss';
-- &commitscn is obtained from the health check output
select name, sequence# from v$archived_log where &commitscn between first_change# and next_change#;
Using the NAME column from the output above, pass each logfile to the commands below:
exec dbms_logmnr.add_logfile('&1');
exec dbms_logmnr.start_logmnr(options => dbms_logmnr.DICT_FROM_ONLINE_CATALOG);
select XIDUSN || '.' || XIDSLT || '.' || XIDSQN from v$streams_transaction ;
select timestamp,commit_timestamp,SQL_REDO from v$logmnr_contents where XIDUSN || '.' || XIDSLT || '.' || XIDSQN = '4.1.93557';
exec dbms_logmnr.end_logmnr();
To print out the transactions that have errored out in dba_apply_error
select LOCAL_TRANSACTION_ID from dba_apply_error;
SET SERVEROUTPUT ON SIZE 1000000
EXEC print_transaction('&LOCAL_TRANSACTION_ID');
To Skip the current Transaction
On the Capture side
exec dbms_capture_adm.set_parameter('&capture_name', '_ignore_transaction', '&txn_id');
The txn_id is obtained from the command below:
select XIDUSN || '.' || XIDSLT || '.' || XIDSQN from v$streams_transaction ;
exec dbms_capture_adm.stop_capture('&capture_name');
exec dbms_capture_adm.start_capture('&capture_name');
On the Apply side
exec dbms_apply_adm.set_parameter('&apply_name', '_ignore_transaction', '&txn_id');
exec dbms_apply_adm.stop_apply('&apply_name');
exec dbms_apply_adm.start_apply('&apply_name');
Periodic Maintenance
Checkpoint periodically
Periodically force capture to checkpoint. This checkpoint is not the same as a database checkpoint. To force capture to checkpoint, use the capture parameter _CHECKPOINT_FORCE and set the value to YES.
Forcing a checkpoint ensures that the DBA_CAPTURE view columns CAPTURED_SCN and APPLIED_SCN are maintained.
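A minimal sketch of forcing the checkpoint, assuming a placeholder capture name; the value YES is the one this note states (some references use 'Y' instead):
-- &capture_name is a placeholder for the capture process name
exec dbms_capture_adm.set_parameter('&capture_name', '_CHECKPOINT_FORCE', 'YES');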
Perform periodic maintenance
1. Move FIRST_SCN. Alter the capture FIRST_SCN, the lowest possible SCN available for capturing changes, so that Streams metadata tables can be purged and space in the SYSAUX tablespace reclaimed.
2. Dump a fresh copy of the dictionary to redo. Issue a DBMS_CAPTURE_ADM.BUILD command to dump a current copy of the data dictionary to the redo logs. Doing this reduces the amount of redo that must be processed if an additional capture process is created or an existing one is rebuilt.
3. Prepare database objects for instantiation. Issue DBMS_CAPTURE_ADM.PREPARE_GLOBAL_INSTANTIATION. This is used in conjunction with the BUILD in step 2 above for new capture creation or rebuild purposes. A sketch of these calls follows this list.
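A minimal sketch of the three maintenance calls, assuming placeholder names and SCN values (&capture_name and &new_first_scn are hypothetical substitution variables):
-- 1. Move the capture FIRST_SCN forward so Streams metadata can be purged
exec dbms_capture_adm.alter_capture(capture_name => '&capture_name', first_scn => &new_first_scn);
-- 2. Dump a fresh copy of the data dictionary to the redo logs
set serveroutput on
declare
scn number;
begin
dbms_capture_adm.build(first_scn => scn);
dbms_output.put_line('Dictionary build FIRST_SCN: ' || scn);
end;
/
-- 3. Prepare all database objects for instantiation (used with the build above)
exec dbms_capture_adm.prepare_global_instantiation;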
Additional Recommendations
1. Make sure there are no network issues. If network problems are suspected, install OSWatcher per Metalink Note 301137.1 and upload the output to Oracle Support for further analysis.
2. Check for batch jobs with a commit point greater than 1000. If any exist, reduce the commit point to less than 1000.
3. Perform additional testing in the environment.