############################################################################
### 12c How to plugin a non-CDB database to a Container Database (CDB) ###
############################################################################
$ sqlplus sys as sysdba
SQL> shutdown immediate;
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> startup mount
SQL> alter database open read only;
-- Generate an XML manifest describing the non-CDB so it can later be
-- plugged into a CDB via CREATE PLUGGABLE DATABASE ... USING '<xml>'.
-- Must be run while the source database is open READ ONLY (see the
-- "startup mount" / "alter database open read only" steps above).
BEGIN
DBMS_PDB.DESCRIBE(
-- Output path for the manifest; must be writable by the oracle OS user.
pdb_descr_file => '/u01/scripts/rac.xml');
END;
/
-- Validate that the manifest produced by DBMS_PDB.DESCRIBE is compatible
-- with this CDB before attempting the plug-in. Prints YES if compatible;
-- on NO, query PDB_PLUG_IN_VIOLATIONS for the reasons.
-- NOTE(review): output is only visible with SET SERVEROUTPUT ON in the
-- SQL*Plus session — confirm it is enabled before running.
DECLARE
compatible CONSTANT VARCHAR2(3) :=
CASE DBMS_PDB.CHECK_PLUG_COMPATIBILITY(
-- Manifest file created in the previous step.
pdb_descr_file => '/u01/scripts/rac.xml',
-- Name the PDB will have inside the CDB.
pdb_name => 'RAC')
WHEN TRUE THEN 'YES'
ELSE 'NO'
END;
BEGIN
DBMS_OUTPUT.PUT_LINE(compatible);
END;
/
CREATE PLUGGABLE DATABASE RAC using '/u01/scripts/rac.xml' nocopy tempfile reuse;
select CON_ID, NAME, OPEN_MODE from V$PDBS;
select PDB_NAME, DBID , CON_ID, STATUS from CDB_PDBS;
ALTER SESSION SET CONTAINER =rac;
show con_name
alter session SET CONTAINER=CDB$ROOT;
show con_name
select PDB_NAME, DBID , CON_ID, STATUS from CDB_PDBS;
@/u01/products/rdbms_12102/rdbms/admin/noncdb_to_pdb.sql
#####################################
### Add and Start New PDB service ###
#####################################
./srvctl add service -db cdb12c -service pdb_srv -preferred cdb12c1 -pdb RAC
./srvctl start service -db cdb12c -s pdb_srv
########################################################################################################################
### Save state of PDB. By default, PDBs start up in MOUNTED state. Use the following cmd to preserve the open state.###
########################################################################################################################
SQL> alter pluggable database UCPDB save state;
alter pluggable database JOHNPDB save state;
##############################
### Connect to PDB Service ###
##############################
sqlplus system/racattack@collabn1:1521/ucpdb
Schema/Password@HOST:Port/Service_Name
################################################
### Open all PDBs that are in mounted mode ###
################################################
SQL> alter pluggable database all open;
##################
### Switch CDB ###
##################
-- Switch the session back to the root container.
-- Fixed typo: original read "CD$ROOT", which raises
-- ORA-65011 (pluggable database does not exist); the root is CDB$ROOT.
alter session set container=CDB$ROOT;
###############################
### Display active services ###
###############################
select name, con_id from v$active_services order by 1;
NAME CON_ID
---------------------------------------------------------------- ----------
SYS$BACKGROUND 1
SYS$USERS 1
cdb12c 1
cdb12cXDB 1
pdb_srv 3
rac 3
ucpdb 4
##########################
### Display Containers ###
##########################
SQL> select con_id, name, open_mode from v$containers;
CON_ID NAME OPEN_MODE
---------- ------------------------------ ----------
1 CDB$ROOT READ WRITE
2 PDB$SEED READ ONLY
3 RAC READ WRITE
4 UCPDB READ WRITE
###########################
### Display PDB Status ###
###########################
set linesize 500
column pdb_name format a50
select pdb_name, status from cdb_pdbs;
PDB_NAME STATUS
-------------------------------------------------- ---------
PDB$SEED NORMAL
RAC NORMAL
UCPDB NORMAL
###############################
### Display Open_Mode of PDBs ###
###############################
select name, open_mode from v$pdbs;
NAME OPEN_MODE
------------------------------ ----------
PDB$SEED READ ONLY
RAC READ WRITE
UCPDB READ WRITE
#########################################################
### Display all datafile at the root container level ###
#########################################################
set linesize 500
column FILE_NAME format a100
select file_name,con_id from cdb_data_files order by con_id;
FILE_NAME CON_ID
---------------------------------------------------------------------------------------------------- ----------
+DATA/CDB12C/DATAFILE/system.295.908285565 1
+DATA/CDB12C/DATAFILE/users.303.908285931 1
+DATA/CDB12C/DATAFILE/sysaux.297.908285699 1
+DATA/CDB12C/DATAFILE/undotbs1.299.908285801 1
+DATA/CDB12C/DATAFILE/undotbs2.302.908285907 1
+DATA/RAC/DATAFILE/users.260.839965263 3
+DATA/RAC/DATAFILE/system.259.839965187 3
+DATA/RAC/DATAFILE/sysaux.258.839965083 3
+DATA/CDB12C/2FFB70F5EDFF486AE053334EA8C0FF4E/DATAFILE/system.308.908621995 4
+DATA/CDB12C/2FFB70F5EDFF486AE053334EA8C0FF4E/DATAFILE/users.311.908622247 4
+DATA/CDB12C/2FFB70F5EDFF486AE053334EA8C0FF4E/DATAFILE/sysaux.309.908621995 4
###########################################################
### Display all tablespaces at the root container level ###
###########################################################
set linesize 500
set pagesize 500
column tablespace_name format a50
select tablespace_name, con_id from cdb_tablespaces order by con_id;
TABLESPACE_NAME CON_ID
-------------------------------------------------- ----------
SYSTEM 1
USERS 1
SYSAUX 1
TEMP 1
UNDOTBS1 1
UNDOTBS2 1
SYSTEM 3
SYSAUX 3
UNDOTBS2 3
TEMP 3
USERS 3
UNDOTBS1 3
SYSTEM 4
USERS 4
TEMP 4
SYSAUX 4
#########################################################
### Display all tempfiles at the root container level ###
#########################################################
set linesize 500
column FILE_NAME format a100
select file_name,con_id from cdb_temp_files order by con_id;
FILE_NAME CON_ID
---------------------------------------------------------------------------------------------------- ----------
+DATA/CDB12C/TEMPFILE/temp.300.908285829 1
+DATA/CDB12C/F2CCC568CEF64318E043334EA8C05910/TEMPFILE/temp.307.908366901 3
+DATA/CDB12C/2FFB70F5EDFF486AE053334EA8C0FF4E/TEMPFILE/temp.310.908622149 4
##################################################
### Create common user while connected to root ###
##################################################
create user c##1 identified by racattack container=all;
set linesize 500
set pagesize 500
column username format a50
select username, common, con_id from cdb_users where username like 'C##%';
USERNAME COM CON_ID
-------------------------------------------------- --- ----------
C##1 YES 3
C##1 YES 1
C##1 YES 4
##################
### Drop PDB ###
##################
--Connect to container DB
$ export ORACLE_SID=cdb1
$ . oraenv
# Fixed typo: "sydba" -> "sysdba" (SQL*Plus rejects the misspelled role keyword)
$ sqlplus / as sysdba
SQL> show con_name
CON_NAME
------------------------------
CDB$ROOT
--Close all or just the PDB to drop
SQL> alter pluggable database all close immediate;
Pluggable database altered.
SQL> select name, open_mode from v$pdbs;
NAME OPEN_MODE
------------------------------ ----------
PDB$SEED READ ONLY
JOHNPDB MOUNTED
TIGGER MOUNTED
--DROP PDB
SQL> drop pluggable database TIGGER including datafiles;
Pluggable database dropped.
--Verify PDB is gone.
SQL> select name, open_mode from v$pdbs;
NAME OPEN_MODE
------------------------------ ----------
PDB$SEED READ ONLY
JOHNPDB MOUNTED
#############
Overview
#############
Purpose
This tutorial covers the steps for using Oracle Recovery Manager (Oracle RMAN) to perform a point-in-time recovery for a pluggable database (PDB).
Time to Complete
Approximately 30 minutes
Introduction
A database point-in-time recovery (DBPITR) is sometimes referred to as an incomplete recovery. This recovery capability is suitable for the following situations:
A user error or corruption removes needed data or introduces corrupted data. For example, a user or DBA erroneously deletes or updates the contents of one or more tables, drops database objects that are still needed during an update to an application, or runs a large batch update that fails midway.
A database upgrade fails or an upgrade script goes awry.
A complete database recovery after a media failure cannot succeed because you do not have all of the needed redo logs or incremental backups.
Here are the DBPITR requirements:
Your database must be running in ARCHIVELOG mode.
You must have backups of all data files before the target SCN for DBPITR.
You must have archived redo logs between the SCN of the backups and the target SCN.
Scenario
In this tutorial, you use Oracle RMAN to perform a point-in-time recovery on a PDB. You make some erroneous data updates and then recover the database to the state it was just before you performed the data updates.
Prerequisites
Before starting this tutorial, you should:
Ensure that you have enough disk space to hold a complete backup copy of the CDB, including all of the PDBs.
Install Oracle Database 12c.
Create one CDB with two PDBs in it.
The environment used in the development of this tutorial is as follows:
ORACLE_HOME: /u01/app/oracle/product/12.1.0
TNS Listener port: 1521
Container databases:
SID: cdb1
SID: cdb2
Pluggable databases (in cdb1):
pdb1
pdb2
##################################
Preparing to Back up the CDB
##################################
Before backing up the database, you must create the backup directory and then put the database in ARCHIVELOG mode.
Creating a Backup Directory
Navigate to /stage or wherever you can find enough available space to store the backup files.
cd /stage
Create a subdirectory called db_backup_files under /stage.
mkdir db_backup_files
Create a subdirectory called cdb1 under /stage/db_backup_files.
cd db_backup_files
mkdir cdb1
Placing the CDB in ARCHIVELOG Mode
In SQL*Plus, connect as sysdba to the cdb1 container database.
. oraenv
[enter cdb1 at the prompt]
sqlplus / as sysdba
Shut down the database.
shutdown immediate
Start the database in mount mode.
startup mount
Turn on database archiving.
alter database archivelog;
Open the CDB.
alter database open;
Open all of the PDBs.
alter pluggable database all open;
Set the Flash Recovery Area (FRA) size.
alter system set db_recovery_file_dest_size = 2G scope=both;
exit
############################
Backing Up the CDB
############################
Use Oracle RMAN to back up the database to the directory that was specified in the previous section.
Perform the following steps in a new terminal window:
Set the NLS_DATE_FORMAT environment variable so that the date and time values displayed by RMAN can be more easily read.
Make sure that ORACLE_HOME is set correctly.
Set ORACLE_SID to cdb1.
# Use HH24: the original 12-hour HH has no AM/PM element in this mask,
# making RMAN-displayed timestamps ambiguous.
export NLS_DATE_FORMAT='DD-MM-YYYY HH24:MI:SS'
Connect to Oracle RMAN.
rman target /
Set the Oracle RMAN backup device type and backup file location.
configure channel device type disk format '/stage/db_backup_files/cdb1/%U';
Turn on automatic backup of control files.
configure controlfile autobackup on;
Back up the database and archive logs.
backup database plus archivelog;
exit
###########################################
Performing Erroneous updates in the PDB
###########################################
In this section, you set up a tablespace, schema, and table in each PDB. Next, you insert some records in the tables. One of the batch inserts will be done "in error."
Creating a Tablespace, Schema, and Table in PDB1
In SQL*Plus, connect to pdb1 as sysdba.
sqlplus sys/oracle@localhost:1521/pdb1 as sysdba
Create a small tablespace where a small table will be stored.
create tablespace dj_pdb1 datafile '/u01/app/oracle/oradata/cdb1/pdb1/dj_pdb1.dbf' size 10m;
Create a schema that will own the table.
create user dj identified by dj temporary tablespace temp default tablespace dj_pdb1;
Grant the schema the necessary privileges.
grant create session, create table, unlimited tablespace to dj;
Create the table in the schema.
create table dj.t1(c varchar2(100)) tablespace dj_pdb1;
###########################################
Performing Data Updates in PDB1
###########################################
Make a note of the current SCN.
select timestamp_to_scn(sysdate) from v$database;
Insert data in the table.
begin
for i in 1.. 10000 loop
insert into dj.t1 values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
end loop;
commit;
end;
/
Make a note of the SCN and remain in SQL*Plus.
select timestamp_to_scn(sysdate) from v$database;
#################################################
Creating a Tablespace, Schema, and Table in PDB2
#################################################
In SQL*Plus, connect to pdb2 as sysdba.
connect sys/oracle@localhost:1521/pdb2 as sysdba
Create a small tablespace where a small table will be stored.
create tablespace jfv_pdb2 datafile '+DATA' size 10m;
Create a schema that will own the table.
create user jfv identified by jfv temporary tablespace temp default tablespace jfv_pdb2;
Grant the schema the necessary privileges.
grant create session, create table, unlimited tablespace to jfv;
Create the table in the schema.
create table jfv.t2(c varchar2(100)) tablespace jfv_pdb2;
#################################################
Performing Data Updates in PDB2
#################################################
Insert a row in the table and commit the transaction.
insert into jfv.t2 values ('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
commit;
Make a note of the current SCN, which is the point to which you will recover the database.
select timestamp_to_scn(sysdate) from v$database;
Insert 10,000 more rows into the table.
begin
for i in 1.. 10000 loop
insert into jfv.t2 values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
end loop;
commit;
end;
/
Make a note of the SCN.
select timestamp_to_scn(sysdate) from v$database;
################################################
Recovering the PDB to a Certain Point in Time
################################################
To perform point-in-time recovery, you must first close the PDB. Use Oracle RMAN to recover until the SCN before the erroneous data updates to the table in PDB2.
Close the pdb2 pluggable database and exit SQL*Plus.
alter pluggable database pdb2 close;
exit
Connect to Oracle RMAN.
rman target /
Perform point-in-time recovery in Oracle RMAN.
Make sure that you change the SET UNTIL SCN value to the SCN that you recorded in step #1 in the "Performing Data Updates in PDB2" section. Also, set the AUXILIARY DESTINATION value to a location with enough free space to hold a copy of the database files.
run {
set until SCN = 2263440 ;
restore pluggable database pdb2;
recover pluggable database pdb2 auxiliary destination='/stage/db_backup_files/cdb1';
alter pluggable database pdb2 open resetlogs;
}
Exit Oracle RMAN.
exit
################################################
Verifying Success of the Point-in-Time Recovery
################################################
If the point-in-time recovery was successful, you should see only one row in the jfv.t2 table.
In SQL*Plus, connect to PDB2.
sqlplus jfv/jfv@localhost:1521/pdb2
Check to see that the table contains only the one row inserted before the 10,000 record batch insert.
select * from t2;
exit
################################################
Resetting your environment
################################################
Perform the following steps to reset your environment prior to repeating the activities covered in this OBE or starting another OBE.
Remove the jfv user and jfv_pdb2 tablespace from pdb2 .
. oraenv
[enter cdb2 at the prompt]
sqlplus sys/oracle@localhost:1521/pdb2 as sysdba
drop user jfv cascade;
drop tablespace jfv_pdb2 including contents;
Remove the dj user and dj_pdb1 tablespace from pdb1.
connect sys/oracle@localhost:1521/pdb1 as sysdba
drop user dj cascade;
drop tablespace dj_pdb1 including contents;
Take the database out of ARCHIVELOG mode .
connect / as sysdba
shutdown immediate
startup mount
alter database noarchivelog;
alter database open;
alter pluggable database all open;
exit
Remove the backup files.
rm -rf /stage/db_backup_files
sqlplus system/racattack@dgprm:1521/TIGGER
###########################################
Performing Erroneous updates in the PDB
###########################################
In this section, you set up a tablespace, schema, and table in each PDB. Next, you insert some records in the tables. One of the batch inserts will be done "in error."
Creating a Tablespace, Schema, and Table in PDB1
In SQL*Plus, connect to pdb1 as sysdba.
sqlplus sys/oracle@localhost:1521/pdb1 as sysdba
Create a small tablespace where a small table will be stored.
create tablespace dj_pdb1 datafile '+DATA' size 10m;
Create a schema that will own the table.
create user dj identified by dj temporary tablespace temp default tablespace dj_pdb1;
Grant the schema the necessary privileges.
grant create session, create table, unlimited tablespace to dj;
Create the table in the schema.
create table dj.t1(c varchar2(100)) tablespace dj_pdb1;
###########################################
Performing Data Updates in PDB1
###########################################
Make a note of the current SCN.
select timestamp_to_scn(sysdate) from v$database;
Insert data in the table.
begin
for i in 1.. 10000 loop
insert into dj.t1 values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
end loop;
commit;
end;
/
Make a note of the SCN and remain in SQL*Plus.
select timestamp_to_scn(sysdate) from v$database;
Thursday, June 30, 2016
Oracle RAC 12c Upgrade log
[oracle@collabn1 ~]$ cat /etc/oratab
+ASM1:/u01/app/12.1.0/grid:N: # line added by Agent
RAC1:/u01/app/oracle/product/12.1.0/dbhome_1:Y: # line added by Agent
RAC:/u01/app/oracle/product/12.1.0/dbhome_1:N: # line added by Agent
[oracle@collabn1 ~]$ export ORACLE_SID=+ASM1
[oracle@collabn1 ~]$ . oraenv
ORACLE_SID = [+ASM1] ?
The Oracle base remains unchanged with value /u01/app/oracle
[oracle@collabn1 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.FRA.dg
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.LISTENER.lsnr
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.asm
ONLINE ONLINE collabn1 Started,STABLE
ONLINE ONLINE collabn2 Started,STABLE
ora.net1.network
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.ons
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE collabn1 STABLE
ora.LISTENER_SCAN2.lsnr
1 ONLINE ONLINE collabn2 STABLE
ora.LISTENER_SCAN3.lsnr
1 ONLINE ONLINE collabn2 STABLE
ora.collabn1.vip
1 ONLINE ONLINE collabn1 STABLE
ora.collabn2.vip
1 ONLINE ONLINE collabn2 STABLE
ora.cvu
1 ONLINE ONLINE collabn2 STABLE
ora.oc4j
1 OFFLINE OFFLINE STABLE
ora.rac.db
1 ONLINE ONLINE collabn1 Open,STABLE
2 ONLINE ONLINE collabn2 Open,STABLE
ora.scan1.vip
1 ONLINE ONLINE collabn1 STABLE
ora.scan2.vip
1 ONLINE ONLINE collabn2 STABLE
ora.scan3.vip
1 ONLINE ONLINE collabn2 STABLE
--------------------------------------------------------------------------------
$ export ORACLE_SID=+ASM1
$ . oraenv
$ ocrconfig -showbackup
PROT-24: Auto backups for the Oracle Cluster Registry are not available
collabn1 2014/02/18 21:58:23 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215823.ocr
collabn1 2014/02/18 21:58:05 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215805.ocr
collabn1 2014/02/18 21:48:20 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214820.ocr
collabn1 2014/02/18 21:43:24 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214324.ocr
$ su
Password:
[root@collabn1 orachk]# ocrconfig -manualbackup
$ ocrconfig -showbackup
PROT-24: Auto backups for the Oracle Cluster Registry are not available
collabn2 2016/03/01 11:25:46 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20160301_112546.ocr
collabn1 2014/02/18 21:58:23 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215823.ocr
collabn1 2014/02/18 21:58:05 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215805.ocr
collabn1 2014/02/18 21:48:20 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214820.ocr
collabn1 2014/02/18 21:43:24 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214324.ocr
# mkdir -p /u01/products/grid_12102
# mkdir -p /u01/scripts
# chown -R oracle:oinstall /u01/products
# chown -R oracle:oinstall /u01/scripts
# chmod -R 775 /u01/scripts
# chmod -R 775 /u01/products
$ mkdir -p /u01/scripts/orachk
$ cd /media/sf_12cR1/DB_TOOLS
$ cp orachk.zip /u01/scripts/orachk
$ cd /u01/scripts/orachk
$ unzip orachk.zip
$ export ORACLE_SID=+ASM1
$ . oraenv
$ crsctl query crs releaseversion
Oracle High Availability Services release version on the local node is [12.1.0.1.0]
$ crsctl query crs softwareversion
Oracle Clusterware version on node [collabn1] is [12.1.0.1.0]
$ crsctl query crs activeversion -f
Oracle Clusterware active version on the cluster is [12.1.0.1.0]. The cluster upgrade state is [NORMAL]. The cluster active patch level is [0].
$ crsctl stat res -t
$ export ORACLE_SID=RAC1
$ . oraenv
$ sqlplus / as sysdba
set linesize 500
set pagesize 500
column HOST_NAME format a20
column STATUS format a6
column INSTANCE_NAME format a13
column STIME format a35
column uptime format a55
select Host_Name, Status, database_status, Instance_Name
,'Started At: ' || to_char(startup_time,'DD-MON-YYYY HH24:MI:SS') stime
,'Uptime: ' || floor(sysdate - startup_time) || ' days(s) ' ||
trunc( 24*((sysdate-startup_time) -
trunc(sysdate-startup_time))) || ' hour(s) ' ||
mod(trunc(1440*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' minute(s) ' ||
mod(trunc(86400*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' seconds' uptime
from v$instance
/
HOST_NAME STATUS DATABASE_STATUS INSTANCE_NAME STIME UPTIME
-------------------- ------ ----------------- ------------- ----------------------------------- -------------------------------------------------------
collabn1.racattack OPEN ACTIVE RAC1 Started At: 01-MAR-2016 11:03:29 Uptime: 0 days(s) 0 hour(s) 16 minute(s) 51 seconds
ssh collabn2
# mkdir -p /u01/products/grid_12102
# mkdir -p /u01/scripts
# chown -R oracle:oinstall /u01/products
# chown -R oracle:oinstall /u01/scripts
# chmod -R 775 /u01/scripts
# chmod -R 775 /u01/products
$ mkdir -p /u01/scripts/orachk
$ cd /media/sf_12cR1/DB_TOOLS
$ cp orachk.zip /u01/scripts/orachk
$ cd /u01/scripts/orachk
$ unzip orachk.zip
$ export ORACLE_SID=+ASM2
$ . oraenv
$ crsctl query crs releaseversion
Oracle High Availability Services release version on the local node is [12.1.0.1.0]
$ crsctl query crs softwareversion
Oracle Clusterware version on node [collabn1] is [12.1.0.1.0]
$ crsctl query crs activeversion -f
Oracle Clusterware active version on the cluster is [12.1.0.1.0]. The cluster upgrade state is [NORMAL]. The cluster active patch level is [0].
$ crsctl stat res -t
$ export ORACLE_SID=RAC2
$ . oraenv
$ sqlplus / as sysdba
set linesize 500
set pagesize 500
column HOST_NAME format a20
column STATUS format a6
column INSTANCE_NAME format a13
column STIME format a35
column uptime format a55
select Host_Name, Status, database_status, Instance_Name
,'Started At: ' || to_char(startup_time,'DD-MON-YYYY HH24:MI:SS') stime
,'Uptime: ' || floor(sysdate - startup_time) || ' days(s) ' ||
trunc( 24*((sysdate-startup_time) -
trunc(sysdate-startup_time))) || ' hour(s) ' ||
mod(trunc(1440*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' minute(s) ' ||
mod(trunc(86400*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' seconds' uptime
from v$instance
/
HOST_NAME STATUS DATABASE_STATUS INSTANCE_NAME STIME UPTIME
-------------------- ------ ----------------- ------------- ----------------------------------- -------------------------------------------------------
collabn2.racattack OPEN ACTIVE RAC2 Started At: 01-MAR-2016 11:04:52 Uptime: 0 days(s) 0 hour(s) 13 minute(s) 54 seconds
$ cd /u01/scripts/orachk
./orachk -u -o pre
$ cd /media/sf_12cR1/12102_INSTALL/grid
$ ./runcluvfy.sh stage -pre crsinst -upgrade -rolling -src_crshome /u01/app/12.1.0/grid -dest_crshome /u01/products/grid_12102 -dest_version 12.1.0.2.0 -fixup -verbose
[root@collabn1 grid]# dd if=/dev/zero of=/home/swapfile bs=2048 count=1048576
1048576+0 records in
1048576+0 records out
2147483648 bytes (2.1 GB) copied, 29.3987 s, 73.0 MB/s
[root@collabn1 grid]# mkswap /home/swapfile
mkswap: /home/swapfile: warning: don't erase bootbits sectors
on whole disk. Use -f to force.
Setting up swapspace version 1, size = 2097148 KiB
no label, UUID=5f6d4b0f-d763-4e62-b6d4-d40c647776f8
[root@collabn1 grid]# swapon /home/swapfile
[root@collabn1 grid]# swapon -a
[root@collabn1 grid]# swapon -s
Filename Type Size Used Priority
/dev/dm-1 partition 4063228 105108 -1
/home/swapfile file 2097148 0 -2
[root@collabn1 home]# vi /etc/fstab
Add the following line to keep the swap file after reboot:
/home/swapfile none swap sw 0 0
# vi /etc/resolv.conf
Add the following line:
options timeout:1 attempts:1 rotate
# cat /etc/resolv.conf
# Generated by NetworkManager
search racattack
nameserver 192.168.78.51
nameserver 192.168.78.52
options timeout:1 attempts:1 rotate
Checking integrity of file "/etc/resolv.conf" across nodes
Checking the file "/etc/resolv.conf" to make sure only one of 'domain' and 'search' entries is defined
"domain" and "search" entries do not coexist in any "/etc/resolv.conf" file
Checking if 'domain' entry in file "/etc/resolv.conf" is consistent across the nodes...
"domain" entry does not exist in any "/etc/resolv.conf" file
Checking if 'search' entry in file "/etc/resolv.conf" is consistent across the nodes...
Checking file "/etc/resolv.conf" to make sure that only one 'search' entry is defined
More than one "search" entry does not exist in any "/etc/resolv.conf" file
All nodes have same "search" order defined in file "/etc/resolv.conf"
Checking DNS response time for an unreachable node
Node Name Status
------------------------------------ ------------------------
collabn1 failed
collabn2 failed
PRVF-5636 : The DNS response time for an unreachable node exceeded "15000" ms on following nodes: collabn1,collabn2
checking DNS response from all servers in "/etc/resolv.conf"
******************************************************************************************
Following is the list of fixable prerequisites selected to fix in this session
******************************************************************************************
-------------- --------------- ----------------
Check failed. Failed on nodes Reboot required?
-------------- --------------- ----------------
OS Kernel Parameter: collabn2,collabn1 no
panic_on_oops
Execute "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" as root user on nodes "collabn2,collabn1" to perform the fix up operations manually
Press ENTER key to continue after execution of "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" has completed on nodes "collabn2,collabn1"
Fix: OS Kernel Parameter: panic_on_oops
Node Name Status
------------------------------------ ------------------------
collabn2 failed
collabn1 failed
ERROR:
PRVG-9023 : Manual fix up command "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" was not issued by root user on node "collabn2"
PRVG-9023 : Manual fix up command "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" was not issued by root user on node "collabn1"
/media/sf_12cR1/DB_TOOLS/
Change the following lines in /etc/named.conf
zone "." IN {
type hint;
file "named.ca";
};
to:
zone "." IN {
type hint;
file "/dev/null";
};
$ chmod 775 $ORACLE_BASE/cfgtoollogs
$ chmod 770 $ORACLE_BASE/cfgtoollogs/dbca
$ chmod 770 $ORACLE_BASE/admin
#############################################################################################################
RAC DATABASE UPGRADE
#############################################################################################################
# mkdir -p /u01/products/rdbms_12102
# chown -R oracle:oinstall /u01/products/rdbms_12102
# chmod -R 775 /u01/products/rdbms_12102
$ cd /media/sf_12cR1/12102_INSTALL/database
$ ./runInstaller
# cd /u01/products/rdbms_12102
[root@collabn1 rdbms_12102]# ./root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/products/rdbms_12102
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
# cd /u01/products/rdbms_12102
[root@collabn2 rdbms_12102]# ./root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/products/rdbms_12102
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
[oracle@collabn1]
$ mkdir -p /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cd /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cp /u01/products/rdbms_12102/rdbms/admin/preupgrd.sql .
$ cp /u01/products/rdbms_12102/rdbms/admin/utluppkg.sql .
export ORACLE_SID=RAC1
. oraenv
$ sqlplus / as sysdba
SQL> @preupgrd.sql
[oracle@collabn2]
$ mkdir -p /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cd /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cp /u01/products/rdbms_12102/rdbms/admin/preupgrd.sql .
$ cp /u01/products/rdbms_12102/rdbms/admin/utluppkg.sql .
export ORACLE_SID=RAC2
. oraenv
$ sqlplus / as sysdba
SQL> @preupgrd.sql
Loading Pre-Upgrade Package...
***************************************************************************
Executing Pre-Upgrade Checks in RAC...
***************************************************************************
************************************************************
====>> ERRORS FOUND for RAC <<====
The following are *** ERROR LEVEL CONDITIONS *** that must be addressed
prior to attempting your upgrade.
[NOTE: the specific error conditions were not captured in this paste -- see preupgrade.log, path below.]
Failure to do so will result in a failed upgrade.
You MUST resolve the above errors prior to upgrade
************************************************************
************************************************************
====>> PRE-UPGRADE RESULTS for RAC <<====
ACTIONS REQUIRED:
1. Review results of the pre-upgrade checks:
/u01/app/oracle/cfgtoollogs/RAC/preupgrade/preupgrade.log
2. Execute in the SOURCE environment BEFORE upgrade:
/u01/app/oracle/cfgtoollogs/RAC/preupgrade/preupgrade_fixups.sql
3. Execute in the NEW environment AFTER upgrade:
/u01/app/oracle/cfgtoollogs/RAC/preupgrade/postupgrade_fixups.sql
************************************************************
***************************************************************************
Pre-Upgrade Checks in RAC Completed.
***************************************************************************
***************************************************************************
***************************************************************************
$ cd /media/sf_12cR1/12102_INSTALL/grid
$ ./runcluvfy.sh stage -pre dbinst -upgrade -src_dbhome /u01/app/oracle/product/12.1.0/dbhome_1 -dest_dbhome /u01/products/rdbms_12102 -dest_version 12.1.0.2.0 -fixup -verbose
[oracle@collabn1 grid]$ ./runcluvfy.sh stage -pre dbinst -upgrade -src_dbhome /u01/app/oracle/product/12.1.0/dbhome_1 -dest_dbhome /u01/products/rdbms_12102 -dest_version 12.1.0.2.0 -fixup -verbose
Performing pre-checks for database installation
Checking node reachability...
Check: Node reachability from node "collabn1"
Destination Node Reachable?
------------------------------------ ------------------------
collabn1 yes
collabn2 yes
Result: Node reachability check passed from node "collabn1"
Checking user equivalence...
Check: User equivalence for user "oracle"
Node Name Status
------------------------------------ ------------------------
collabn2 passed
collabn1 passed
Result: User equivalence check passed for user "oracle"
Specify user name for database "rac" [default "DBSNMP"] :
Specify password for user "DBSNMP" in database "rac" :
Checking node connectivity...
Checking hosts config file...
Node Name Status
------------------------------------ ------------------------
collabn1 passed
collabn2 passed
Verification of the hosts config file successful
Interface information for node "collabn1"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.78.51 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth0 192.168.78.61 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth0 192.168.78.253 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth0 192.168.78.251 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth1 172.16.100.51 172.16.100.0 0.0.0.0 10.0.4.2 08:00:27:2B:4B:56 1500
eth1 169.254.200.132 169.254.0.0 0.0.0.0 10.0.4.2 08:00:27:2B:4B:56 1500
Interface information for node "collabn2"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.78.52 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:3C:18:FE 1500
eth0 192.168.78.62 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:3C:18:FE 1500
eth0 192.168.78.252 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:3C:18:FE 1500
eth1 172.16.100.52 172.16.100.0 0.0.0.0 10.0.4.2 08:00:27:E1:BA:80 1500
eth1 169.254.223.245 169.254.0.0 0.0.0.0 10.0.4.2 08:00:27:E1:BA:80 1500
Check: Node connectivity using interfaces on subnet "192.168.78.0"
Check: Node connectivity of subnet "192.168.78.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn1[192.168.78.51] collabn1[192.168.78.251] yes
collabn1[192.168.78.51] collabn2[192.168.78.252] yes
collabn1[192.168.78.51] collabn2[192.168.78.62] yes
collabn1[192.168.78.51] collabn1[192.168.78.253] yes
collabn1[192.168.78.51] collabn1[192.168.78.61] yes
collabn1[192.168.78.51] collabn2[192.168.78.52] yes
collabn1[192.168.78.251] collabn2[192.168.78.252] yes
collabn1[192.168.78.251] collabn2[192.168.78.62] yes
collabn1[192.168.78.251] collabn1[192.168.78.253] yes
collabn1[192.168.78.251] collabn1[192.168.78.61] yes
collabn1[192.168.78.251] collabn2[192.168.78.52] yes
collabn2[192.168.78.252] collabn2[192.168.78.62] yes
collabn2[192.168.78.252] collabn1[192.168.78.253] yes
collabn2[192.168.78.252] collabn1[192.168.78.61] yes
collabn2[192.168.78.252] collabn2[192.168.78.52] yes
collabn2[192.168.78.62] collabn1[192.168.78.253] yes
collabn2[192.168.78.62] collabn1[192.168.78.61] yes
collabn2[192.168.78.62] collabn2[192.168.78.52] yes
collabn1[192.168.78.253] collabn1[192.168.78.61] yes
collabn1[192.168.78.253] collabn2[192.168.78.52] yes
collabn1[192.168.78.61] collabn2[192.168.78.52] yes
Result: Node connectivity passed for subnet "192.168.78.0" with node(s) collabn1,collabn2
Check: TCP connectivity of subnet "192.168.78.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn1 : 192.168.78.51 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.51 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.51 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.51 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.51 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.251 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.251 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.251 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.51 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.251 collabn2 : 192.168.78.252 passed
collabn2 : 192.168.78.252 collabn2 : 192.168.78.252 passed
collabn2 : 192.168.78.62 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.253 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.61 collabn2 : 192.168.78.252 passed
collabn2 : 192.168.78.52 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.51 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.251 collabn2 : 192.168.78.62 passed
collabn2 : 192.168.78.252 collabn2 : 192.168.78.62 passed
collabn2 : 192.168.78.62 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.253 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.61 collabn2 : 192.168.78.62 passed
collabn2 : 192.168.78.52 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.51 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.253 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.253 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.253 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.51 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.61 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.61 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.61 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.51 collabn2 : 192.168.78.52 passed
collabn1 : 192.168.78.251 collabn2 : 192.168.78.52 passed
collabn2 : 192.168.78.252 collabn2 : 192.168.78.52 passed
collabn2 : 192.168.78.62 collabn2 : 192.168.78.52 passed
collabn1 : 192.168.78.253 collabn2 : 192.168.78.52 passed
collabn1 : 192.168.78.61 collabn2 : 192.168.78.52 passed
collabn2 : 192.168.78.52 collabn2 : 192.168.78.52 passed
Result: TCP connectivity check passed for subnet "192.168.78.0"
Check: Node connectivity using interfaces on subnet "172.16.100.0"
Check: Node connectivity of subnet "172.16.100.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn2[172.16.100.52] collabn1[172.16.100.51] yes
Result: Node connectivity passed for subnet "172.16.100.0" with node(s) collabn2,collabn1
Check: TCP connectivity of subnet "172.16.100.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn2 : 172.16.100.52 collabn2 : 172.16.100.52 passed
collabn1 : 172.16.100.51 collabn2 : 172.16.100.52 passed
collabn2 : 172.16.100.52 collabn1 : 172.16.100.51 passed
collabn1 : 172.16.100.51 collabn1 : 172.16.100.51 passed
Result: TCP connectivity check passed for subnet "172.16.100.0"
Checking subnet mask consistency...
Subnet mask consistency check passed for subnet "192.168.78.0".
Subnet mask consistency check passed for subnet "172.16.100.0".
Subnet mask consistency check passed.
Result: Node connectivity check passed
Checking multicast communication...
Checking subnet "172.16.100.0" for multicast communication with multicast group "224.0.0.251"...
Check of subnet "172.16.100.0" for multicast communication with multicast group "224.0.0.251" passed.
Check of multicast communication passed.
Check: Total memory
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 4.3535GB (4564940.0KB) 1GB (1048576.0KB) passed
collabn1 4.3535GB (4564940.0KB) 1GB (1048576.0KB) passed
Result: Total memory check passed
Check: Available memory
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 3.0986GB (3249148.0KB) 50MB (51200.0KB) passed
collabn1 2.7023GB (2833548.0KB) 50MB (51200.0KB) passed
Result: Available memory check passed
Check: Swap space
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 5.875GB (6160376.0KB) 4.3535GB (4564940.0KB) passed
collabn1 5.875GB (6160376.0KB) 4.3535GB (4564940.0KB) passed
Result: Swap space check passed
Check: Free disk space for "collabn2:/u01/products/rdbms_12102,collabn2:/tmp"
Path Node Name Mount point Available Required Status
---------------- ------------ ------------ ------------ ------------ ------------
/u01/products/rdbms_12102 collabn2 / 6.625GB 7.4GB failed
/tmp collabn2 / 6.625GB 7.4GB failed
Result: Free disk space check failed for "collabn2:/u01/products/rdbms_12102,collabn2:/tmp"
Check: Free disk space for "collabn1:/u01/products/rdbms_12102,collabn1:/tmp"
Path Node Name Mount point Available Required Status
---------------- ------------ ------------ ------------ ------------ ------------
/u01/products/rdbms_12102 collabn1 / 8.2351GB 7.4GB passed
/tmp collabn1 / 8.2351GB 7.4GB passed
Result: Free disk space check passed for "collabn1:/u01/products/rdbms_12102,collabn1:/tmp"
Check: User existence for "oracle"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists(54321)
collabn1 passed exists(54321)
Checking for multiple users with UID value 54321
Result: Check for multiple users with UID value 54321 passed
Result: User existence check passed for "oracle"
Check: Group existence for "oinstall"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists
collabn1 passed exists
Result: Group existence check passed for "oinstall"
Check: Group existence for "dba"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists
collabn1 passed exists
Result: Group existence check passed for "dba"
Check: Membership of user "oracle" in group "oinstall" [as Primary]
Node Name User Exists Group Exists User in Group Primary Status
---------------- ------------ ------------ ------------ ------------ ------------
collabn2 yes yes yes yes passed
collabn1 yes yes yes yes passed
Result: Membership check for user "oracle" in group "oinstall" [as Primary] passed
Check: Membership of user "oracle" in group "dba"
Node Name User Exists Group Exists User in Group Status
---------------- ------------ ------------ ------------ ----------------
collabn2 yes yes yes passed
collabn1 yes yes yes passed
Result: Membership check for user "oracle" in group "dba" passed
Check: Run level
Node Name run level Required Status
------------ ------------------------ ------------------------ ----------
collabn2 5 3,5 passed
collabn1 5 3,5 passed
Result: Run level check passed
Check: Hard limits for "maximum open file descriptors"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 hard 65536 65536 passed
collabn1 hard 65536 65536 passed
Result: Hard limits check passed for "maximum open file descriptors"
Check: Soft limits for "maximum open file descriptors"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 soft 1024 1024 passed
collabn1 soft 65536 1024 passed
Result: Soft limits check passed for "maximum open file descriptors"
Check: Hard limits for "maximum user processes"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 hard 16384 16384 passed
collabn1 hard 16384 16384 passed
Result: Hard limits check passed for "maximum user processes"
Check: Soft limits for "maximum user processes"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 soft 16384 2047 passed
collabn1 soft 16384 2047 passed
Result: Soft limits check passed for "maximum user processes"
There are no oracle patches required for home "/u01/app/oracle/product/12.1.0/dbhome_1".
There are no oracle patches required for home "/u01/products/rdbms_12102".
Check: System architecture
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 x86_64 x86_64 passed
collabn1 x86_64 x86_64 passed
Result: System architecture check passed
Check: Kernel version
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 3.8.13-26.1.1.el6uek.x86_64 2.6.39 passed
collabn1 3.8.13-26.1.1.el6uek.x86_64 2.6.39 passed
Result: Kernel version check passed
Check: Kernel parameter for "semmsl"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 250 250 250 passed
collabn2 250 250 250 passed
Result: Kernel parameter check passed for "semmsl"
Check: Kernel parameter for "semmns"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 32000 32000 32000 passed
collabn2 32000 32000 32000 passed
Result: Kernel parameter check passed for "semmns"
Check: Kernel parameter for "semopm"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 100 100 100 passed
collabn2 100 100 100 passed
Result: Kernel parameter check passed for "semopm"
Check: Kernel parameter for "semmni"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 128 128 128 passed
collabn2 128 128 128 passed
Result: Kernel parameter check passed for "semmni"
Check: Kernel parameter for "shmmax"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4398046511104 4398046511104 2337249280 passed
collabn2 4398046511104 4398046511104 2337249280 passed
Result: Kernel parameter check passed for "shmmax"
Check: Kernel parameter for "shmmni"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4096 4096 4096 passed
collabn2 4096 4096 4096 passed
Result: Kernel parameter check passed for "shmmni"
Check: Kernel parameter for "shmall"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4294967296 4294967296 456494 passed
collabn2 4294967296 4294967296 456494 passed
Result: Kernel parameter check passed for "shmall"
Check: Kernel parameter for "file-max"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 6815744 6815744 6815744 passed
collabn2 6815744 6815744 6815744 passed
Result: Kernel parameter check passed for "file-max"
Check: Kernel parameter for "ip_local_port_range"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 between 9000 & 65500 between 9000 & 65500 between 9000 & 65535 passed
collabn2 between 9000 & 65500 between 9000 & 65500 between 9000 & 65535 passed
Result: Kernel parameter check passed for "ip_local_port_range"
Check: Kernel parameter for "rmem_default"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 262144 262144 262144 passed
collabn2 262144 262144 262144 passed
Result: Kernel parameter check passed for "rmem_default"
Check: Kernel parameter for "rmem_max"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4194304 4194304 4194304 passed
collabn2 4194304 4194304 4194304 passed
Result: Kernel parameter check passed for "rmem_max"
Check: Kernel parameter for "wmem_default"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 262144 262144 262144 passed
collabn2 262144 262144 262144 passed
Result: Kernel parameter check passed for "wmem_default"
Check: Kernel parameter for "wmem_max"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 1048576 1048576 1048576 passed
collabn2 1048576 1048576 1048576 passed
Result: Kernel parameter check passed for "wmem_max"
Check: Kernel parameter for "aio-max-nr"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 1048576 1048576 1048576 passed
collabn2 1048576 1048576 1048576 passed
Result: Kernel parameter check passed for "aio-max-nr"
Check: Package existence for "binutils"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 binutils-2.20.51.0.2-5.36.el6 binutils-2.20.51.0.2 passed
collabn1 binutils-2.20.51.0.2-5.36.el6 binutils-2.20.51.0.2 passed
Result: Package existence check passed for "binutils"
Check: Package existence for "compat-libcap1"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 compat-libcap1-1.10-1 compat-libcap1-1.10 passed
collabn1 compat-libcap1-1.10-1 compat-libcap1-1.10 passed
Result: Package existence check passed for "compat-libcap1"
Check: Package existence for "compat-libstdc++-33(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 compat-libstdc++-33(x86_64)-3.2.3-69.el6 compat-libstdc++-33(x86_64)-3.2.3 passed
collabn1 compat-libstdc++-33(x86_64)-3.2.3-69.el6 compat-libstdc++-33(x86_64)-3.2.3 passed
Result: Package existence check passed for "compat-libstdc++-33(x86_64)"
Check: Package existence for "libgcc(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libgcc(x86_64)-4.4.7-4.el6 libgcc(x86_64)-4.4.4 passed
collabn1 libgcc(x86_64)-4.4.7-4.el6 libgcc(x86_64)-4.4.4 passed
Result: Package existence check passed for "libgcc(x86_64)"
Check: Package existence for "libstdc++(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libstdc++(x86_64)-4.4.7-4.el6 libstdc++(x86_64)-4.4.4 passed
collabn1 libstdc++(x86_64)-4.4.7-4.el6 libstdc++(x86_64)-4.4.4 passed
Result: Package existence check passed for "libstdc++(x86_64)"
Check: Package existence for "libstdc++-devel(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libstdc++-devel(x86_64)-4.4.7-4.el6 libstdc++-devel(x86_64)-4.4.4 passed
collabn1 libstdc++-devel(x86_64)-4.4.7-4.el6 libstdc++-devel(x86_64)-4.4.4 passed
Result: Package existence check passed for "libstdc++-devel(x86_64)"
Check: Package existence for "sysstat"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 sysstat-9.0.4-22.el6 sysstat-9.0.4 passed
collabn1 sysstat-9.0.4-22.el6 sysstat-9.0.4 passed
Result: Package existence check passed for "sysstat"
Check: Package existence for "gcc"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 gcc-4.4.7-4.el6 gcc-4.4.4 passed
collabn1 gcc-4.4.7-4.el6 gcc-4.4.4 passed
Result: Package existence check passed for "gcc"
Check: Package existence for "gcc-c++"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 gcc-c++-4.4.7-4.el6 gcc-c++-4.4.4 passed
collabn1 gcc-c++-4.4.7-4.el6 gcc-c++-4.4.4 passed
Result: Package existence check passed for "gcc-c++"
Check: Package existence for "ksh"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 ksh ksh passed
collabn1 ksh ksh passed
Result: Package existence check passed for "ksh"
Check: Package existence for "make"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 make-3.81-20.el6 make-3.81 passed
collabn1 make-3.81-20.el6 make-3.81 passed
Result: Package existence check passed for "make"
Check: Package existence for "glibc(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 glibc(x86_64)-2.12-1.132.el6 glibc(x86_64)-2.12 passed
collabn1 glibc(x86_64)-2.12-1.132.el6 glibc(x86_64)-2.12 passed
Result: Package existence check passed for "glibc(x86_64)"
Check: Package existence for "glibc-devel(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 glibc-devel(x86_64)-2.12-1.132.el6 glibc-devel(x86_64)-2.12 passed
collabn1 glibc-devel(x86_64)-2.12-1.132.el6 glibc-devel(x86_64)-2.12 passed
Result: Package existence check passed for "glibc-devel(x86_64)"
Check: Package existence for "libaio(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libaio(x86_64)-0.3.107-10.el6 libaio(x86_64)-0.3.107 passed
collabn1 libaio(x86_64)-0.3.107-10.el6 libaio(x86_64)-0.3.107 passed
Result: Package existence check passed for "libaio(x86_64)"
Check: Package existence for "libaio-devel(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libaio-devel(x86_64)-0.3.107-10.el6 libaio-devel(x86_64)-0.3.107 passed
collabn1 libaio-devel(x86_64)-0.3.107-10.el6 libaio-devel(x86_64)-0.3.107 passed
Result: Package existence check passed for "libaio-devel(x86_64)"
Checking for multiple users with UID value 0
Result: Check for multiple users with UID value 0 passed
Check: Current group ID
Result: Current group ID check passed
Starting check for consistency of primary group of root user
Node Name Status
------------------------------------ ------------------------
collabn2 passed
collabn1 passed
Check for consistency of root user's primary group passed
Check default user file creation mask
Node Name Available Required Comment
------------ ------------------------ ------------------------ ----------
collabn2 0022 0022 passed
collabn1 0022 0022 passed
Result: Default user file creation mask check passed
Checking CRS integrity...
Clusterware version consistency passed.
The Oracle Clusterware is healthy on node "collabn1"
The Oracle Clusterware is healthy on node "collabn2"
CRS integrity check passed
Checking Cluster manager integrity...
Checking CSS daemon...
Node Name Status
------------------------------------ ------------------------
collabn1 running
collabn2 running
Oracle Cluster Synchronization Services appear to be online.
Cluster manager integrity check passed
Checking node application existence...
Checking existence of VIP node application (required)
Node Name Required Running? Comment
------------ ------------------------ ------------------------ ----------
collabn1 yes yes passed
collabn2 yes yes passed
VIP node application check passed
Checking existence of NETWORK node application (required)
Node Name Required Running? Comment
------------ ------------------------ ------------------------ ----------
collabn1 yes yes passed
collabn2 yes yes passed
NETWORK node application check passed
Checking existence of ONS node application (optional)
Node Name Required Running? Comment
------------ ------------------------ ------------------------ ----------
collabn1 no yes passed
collabn2 no yes passed
ONS node application check passed
Checking if Clusterware is installed on all nodes...
Oracle Clusterware is installed on all nodes.
Checking if CTSS Resource is running on all nodes...
Check: CTSS Resource running on all nodes
Node Name Status
------------------------------------ ------------------------
collabn1 passed
collabn2 passed
CTSS resource check passed
Querying CTSS for time offset on all nodes...
Query of CTSS for time offset passed
Check CTSS state started...
Check: CTSS state
Node Name State
------------------------------------ ------------------------
collabn2 Active
collabn1 Active
CTSS is in Active state. Proceeding with check of clock time offsets on all nodes...
Reference Time Offset Limit: 1000.0 msecs
Check: Reference Time Offset
Node Name Time Offset Status
------------ ------------------------ ------------------------
collabn2 0.0 passed
collabn1 0.0 passed
Time offset is within the specified limits on the following set of nodes:
"[collabn2, collabn1]"
Result: Check of clock time offsets passed
Oracle Cluster Time Synchronization Services check passed
Checking integrity of file "/etc/resolv.conf" across nodes
Checking the file "/etc/resolv.conf" to make sure only one of 'domain' and 'search' entries is defined
"domain" and "search" entries do not coexist in any "/etc/resolv.conf" file
Checking if 'domain' entry in file "/etc/resolv.conf" is consistent across the nodes...
"domain" entry does not exist in any "/etc/resolv.conf" file
Checking if 'search' entry in file "/etc/resolv.conf" is consistent across the nodes...
Checking file "/etc/resolv.conf" to make sure that only one 'search' entry is defined
More than one "search" entry does not exist in any "/etc/resolv.conf" file
All nodes have same "search" order defined in file "/etc/resolv.conf"
Checking DNS response time for an unreachable node
Node Name Status
------------------------------------ ------------------------
collabn1 failed
collabn2 failed
PRVF-5636 : The DNS response time for an unreachable node exceeded "15000" ms on following nodes: collabn1,collabn2
checking DNS response from all servers in "/etc/resolv.conf"
checking response for name "collabn2" from each of the name servers specified in "/etc/resolv.conf"
Node Name Source Comment Status
------------ ------------------------ ------------------------ ----------
collabn2 192.168.78.51 IPv4 passed
checking response for name "collabn1" from each of the name servers specified in "/etc/resolv.conf"
Node Name Source Comment Status
------------ ------------------------ ------------------------ ----------
collabn1 192.168.78.51 IPv4 passed
Check for integrity of file "/etc/resolv.conf" failed
Check: Time zone consistency
Result: Time zone consistency check passed
Checking Single Client Access Name (SCAN)...
SCAN Name Node Running? ListenerName Port Running?
---------------- ------------ ------------ ------------ ------------ ------------
collabn-cluster-scan.racattack collabn2 true LISTENER_SCAN1 1521 true
collabn-cluster-scan.racattack collabn1 true LISTENER_SCAN2 1521 true
collabn-cluster-scan.racattack collabn1 true LISTENER_SCAN3 1521 true
Checking TCP connectivity to SCAN listeners...
Node ListenerName TCP connectivity?
------------ ------------------------ ------------------------
collabn1 LISTENER_SCAN1 yes
collabn1 LISTENER_SCAN2 yes
collabn1 LISTENER_SCAN3 yes
TCP connectivity to SCAN listeners exists on all cluster nodes
Checking name resolution setup for "collabn-cluster-scan.racattack"...
Checking integrity of name service switch configuration file "/etc/nsswitch.conf" ...
Checking if "hosts" entry in file "/etc/nsswitch.conf" is consistent across nodes...
Checking file "/etc/nsswitch.conf" to make sure that only one "hosts" entry is defined
More than one "hosts" entry does not exist in any "/etc/nsswitch.conf" file
All nodes have same "hosts" entry defined in file "/etc/nsswitch.conf"
Check for integrity of name service switch configuration file "/etc/nsswitch.conf" passed
SCAN Name IP Address Status Comment
------------ ------------------------ ------------------------ ----------
collabn-cluster-scan.racattack 192.168.78.251 passed
collabn-cluster-scan.racattack 192.168.78.253 passed
collabn-cluster-scan.racattack 192.168.78.252 passed
Checking SCAN IP addresses...
Check of SCAN IP addresses passed
Verification of SCAN VIP and listener setup passed
Checking VIP configuration.
Checking VIP Subnet configuration.
Check for VIP Subnet configuration passed.
Checking VIP reachability
Check for VIP reachability passed.
Checking stale database schema statistics...
PRVG-11143 : The following error occurred during stale database schema statistics check.
PRVG-11115 : Following error occurred while establishing connection to database "rac"
PRCQ-1000 : An error occurred while establishing connection to database with user name "DBSNMP" and connect descriptor:
(DESCRIPTION = (LOAD_BALANCE=on) (ADDRESS = (PROTOCOL = TCP)(HOST = collabn-cluster-scan.racattack)(PORT = 1521)) (CONNECT_DATA =(SERVER = DEDICATED)(SERVICE_NAME = rac.racattack)))
ORA-28000: the account is locked
Checking Database and Clusterware version compatibility
Checking ASM and CRS version compatibility
ASM and CRS versions are compatible
Database version "12.1.0.2.0" is compatible with the Clusterware version "12.1.0.2.0".
Database Clusterware version compatibility passed.
Checking OS user consistency for database upgrade
Result: OS user consistency check for upgrade successful
Check: Group existence for "dba"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists
collabn1 passed exists
Result: Group existence check passed for "dba"
Check: Membership of user "oracle" in group "dba"
Node Name User Exists Group Exists User in Group Status
---------------- ------------ ------------ ------------ ----------------
collabn2 yes yes yes passed
collabn1 yes yes yes passed
Result: Membership check for user "oracle" in group "dba" passed
Pre-check for database installation was unsuccessful on all the nodes.
NOTE:
No fixable verification failures to fix
#################################
How to expand /u01 volume:
#################################
$ df -h
[oracle@collabn1 dev]$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 35G 25G 8.6G 75% /
tmpfs 2.2G 167M 2.1G 8% /dev/shm
/dev/sda1 477M 130M 319M 29% /boot
12cR1 680G 643G 37G 95% /media/sf_12cR1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 35G 27G 6.3G 82% /
tmpfs 2.2G 631M 1.6G 29% /dev/shm
/dev/sda1 477M 122M 326M 28% /boot
12cR1 680G 644G 36G 95% /media/sf_12cR1
Before:
$ ls -l /dev/sd*
brw-rw---- 1 root disk 8, 0 Mar 29 13:34 /dev/sda
brw-rw---- 1 root disk 8, 1 Mar 29 13:34 /dev/sda1
brw-rw---- 1 root disk 8, 2 Mar 29 13:34 /dev/sda2
brw-rw---- 1 root disk 8, 16 Mar 29 13:34 /dev/sdb
brw-rw---- 1 root disk 8, 32 Mar 29 13:34 /dev/sdc
brw-rw---- 1 root disk 8, 48 Mar 29 13:34 /dev/sdd
brw-rw---- 1 root disk 8, 64 Mar 29 13:34 /dev/sde
After:
$ ls -l /dev/sd*
brw-rw---- 1 root disk 8, 0 Mar 29 14:38 /dev/sda
brw-rw---- 1 root disk 8, 1 Mar 29 14:38 /dev/sda1
brw-rw---- 1 root disk 8, 2 Mar 29 14:38 /dev/sda2
brw-rw---- 1 root disk 8, 16 Mar 29 14:38 /dev/sdb
brw-rw---- 1 root disk 8, 32 Mar 29 14:38 /dev/sdc
brw-rw---- 1 root disk 8, 48 Mar 29 14:38 /dev/sdd
brw-rw---- 1 root disk 8, 64 Mar 29 14:38 /dev/sde
brw-rw---- 1 root disk 8, 80 Mar 29 14:38 /dev/sdf
# lvdisplay
--- Logical volume ---
LV Path /dev/vg_collabn1/lv_root
LV Name lv_root
VG Name vg_collabn1
LV UUID EMuv3M-N0vM-iFWb-uwtl-z8uB-mlZh-cK2ZEB
LV Write Access read/write
LV Creation host, time collabn1.racattack, 2013-10-11 10:50:12 -0400
LV Status available
# open 1
LV Size 35.63 GiB
Current LE 9122
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 252:0
--- Logical volume ---
LV Path /dev/vg_collabn1/lv_swap
LV Name lv_swap
VG Name vg_collabn1
LV UUID d30rgg-fv7J-Nl94-mriB-yFw4-FAzS-ArPoFq
LV Write Access read/write
LV Creation host, time collabn1.racattack, 2013-10-11 10:50:28 -0400
LV Status available
# open 2
LV Size 3.88 GiB
Current LE 992
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 252:1
# fdisk /dev/sdf
: p
: n
: p
: 1
: enter
: enter
: t
: L
: 8e
: w
# fdisk /dev/sdf
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel with disk identifier 0x796ab167.
Changes will remain in memory only, until you decide to write them.
After that, of course, the previous content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
switch off the mode (command 'c') and change display units to
sectors (command 'u').
Command (m for help): p
Disk /dev/sdf: 10.7 GB, 10737418240 bytes
255 heads, 63 sectors/track, 1305 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x796ab167
Device Boot Start End Blocks Id System
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-1305, default 1):
Using default value 1
Last cylinder, +cylinders or +size{K,M,G} (1-1305, default 1305):
Using default value 1305
Command (m for help): t
Selected partition 1
Hex code (type L to list codes): L
0 Empty 24 NEC DOS 81 Minix / old Lin bf Solaris
1 FAT12 39 Plan 9 82 Linux swap / So c1 DRDOS/sec (FAT-
2 XENIX root 3c PartitionMagic 83 Linux c4 DRDOS/sec (FAT-
3 XENIX usr 40 Venix 80286 84 OS/2 hidden C: c6 DRDOS/sec (FAT-
4 FAT16 <32M 41 PPC PReP Boot 85 Linux extended c7 Syrinx
5 Extended 42 SFS 86 NTFS volume set da Non-FS data
6 FAT16 4d QNX4.x 87 NTFS volume set db CP/M / CTOS / .
7 HPFS/NTFS 4e QNX4.x 2nd part 88 Linux plaintext de Dell Utility
8 AIX 4f QNX4.x 3rd part 8e Linux LVM df BootIt
9 AIX bootable 50 OnTrack DM 93 Amoeba e1 DOS access
a OS/2 Boot Manag 51 OnTrack DM6 Aux 94 Amoeba BBT e3 DOS R/O
b W95 FAT32 52 CP/M 9f BSD/OS e4 SpeedStor
c W95 FAT32 (LBA) 53 OnTrack DM6 Aux a0 IBM Thinkpad hi eb BeOS fs
e W95 FAT16 (LBA) 54 OnTrackDM6 a5 FreeBSD ee GPT
f W95 Ext'd (LBA) 55 EZ-Drive a6 OpenBSD ef EFI (FAT-12/16/
10 OPUS 56 Golden Bow a7 NeXTSTEP f0 Linux/PA-RISC b
11 Hidden FAT12 5c Priam Edisk a8 Darwin UFS f1 SpeedStor
12 Compaq diagnost 61 SpeedStor a9 NetBSD f4 SpeedStor
14 Hidden FAT16 <3 63 GNU HURD or Sys ab Darwin boot f2 DOS secondary
16 Hidden FAT16 64 Novell Netware af HFS / HFS+ fb VMware VMFS
17 Hidden HPFS/NTF 65 Novell Netware b7 BSDI fs fc VMware VMKCORE
18 AST SmartSleep 70 DiskSecure Mult b8 BSDI swap fd Linux raid auto
1b Hidden W95 FAT3 75 PC/IX bb Boot Wizard hid fe LANstep
1c Hidden W95 FAT3 80 Old Minix be Solaris boot ff BBT
1e Hidden W95 FAT1
Hex code (type L to list codes): 8e
Changed system type of partition 1 to 8e (Linux LVM)
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
# pvcreate /dev/sdf1
dev_is_mpath: failed to get device for 8:81
Physical volume "/dev/sdf1" successfully created
# vgextend vg_collabn1 /dev/sdf1
Volume group "vg_collabn1" successfully extended
# lvdisplay
# lvextend -L 45G /dev/vg_collabn1/lv_root
Extending logical volume lv_root to 45.00 GiB
Logical volume lv_root successfully resized
# resize2fs /dev/vg_collabn1/lv_root 45G
resize2fs 1.43-WIP (20-Jun-2013)
Filesystem at /dev/vg_collabn1/lv_root is mounted on /; on-line resizing required
old_desc_blocks = 3, new_desc_blocks = 3
The filesystem on /dev/vg_collabn1/lv_root is now 11796480 blocks long.
# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 45G 25G 18G 59% /
tmpfs 2.2G 167M 2.1G 8% /dev/shm
/dev/sda1 477M 130M 319M 29% /boot
12cR1 680G 644G 36G 95% /media/sf_12cR1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 45G 27G 16G 65% /
tmpfs 2.2G 631M 1.6G 29% /dev/shm
/dev/sda1 477M 122M 326M 28% /boot
12cR1 680G 644G 36G 95% /media/sf_12cR1
$ srvctl stop database -d RAC
$ crsctl stat res -t
$ srvctl remove database -d RAC
$ srvctl add database -d RAC -o '/u01/products/rdbms_12102'
$ srvctl add instance -d RAC -i RAC1 -n collabn1
$ srvctl add instance -d RAC -i RAC2 -n collabn2
$ srvctl start database -d RAC
Problem: 12.1.0.2 GI: oratab being wrongly modified after instance restarts [Doc ID 1922908.1]
--*********************************************************************************************
-- 1. Download and Unzip the Latest OPatch (Specific to Database Release) to all cluster nodes:
--*********************************************************************************************
$ rm -rf /u01/products/grid_12102/OPatch/*
$ rm -rf /u01/products/rdbms_12102/OPatch/*
$ unzip /media/sf_12cR1/Patch/p6880880_121010_Linux-x86-64.zip -d /u01/products/grid_12102/
$ unzip /media/sf_12cR1/Patch/p6880880_121010_Linux-x86-64.zip -d /u01/products/rdbms_12102/
$ /u01/products/grid_12102/OPatch/opatch version
$ /u01/products/rdbms_12102/OPatch/opatch version
--***************************************************************
-- 2. Validate and Record Pre-Patch information :
--***************************************************************
Validate using the following commands :
$ cd $GRID_HOME/OPatch
$ ./opatch lsinventory -oh /u01/products/grid_12102/
$ cd $ORACLE_HOME/OPatch
$ ./opatch lsinventory -oh /u01/products/rdbms_12102
--***************************************************************
-- 3. Create OCM Response File If It Does Not Exist :
--***************************************************************
Create ocm response file using the following command and provide appropriate values for the prompts.
# cd /u01/products/grid_12102/OPatch/ocm/bin/
# ./emocmrsp
# cd /u01/products/rdbms_12102/OPatch/ocm/bin/
# ./emocmrsp
Verify the created file using:
$ emocmrsp -verbose ocm.rsp
--***************************************************************
-- 4. Download and Unzip the JUL2014 PSU patch : (as grid user)
--***************************************************************
# mkdir -p /u01/products/patches
# chown -R oracle:oinstall /u01/products/patches
# chmod -R 775 /u01/products/patches
unzip /media/sf_12cR1/Patch/p18894342_121020_Linux-x86-64.zip -d /u01/products/patches
--***************************************************************
-- 7. Patch Application :
--***************************************************************
$ export PATH=$PATH:/u01/products/grid_12102/OPatch
# opatchauto apply /u01/products/patches/18894342 -ocmrf /u01/products/rdbms_12102/OPatch/ocm/bin/ocm.rsp
--***************************************************************
-- 9. Verification of Patch application :
--***************************************************************
Validate using the following commands :
$ cd $GRID_HOME/OPatch
$ ./opatch lsinventory -detail -oh /u01/products/grid_12102/
$ cd $ORACLE_HOME/OPatch
$ ./opatch lsinventory -detail -oh /u01/products/rdbms_12102
$ sqlplus / as sysdba
set linesize 500
set pagesize 500
column comp_name format a40
column version format a15
column status format a13
select comp_name,version,status from dba_registry
/
--***************************************************************************
-- 10. (If Required) Roll Back the Oracle RAC Database Homes and GI Together
--***************************************************************************
GI Home and Database Homes that are not shared and ACFS file system is not configured.
As root user, execute the following command on each node of the cluster.
# cd $GRID_HOME/OPatch
# opatchauto rollback /u01/products/patches/18894342
+ASM1:/u01/app/12.1.0/grid:N: # line added by Agent
RAC1:/u01/app/oracle/product/12.1.0/dbhome_1:Y: # line added by Agent
RAC:/u01/app/oracle/product/12.1.0/dbhome_1:N: # line added by Agent
[oracle@collabn1 ~]$ export ORACLE_SID=+ASM1
[oracle@collabn1 ~]$ . oraenv
ORACLE_SID = [+ASM1] ?
The Oracle base remains unchanged with value /u01/app/oracle
[oracle@collabn1 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.FRA.dg
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.LISTENER.lsnr
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.asm
ONLINE ONLINE collabn1 Started,STABLE
ONLINE ONLINE collabn2 Started,STABLE
ora.net1.network
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
ora.ons
ONLINE ONLINE collabn1 STABLE
ONLINE ONLINE collabn2 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE collabn1 STABLE
ora.LISTENER_SCAN2.lsnr
1 ONLINE ONLINE collabn2 STABLE
ora.LISTENER_SCAN3.lsnr
1 ONLINE ONLINE collabn2 STABLE
ora.collabn1.vip
1 ONLINE ONLINE collabn1 STABLE
ora.collabn2.vip
1 ONLINE ONLINE collabn2 STABLE
ora.cvu
1 ONLINE ONLINE collabn2 STABLE
ora.oc4j
1 OFFLINE OFFLINE STABLE
ora.rac.db
1 ONLINE ONLINE collabn1 Open,STABLE
2 ONLINE ONLINE collabn2 Open,STABLE
ora.scan1.vip
1 ONLINE ONLINE collabn1 STABLE
ora.scan2.vip
1 ONLINE ONLINE collabn2 STABLE
ora.scan3.vip
1 ONLINE ONLINE collabn2 STABLE
--------------------------------------------------------------------------------
$ export ORACLE_SID=+ASM1
$ . oraenv
$ ocrconfig -showbackup
PROT-24: Auto backups for the Oracle Cluster Registry are not available
collabn1 2014/02/18 21:58:23 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215823.ocr
collabn1 2014/02/18 21:58:05 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215805.ocr
collabn1 2014/02/18 21:48:20 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214820.ocr
collabn1 2014/02/18 21:43:24 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214324.ocr
$ su
Password:
[root@collabn1 orachk]# ocrconfig -manualbackup
$ ocrconfig -showbackup
PROT-24: Auto backups for the Oracle Cluster Registry are not available
collabn2 2016/03/01 11:25:46 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20160301_112546.ocr
collabn1 2014/02/18 21:58:23 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215823.ocr
collabn1 2014/02/18 21:58:05 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_215805.ocr
collabn1 2014/02/18 21:48:20 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214820.ocr
collabn1 2014/02/18 21:43:24 /u01/app/12.1.0/grid/cdata/collabn-cluster/backup_20140218_214324.ocr
# mkdir -p /u01/products/grid_12102
# mkdir -p /u01/scripts
# chown -R oracle:oinstall /u01/products
# chown -R oracle:oinstall /u01/scripts
# chmod -R 775 /u01/scripts
# chmod -R 775 /u01/products
$ mkdir -p /u01/scripts/orachk
$ cd /media/sf_12cR1/DB_TOOLS
$ cp orachk.zip /u01/scripts/orachk
$ cd /u01/scripts/orachk
$ unzip orachk.zip
$ export ORACLE_SID=+ASM1
$ . oraenv
$ crsctl query crs releaseversion
Oracle High Availability Services release version on the local node is [12.1.0.1.0]
$ crsctl query crs softwareversion
Oracle Clusterware version on node [collabn1] is [12.1.0.1.0]
$ crsctl query crs activeversion -f
Oracle Clusterware active version on the cluster is [12.1.0.1.0]. The cluster upgrade state is [NORMAL]. The cluster active patch level is [0].
$ crsctl stat res -t
$ export ORACLE_SID=RAC1
$ . oraenv
$ sqlplus / as sysdba
set linesize 500
set pagesize 500
column HOST_NAME format a20
column STATUS format a6
column INSTANCE_NAME format a13
column STIME format a35
column uptime format a55
select Host_Name, Status, database_status, Instance_Name
,'Started At: ' || to_char(startup_time,'DD-MON-YYYY HH24:MI:SS') stime
,'Uptime: ' || floor(sysdate - startup_time) || ' days(s) ' ||
trunc( 24*((sysdate-startup_time) -
trunc(sysdate-startup_time))) || ' hour(s) ' ||
mod(trunc(1440*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' minute(s) ' ||
mod(trunc(86400*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' seconds' uptime
from v$instance
/
HOST_NAME STATUS DATABASE_STATUS INSTANCE_NAME STIME UPTIME
-------------------- ------ ----------------- ------------- ----------------------------------- -------------------------------------------------------
collabn1.racattack OPEN ACTIVE RAC1 Started At: 01-MAR-2016 11:03:29 Uptime: 0 days(s) 0 hour(s) 16 minute(s) 51 seconds
ssh collabn2
# mkdir -p /u01/products/grid_12102
# mkdir -p /u01/scripts
# chown -R oracle:oinstall /u01/products
# chown -R oracle:oinstall /u01/scripts
# chmod -R 775 /u01/scripts
# chmod -R 775 /u01/products
$ mkdir -p /u01/scripts/orachk
$ cd /media/sf_12cR1/DB_TOOLS
$ cp orachk.zip /u01/scripts/orachk
$ cd /u01/scripts/orachk
$ unzip orachk.zip
$ export ORACLE_SID=+ASM2
$ . oraenv
$ crsctl query crs releaseversion
Oracle High Availability Services release version on the local node is [12.1.0.1.0]
$ crsctl query crs softwareversion
Oracle Clusterware version on node [collabn1] is [12.1.0.1.0]
$ crsctl query crs activeversion -f
Oracle Clusterware active version on the cluster is [12.1.0.1.0]. The cluster upgrade state is [NORMAL]. The cluster active patch level is [0].
$ crsctl stat res -t
$ export ORACLE_SID=RAC2
$ . oraenv
$ sqlplus / as sysdba
set linesize 500
set pagesize 500
column HOST_NAME format a20
column STATUS format a6
column INSTANCE_NAME format a13
column STIME format a35
column uptime format a55
select Host_Name, Status, database_status, Instance_Name
,'Started At: ' || to_char(startup_time,'DD-MON-YYYY HH24:MI:SS') stime
,'Uptime: ' || floor(sysdate - startup_time) || ' days(s) ' ||
trunc( 24*((sysdate-startup_time) -
trunc(sysdate-startup_time))) || ' hour(s) ' ||
mod(trunc(1440*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' minute(s) ' ||
mod(trunc(86400*((sysdate-startup_time) -
trunc(sysdate-startup_time))), 60) ||' seconds' uptime
from v$instance
/
HOST_NAME STATUS DATABASE_STATUS INSTANCE_NAME STIME UPTIME
-------------------- ------ ----------------- ------------- ----------------------------------- -------------------------------------------------------
collabn2.racattack OPEN ACTIVE RAC2 Started At: 01-MAR-2016 11:04:52 Uptime: 0 days(s) 0 hour(s) 13 minute(s) 54 seconds
$ cd /u01/scripts/orachk
$ ./orachk -u -o pre
$ cd /media/sf_12cR1/12102_INSTALL/grid
$ ./runcluvfy.sh stage -pre crsinst -upgrade -rolling -src_crshome /u01/app/12.1.0/grid -dest_crshome /u01/products/grid_12102 -dest_version 12.1.0.2.0 -fixup -verbose
[root@collabn1 grid]# dd if=/dev/zero of=/home/swapfile bs=2048 count=1048576
1048576+0 records in
1048576+0 records out
2147483648 bytes (2.1 GB) copied, 29.3987 s, 73.0 MB/s
[root@collabn1 grid]# mkswap /home/swapfile
mkswap: /home/swapfile: warning: don't erase bootbits sectors
on whole disk. Use -f to force.
Setting up swapspace version 1, size = 2097148 KiB
no label, UUID=5f6d4b0f-d763-4e62-b6d4-d40c647776f8
[root@collabn1 grid]# swapon /home/swapfile
[root@collabn1 grid]# swapon -a
[root@collabn1 grid]# swapon -s
Filename Type Size Used Priority
/dev/dm-1 partition 4063228 105108 -1
/home/swapfile file 2097148 0 -2
[root@collabn1 home]# vi /etc/fstab
Add the following line to keep the swap file after reboot:
/home/swapfile none swap sw 0 0
# vi /etc/resolv.conf
Add the following line:
options timeout:1 attempts:1 rotate
# cat /etc/resolv.conf
# Generated by NetworkManager
search racattack
nameserver 192.168.78.51
nameserver 192.168.78.52
options timeout:1 attempts:1 rotate
Checking integrity of file "/etc/resolv.conf" across nodes
Checking the file "/etc/resolv.conf" to make sure only one of 'domain' and 'search' entries is defined
"domain" and "search" entries do not coexist in any "/etc/resolv.conf" file
Checking if 'domain' entry in file "/etc/resolv.conf" is consistent across the nodes...
"domain" entry does not exist in any "/etc/resolv.conf" file
Checking if 'search' entry in file "/etc/resolv.conf" is consistent across the nodes...
Checking file "/etc/resolv.conf" to make sure that only one 'search' entry is defined
More than one "search" entry does not exist in any "/etc/resolv.conf" file
All nodes have same "search" order defined in file "/etc/resolv.conf"
Checking DNS response time for an unreachable node
Node Name Status
------------------------------------ ------------------------
collabn1 failed
collabn2 failed
PRVF-5636 : The DNS response time for an unreachable node exceeded "15000" ms on following nodes: collabn1,collabn2
checking DNS response from all servers in "/etc/resolv.conf"
******************************************************************************************
Following is the list of fixable prerequisites selected to fix in this session
******************************************************************************************
-------------- --------------- ----------------
Check failed. Failed on nodes Reboot required?
-------------- --------------- ----------------
OS Kernel Parameter: collabn2,collabn1 no
panic_on_oops
Execute "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" as root user on nodes "collabn2,collabn1" to perform the fix up operations manually
Press ENTER key to continue after execution of "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" has completed on nodes "collabn2,collabn1"
Fix: OS Kernel Parameter: panic_on_oops
Node Name Status
------------------------------------ ------------------------
collabn2 failed
collabn1 failed
ERROR:
PRVG-9023 : Manual fix up command "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" was not issued by root user on node "collabn2"
PRVG-9023 : Manual fix up command "/tmp/CVU_12.1.0.2.0_oracle/runfixup.sh" was not issued by root user on node "collabn1"
/media/sf_12cR1/DB_TOOLS/
Change the following lines in /etc/named.conf
zone "." IN {
type hint;
file "named.ca";
};
to:
zone "." IN {
type hint;
file "/dev/null";
};
$ chmod 775 $ORACLE_BASE/cfgtoollogs
$ chmod 770 $ORACLE_BASE/cfgtoollogs/dbca
$ chmod 770 $ORACLE_BASE/admin
#############################################################################################################
RAC DATABASE UPGRADE
#############################################################################################################
# mkdir -p /u01/products/rdbms_12102
# chown -R oracle:oinstall /u01/products/rdbms_12102
# chmod -R 775 /u01/products/rdbms_12102
$ cd /media/sf_12cR1/12102_INSTALL/database
$ ./runInstaller
# cd /u01/products/rdbms_12102
[root@collabn1 rdbms_12102]# ./root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/products/rdbms_12102
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
# cd /u01/products/rdbms_12102
[root@collabn2 rdbms_12102]# ./root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/products/rdbms_12102
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
[oracle@collabn1]
$ mkdir -p /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cd /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cp /u01/products/rdbms_12102/rdbms/admin/preupgrd.sql .
$ cp /u01/products/rdbms_12102/rdbms/admin/utluppkg.sql .
export ORACLE_SID=RAC1
. oraenv
$ sqlplus / as sysdba
SQL> @preupgrd.sql
[oracle@collabn2]
$ mkdir -p /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cd /u01/app/oracle/product/12.1.0/dbhome_1/temp
$ cp /u01/products/rdbms_12102/rdbms/admin/preupgrd.sql .
$ cp /u01/products/rdbms_12102/rdbms/admin/utluppkg.sql .
export ORACLE_SID=RAC2
. oraenv
$ sqlplus / as sysdba
SQL> @preupgrd.sql
Loading Pre-Upgrade Package...
***************************************************************************
Executing Pre-Upgrade Checks in RAC...
***************************************************************************
************************************************************
====>> ERRORS FOUND for RAC <<====
The following are *** ERROR LEVEL CONDITIONS *** that must be addressed
prior to attempting your upgrade.
Failure to do so will result in a failed upgrade.
You MUST resolve the above errors prior to upgrade
************************************************************
************************************************************
====>> PRE-UPGRADE RESULTS for RAC <<====
ACTIONS REQUIRED:
1. Review results of the pre-upgrade checks:
/u01/app/oracle/cfgtoollogs/RAC/preupgrade/preupgrade.log
2. Execute in the SOURCE environment BEFORE upgrade:
/u01/app/oracle/cfgtoollogs/RAC/preupgrade/preupgrade_fixups.sql
3. Execute in the NEW environment AFTER upgrade:
/u01/app/oracle/cfgtoollogs/RAC/preupgrade/postupgrade_fixups.sql
************************************************************
***************************************************************************
Pre-Upgrade Checks in RAC Completed.
***************************************************************************
***************************************************************************
***************************************************************************
$ cd /media/sf_12cR1/12102_INSTALL/grid
$ ./runcluvfy.sh stage -pre dbinst -upgrade -src_dbhome /u01/app/oracle/product/12.1.0/dbhome_1 -dest_dbhome /u01/products/rdbms_12102 -dest_version 12.1.0.2.0 -fixup -verbose
[oracle@collabn1 grid]$ ./runcluvfy.sh stage -pre dbinst -upgrade -src_dbhome /u01/app/oracle/product/12.1.0/dbhome_1 -dest_dbhome /u01/products/rdbms_12102 -dest_version 12.1.0.2.0 -fixup -verbose
Performing pre-checks for database installation
Checking node reachability...
Check: Node reachability from node "collabn1"
Destination Node Reachable?
------------------------------------ ------------------------
collabn1 yes
collabn2 yes
Result: Node reachability check passed from node "collabn1"
Checking user equivalence...
Check: User equivalence for user "oracle"
Node Name Status
------------------------------------ ------------------------
collabn2 passed
collabn1 passed
Result: User equivalence check passed for user "oracle"
Specify user name for database "rac" [default "DBSNMP"] :
Specify password for user "DBSNMP" in database "rac" :
Checking node connectivity...
Checking hosts config file...
Node Name Status
------------------------------------ ------------------------
collabn1 passed
collabn2 passed
Verification of the hosts config file successful
Interface information for node "collabn1"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.78.51 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth0 192.168.78.61 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth0 192.168.78.253 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth0 192.168.78.251 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:C0:4A:14 1500
eth1 172.16.100.51 172.16.100.0 0.0.0.0 10.0.4.2 08:00:27:2B:4B:56 1500
eth1 169.254.200.132 169.254.0.0 0.0.0.0 10.0.4.2 08:00:27:2B:4B:56 1500
Interface information for node "collabn2"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.78.52 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:3C:18:FE 1500
eth0 192.168.78.62 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:3C:18:FE 1500
eth0 192.168.78.252 192.168.78.0 0.0.0.0 10.0.4.2 08:00:27:3C:18:FE 1500
eth1 172.16.100.52 172.16.100.0 0.0.0.0 10.0.4.2 08:00:27:E1:BA:80 1500
eth1 169.254.223.245 169.254.0.0 0.0.0.0 10.0.4.2 08:00:27:E1:BA:80 1500
Check: Node connectivity using interfaces on subnet "192.168.78.0"
Check: Node connectivity of subnet "192.168.78.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn1[192.168.78.51] collabn1[192.168.78.251] yes
collabn1[192.168.78.51] collabn2[192.168.78.252] yes
collabn1[192.168.78.51] collabn2[192.168.78.62] yes
collabn1[192.168.78.51] collabn1[192.168.78.253] yes
collabn1[192.168.78.51] collabn1[192.168.78.61] yes
collabn1[192.168.78.51] collabn2[192.168.78.52] yes
collabn1[192.168.78.251] collabn2[192.168.78.252] yes
collabn1[192.168.78.251] collabn2[192.168.78.62] yes
collabn1[192.168.78.251] collabn1[192.168.78.253] yes
collabn1[192.168.78.251] collabn1[192.168.78.61] yes
collabn1[192.168.78.251] collabn2[192.168.78.52] yes
collabn2[192.168.78.252] collabn2[192.168.78.62] yes
collabn2[192.168.78.252] collabn1[192.168.78.253] yes
collabn2[192.168.78.252] collabn1[192.168.78.61] yes
collabn2[192.168.78.252] collabn2[192.168.78.52] yes
collabn2[192.168.78.62] collabn1[192.168.78.253] yes
collabn2[192.168.78.62] collabn1[192.168.78.61] yes
collabn2[192.168.78.62] collabn2[192.168.78.52] yes
collabn1[192.168.78.253] collabn1[192.168.78.61] yes
collabn1[192.168.78.253] collabn2[192.168.78.52] yes
collabn1[192.168.78.61] collabn2[192.168.78.52] yes
Result: Node connectivity passed for subnet "192.168.78.0" with node(s) collabn1,collabn2
Check: TCP connectivity of subnet "192.168.78.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn1 : 192.168.78.51 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.51 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.51 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.51 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.51 passed
collabn1 : 192.168.78.51 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.251 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.251 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.251 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.251 passed
collabn1 : 192.168.78.51 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.251 collabn2 : 192.168.78.252 passed
collabn2 : 192.168.78.252 collabn2 : 192.168.78.252 passed
collabn2 : 192.168.78.62 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.253 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.61 collabn2 : 192.168.78.252 passed
collabn2 : 192.168.78.52 collabn2 : 192.168.78.252 passed
collabn1 : 192.168.78.51 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.251 collabn2 : 192.168.78.62 passed
collabn2 : 192.168.78.252 collabn2 : 192.168.78.62 passed
collabn2 : 192.168.78.62 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.253 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.61 collabn2 : 192.168.78.62 passed
collabn2 : 192.168.78.52 collabn2 : 192.168.78.62 passed
collabn1 : 192.168.78.51 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.253 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.253 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.253 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.253 passed
collabn1 : 192.168.78.51 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.251 collabn1 : 192.168.78.61 passed
collabn2 : 192.168.78.252 collabn1 : 192.168.78.61 passed
collabn2 : 192.168.78.62 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.253 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.61 collabn1 : 192.168.78.61 passed
collabn2 : 192.168.78.52 collabn1 : 192.168.78.61 passed
collabn1 : 192.168.78.51 collabn2 : 192.168.78.52 passed
collabn1 : 192.168.78.251 collabn2 : 192.168.78.52 passed
collabn2 : 192.168.78.252 collabn2 : 192.168.78.52 passed
collabn2 : 192.168.78.62 collabn2 : 192.168.78.52 passed
collabn1 : 192.168.78.253 collabn2 : 192.168.78.52 passed
collabn1 : 192.168.78.61 collabn2 : 192.168.78.52 passed
collabn2 : 192.168.78.52 collabn2 : 192.168.78.52 passed
Result: TCP connectivity check passed for subnet "192.168.78.0"
Check: Node connectivity using interfaces on subnet "172.16.100.0"
Check: Node connectivity of subnet "172.16.100.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn2[172.16.100.52] collabn1[172.16.100.51] yes
Result: Node connectivity passed for subnet "172.16.100.0" with node(s) collabn2,collabn1
Check: TCP connectivity of subnet "172.16.100.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
collabn2 : 172.16.100.52 collabn2 : 172.16.100.52 passed
collabn1 : 172.16.100.51 collabn2 : 172.16.100.52 passed
collabn2 : 172.16.100.52 collabn1 : 172.16.100.51 passed
collabn1 : 172.16.100.51 collabn1 : 172.16.100.51 passed
Result: TCP connectivity check passed for subnet "172.16.100.0"
Checking subnet mask consistency...
Subnet mask consistency check passed for subnet "192.168.78.0".
Subnet mask consistency check passed for subnet "172.16.100.0".
Subnet mask consistency check passed.
Result: Node connectivity check passed
Checking multicast communication...
Checking subnet "172.16.100.0" for multicast communication with multicast group "224.0.0.251"...
Check of subnet "172.16.100.0" for multicast communication with multicast group "224.0.0.251" passed.
Check of multicast communication passed.
Check: Total memory
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 4.3535GB (4564940.0KB) 1GB (1048576.0KB) passed
collabn1 4.3535GB (4564940.0KB) 1GB (1048576.0KB) passed
Result: Total memory check passed
Check: Available memory
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 3.0986GB (3249148.0KB) 50MB (51200.0KB) passed
collabn1 2.7023GB (2833548.0KB) 50MB (51200.0KB) passed
Result: Available memory check passed
Check: Swap space
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 5.875GB (6160376.0KB) 4.3535GB (4564940.0KB) passed
collabn1 5.875GB (6160376.0KB) 4.3535GB (4564940.0KB) passed
Result: Swap space check passed
Check: Free disk space for "collabn2:/u01/products/rdbms_12102,collabn2:/tmp"
Path Node Name Mount point Available Required Status
---------------- ------------ ------------ ------------ ------------ ------------
/u01/products/rdbms_12102 collabn2 / 6.625GB 7.4GB failed
/tmp collabn2 / 6.625GB 7.4GB failed
Result: Free disk space check failed for "collabn2:/u01/products/rdbms_12102,collabn2:/tmp"
Check: Free disk space for "collabn1:/u01/products/rdbms_12102,collabn1:/tmp"
Path Node Name Mount point Available Required Status
---------------- ------------ ------------ ------------ ------------ ------------
/u01/products/rdbms_12102 collabn1 / 8.2351GB 7.4GB passed
/tmp collabn1 / 8.2351GB 7.4GB passed
Result: Free disk space check passed for "collabn1:/u01/products/rdbms_12102,collabn1:/tmp"
Check: User existence for "oracle"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists(54321)
collabn1 passed exists(54321)
Checking for multiple users with UID value 54321
Result: Check for multiple users with UID value 54321 passed
Result: User existence check passed for "oracle"
Check: Group existence for "oinstall"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists
collabn1 passed exists
Result: Group existence check passed for "oinstall"
Check: Group existence for "dba"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists
collabn1 passed exists
Result: Group existence check passed for "dba"
Check: Membership of user "oracle" in group "oinstall" [as Primary]
Node Name User Exists Group Exists User in Group Primary Status
---------------- ------------ ------------ ------------ ------------ ------------
collabn2 yes yes yes yes passed
collabn1 yes yes yes yes passed
Result: Membership check for user "oracle" in group "oinstall" [as Primary] passed
Check: Membership of user "oracle" in group "dba"
Node Name User Exists Group Exists User in Group Status
---------------- ------------ ------------ ------------ ----------------
collabn2 yes yes yes passed
collabn1 yes yes yes passed
Result: Membership check for user "oracle" in group "dba" passed
Check: Run level
Node Name run level Required Status
------------ ------------------------ ------------------------ ----------
collabn2 5 3,5 passed
collabn1 5 3,5 passed
Result: Run level check passed
Check: Hard limits for "maximum open file descriptors"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 hard 65536 65536 passed
collabn1 hard 65536 65536 passed
Result: Hard limits check passed for "maximum open file descriptors"
Check: Soft limits for "maximum open file descriptors"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 soft 1024 1024 passed
collabn1 soft 65536 1024 passed
Result: Soft limits check passed for "maximum open file descriptors"
Check: Hard limits for "maximum user processes"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 hard 16384 16384 passed
collabn1 hard 16384 16384 passed
Result: Hard limits check passed for "maximum user processes"
Check: Soft limits for "maximum user processes"
Node Name Type Available Required Status
---------------- ------------ ------------ ------------ ----------------
collabn2 soft 16384 2047 passed
collabn1 soft 16384 2047 passed
Result: Soft limits check passed for "maximum user processes"
There are no oracle patches required for home "/u01/app/oracle/product/12.1.0/dbhome_1".
There are no oracle patches required for home "/u01/products/rdbms_12102".
Check: System architecture
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 x86_64 x86_64 passed
collabn1 x86_64 x86_64 passed
Result: System architecture check passed
Check: Kernel version
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 3.8.13-26.1.1.el6uek.x86_64 2.6.39 passed
collabn1 3.8.13-26.1.1.el6uek.x86_64 2.6.39 passed
Result: Kernel version check passed
Check: Kernel parameter for "semmsl"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 250 250 250 passed
collabn2 250 250 250 passed
Result: Kernel parameter check passed for "semmsl"
Check: Kernel parameter for "semmns"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 32000 32000 32000 passed
collabn2 32000 32000 32000 passed
Result: Kernel parameter check passed for "semmns"
Check: Kernel parameter for "semopm"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 100 100 100 passed
collabn2 100 100 100 passed
Result: Kernel parameter check passed for "semopm"
Check: Kernel parameter for "semmni"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 128 128 128 passed
collabn2 128 128 128 passed
Result: Kernel parameter check passed for "semmni"
Check: Kernel parameter for "shmmax"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4398046511104 4398046511104 2337249280 passed
collabn2 4398046511104 4398046511104 2337249280 passed
Result: Kernel parameter check passed for "shmmax"
Check: Kernel parameter for "shmmni"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4096 4096 4096 passed
collabn2 4096 4096 4096 passed
Result: Kernel parameter check passed for "shmmni"
Check: Kernel parameter for "shmall"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4294967296 4294967296 456494 passed
collabn2 4294967296 4294967296 456494 passed
Result: Kernel parameter check passed for "shmall"
Check: Kernel parameter for "file-max"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 6815744 6815744 6815744 passed
collabn2 6815744 6815744 6815744 passed
Result: Kernel parameter check passed for "file-max"
Check: Kernel parameter for "ip_local_port_range"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 between 9000 & 65500 between 9000 & 65500 between 9000 & 65535 passed
collabn2 between 9000 & 65500 between 9000 & 65500 between 9000 & 65535 passed
Result: Kernel parameter check passed for "ip_local_port_range"
Check: Kernel parameter for "rmem_default"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 262144 262144 262144 passed
collabn2 262144 262144 262144 passed
Result: Kernel parameter check passed for "rmem_default"
Check: Kernel parameter for "rmem_max"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 4194304 4194304 4194304 passed
collabn2 4194304 4194304 4194304 passed
Result: Kernel parameter check passed for "rmem_max"
Check: Kernel parameter for "wmem_default"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 262144 262144 262144 passed
collabn2 262144 262144 262144 passed
Result: Kernel parameter check passed for "wmem_default"
Check: Kernel parameter for "wmem_max"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 1048576 1048576 1048576 passed
collabn2 1048576 1048576 1048576 passed
Result: Kernel parameter check passed for "wmem_max"
Check: Kernel parameter for "aio-max-nr"
Node Name Current Configured Required Status Comment
---------------- ------------ ------------ ------------ ------------ ------------
collabn1 1048576 1048576 1048576 passed
collabn2 1048576 1048576 1048576 passed
Result: Kernel parameter check passed for "aio-max-nr"
Check: Package existence for "binutils"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 binutils-2.20.51.0.2-5.36.el6 binutils-2.20.51.0.2 passed
collabn1 binutils-2.20.51.0.2-5.36.el6 binutils-2.20.51.0.2 passed
Result: Package existence check passed for "binutils"
Check: Package existence for "compat-libcap1"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 compat-libcap1-1.10-1 compat-libcap1-1.10 passed
collabn1 compat-libcap1-1.10-1 compat-libcap1-1.10 passed
Result: Package existence check passed for "compat-libcap1"
Check: Package existence for "compat-libstdc++-33(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 compat-libstdc++-33(x86_64)-3.2.3-69.el6 compat-libstdc++-33(x86_64)-3.2.3 passed
collabn1 compat-libstdc++-33(x86_64)-3.2.3-69.el6 compat-libstdc++-33(x86_64)-3.2.3 passed
Result: Package existence check passed for "compat-libstdc++-33(x86_64)"
Check: Package existence for "libgcc(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libgcc(x86_64)-4.4.7-4.el6 libgcc(x86_64)-4.4.4 passed
collabn1 libgcc(x86_64)-4.4.7-4.el6 libgcc(x86_64)-4.4.4 passed
Result: Package existence check passed for "libgcc(x86_64)"
Check: Package existence for "libstdc++(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libstdc++(x86_64)-4.4.7-4.el6 libstdc++(x86_64)-4.4.4 passed
collabn1 libstdc++(x86_64)-4.4.7-4.el6 libstdc++(x86_64)-4.4.4 passed
Result: Package existence check passed for "libstdc++(x86_64)"
Check: Package existence for "libstdc++-devel(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libstdc++-devel(x86_64)-4.4.7-4.el6 libstdc++-devel(x86_64)-4.4.4 passed
collabn1 libstdc++-devel(x86_64)-4.4.7-4.el6 libstdc++-devel(x86_64)-4.4.4 passed
Result: Package existence check passed for "libstdc++-devel(x86_64)"
Check: Package existence for "sysstat"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 sysstat-9.0.4-22.el6 sysstat-9.0.4 passed
collabn1 sysstat-9.0.4-22.el6 sysstat-9.0.4 passed
Result: Package existence check passed for "sysstat"
Check: Package existence for "gcc"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 gcc-4.4.7-4.el6 gcc-4.4.4 passed
collabn1 gcc-4.4.7-4.el6 gcc-4.4.4 passed
Result: Package existence check passed for "gcc"
Check: Package existence for "gcc-c++"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 gcc-c++-4.4.7-4.el6 gcc-c++-4.4.4 passed
collabn1 gcc-c++-4.4.7-4.el6 gcc-c++-4.4.4 passed
Result: Package existence check passed for "gcc-c++"
Check: Package existence for "ksh"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 ksh ksh passed
collabn1 ksh ksh passed
Result: Package existence check passed for "ksh"
Check: Package existence for "make"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 make-3.81-20.el6 make-3.81 passed
collabn1 make-3.81-20.el6 make-3.81 passed
Result: Package existence check passed for "make"
Check: Package existence for "glibc(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 glibc(x86_64)-2.12-1.132.el6 glibc(x86_64)-2.12 passed
collabn1 glibc(x86_64)-2.12-1.132.el6 glibc(x86_64)-2.12 passed
Result: Package existence check passed for "glibc(x86_64)"
Check: Package existence for "glibc-devel(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 glibc-devel(x86_64)-2.12-1.132.el6 glibc-devel(x86_64)-2.12 passed
collabn1 glibc-devel(x86_64)-2.12-1.132.el6 glibc-devel(x86_64)-2.12 passed
Result: Package existence check passed for "glibc-devel(x86_64)"
Check: Package existence for "libaio(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libaio(x86_64)-0.3.107-10.el6 libaio(x86_64)-0.3.107 passed
collabn1 libaio(x86_64)-0.3.107-10.el6 libaio(x86_64)-0.3.107 passed
Result: Package existence check passed for "libaio(x86_64)"
Check: Package existence for "libaio-devel(x86_64)"
Node Name Available Required Status
------------ ------------------------ ------------------------ ----------
collabn2 libaio-devel(x86_64)-0.3.107-10.el6 libaio-devel(x86_64)-0.3.107 passed
collabn1 libaio-devel(x86_64)-0.3.107-10.el6 libaio-devel(x86_64)-0.3.107 passed
Result: Package existence check passed for "libaio-devel(x86_64)"
Checking for multiple users with UID value 0
Result: Check for multiple users with UID value 0 passed
Check: Current group ID
Result: Current group ID check passed
Starting check for consistency of primary group of root user
Node Name Status
------------------------------------ ------------------------
collabn2 passed
collabn1 passed
Check for consistency of root user's primary group passed
Check default user file creation mask
Node Name Available Required Comment
------------ ------------------------ ------------------------ ----------
collabn2 0022 0022 passed
collabn1 0022 0022 passed
Result: Default user file creation mask check passed
Checking CRS integrity...
Clusterware version consistency passed.
The Oracle Clusterware is healthy on node "collabn1"
The Oracle Clusterware is healthy on node "collabn2"
CRS integrity check passed
Checking Cluster manager integrity...
Checking CSS daemon...
Node Name Status
------------------------------------ ------------------------
collabn1 running
collabn2 running
Oracle Cluster Synchronization Services appear to be online.
Cluster manager integrity check passed
Checking node application existence...
Checking existence of VIP node application (required)
Node Name Required Running? Comment
------------ ------------------------ ------------------------ ----------
collabn1 yes yes passed
collabn2 yes yes passed
VIP node application check passed
Checking existence of NETWORK node application (required)
Node Name Required Running? Comment
------------ ------------------------ ------------------------ ----------
collabn1 yes yes passed
collabn2 yes yes passed
NETWORK node application check passed
Checking existence of ONS node application (optional)
Node Name Required Running? Comment
------------ ------------------------ ------------------------ ----------
collabn1 no yes passed
collabn2 no yes passed
ONS node application check passed
Checking if Clusterware is installed on all nodes...
Oracle Clusterware is installed on all nodes.
Checking if CTSS Resource is running on all nodes...
Check: CTSS Resource running on all nodes
Node Name Status
------------------------------------ ------------------------
collabn1 passed
collabn2 passed
CTSS resource check passed
Querying CTSS for time offset on all nodes...
Query of CTSS for time offset passed
Check CTSS state started...
Check: CTSS state
Node Name State
------------------------------------ ------------------------
collabn2 Active
collabn1 Active
CTSS is in Active state. Proceeding with check of clock time offsets on all nodes...
Reference Time Offset Limit: 1000.0 msecs
Check: Reference Time Offset
Node Name Time Offset Status
------------ ------------------------ ------------------------
collabn2 0.0 passed
collabn1 0.0 passed
Time offset is within the specified limits on the following set of nodes:
"[collabn2, collabn1]"
Result: Check of clock time offsets passed
Oracle Cluster Time Synchronization Services check passed
Checking integrity of file "/etc/resolv.conf" across nodes
Checking the file "/etc/resolv.conf" to make sure only one of 'domain' and 'search' entries is defined
"domain" and "search" entries do not coexist in any "/etc/resolv.conf" file
Checking if 'domain' entry in file "/etc/resolv.conf" is consistent across the nodes...
"domain" entry does not exist in any "/etc/resolv.conf" file
Checking if 'search' entry in file "/etc/resolv.conf" is consistent across the nodes...
Checking file "/etc/resolv.conf" to make sure that only one 'search' entry is defined
More than one "search" entry does not exist in any "/etc/resolv.conf" file
All nodes have same "search" order defined in file "/etc/resolv.conf"
Checking DNS response time for an unreachable node
Node Name Status
------------------------------------ ------------------------
collabn1 failed
collabn2 failed
PRVF-5636 : The DNS response time for an unreachable node exceeded "15000" ms on following nodes: collabn1,collabn2
checking DNS response from all servers in "/etc/resolv.conf"
checking response for name "collabn2" from each of the name servers specified in "/etc/resolv.conf"
Node Name Source Comment Status
------------ ------------------------ ------------------------ ----------
collabn2 192.168.78.51 IPv4 passed
checking response for name "collabn1" from each of the name servers specified in "/etc/resolv.conf"
Node Name Source Comment Status
------------ ------------------------ ------------------------ ----------
collabn1 192.168.78.51 IPv4 passed
Check for integrity of file "/etc/resolv.conf" failed
Check: Time zone consistency
Result: Time zone consistency check passed
Checking Single Client Access Name (SCAN)...
SCAN Name Node Running? ListenerName Port Running?
---------------- ------------ ------------ ------------ ------------ ------------
collabn-cluster-scan.racattack collabn2 true LISTENER_SCAN1 1521 true
collabn-cluster-scan.racattack collabn1 true LISTENER_SCAN2 1521 true
collabn-cluster-scan.racattack collabn1 true LISTENER_SCAN3 1521 true
Checking TCP connectivity to SCAN listeners...
Node ListenerName TCP connectivity?
------------ ------------------------ ------------------------
collabn1 LISTENER_SCAN1 yes
collabn1 LISTENER_SCAN2 yes
collabn1 LISTENER_SCAN3 yes
TCP connectivity to SCAN listeners exists on all cluster nodes
Checking name resolution setup for "collabn-cluster-scan.racattack"...
Checking integrity of name service switch configuration file "/etc/nsswitch.conf" ...
Checking if "hosts" entry in file "/etc/nsswitch.conf" is consistent across nodes...
Checking file "/etc/nsswitch.conf" to make sure that only one "hosts" entry is defined
More than one "hosts" entry does not exist in any "/etc/nsswitch.conf" file
All nodes have same "hosts" entry defined in file "/etc/nsswitch.conf"
Check for integrity of name service switch configuration file "/etc/nsswitch.conf" passed
SCAN Name IP Address Status Comment
------------ ------------------------ ------------------------ ----------
collabn-cluster-scan.racattack 192.168.78.251 passed
collabn-cluster-scan.racattack 192.168.78.253 passed
collabn-cluster-scan.racattack 192.168.78.252 passed
Checking SCAN IP addresses...
Check of SCAN IP addresses passed
Verification of SCAN VIP and listener setup passed
Checking VIP configuration.
Checking VIP Subnet configuration.
Check for VIP Subnet configuration passed.
Checking VIP reachability
Check for VIP reachability passed.
Checking stale database schema statistics...
PRVG-11143 : The following error occurred during stale database schema statistics check.
PRVG-11115 : Following error occurred while establishing connection to database "rac"
PRCQ-1000 : An error occurred while establishing connection to database with user name "DBSNMP" and connect descriptor:
(DESCRIPTION = (LOAD_BALANCE=on) (ADDRESS = (PROTOCOL = TCP)(HOST = collabn-cluster-scan.racattack)(PORT = 1521)) (CONNECT_DATA =(SERVER = DEDICATED)(SERVICE_NAME = rac.racattack)))
ORA-28000: the account is locked
Checking Database and Clusterware version compatibility
Checking ASM and CRS version compatibility
ASM and CRS versions are compatible
Database version "12.1.0.2.0" is compatible with the Clusterware version "12.1.0.2.0".
Database Clusterware version compatibility passed.
Checking OS user consistency for database upgrade
Result: OS user consistency check for upgrade successful
Check: Group existence for "dba"
Node Name Status Comment
------------ ------------------------ ------------------------
collabn2 passed exists
collabn1 passed exists
Result: Group existence check passed for "dba"
Check: Membership of user "oracle" in group "dba"
Node Name User Exists Group Exists User in Group Status
---------------- ------------ ------------ ------------ ----------------
collabn2 yes yes yes passed
collabn1 yes yes yes passed
Result: Membership check for user "oracle" in group "dba" passed
Pre-check for database installation was unsuccessful on all the nodes.
NOTE:
No fixable verification failures to fix
#################################
How to expand /u01 volume:
#################################
$ df -h
[oracle@collabn1 dev]$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 35G 25G 8.6G 75% /
tmpfs 2.2G 167M 2.1G 8% /dev/shm
/dev/sda1 477M 130M 319M 29% /boot
12cR1 680G 643G 37G 95% /media/sf_12cR1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 35G 27G 6.3G 82% /
tmpfs 2.2G 631M 1.6G 29% /dev/shm
/dev/sda1 477M 122M 326M 28% /boot
12cR1 680G 644G 36G 95% /media/sf_12cR1
Before:
$ ls -l /dev/sd*
brw-rw---- 1 root disk 8, 0 Mar 29 13:34 /dev/sda
brw-rw---- 1 root disk 8, 1 Mar 29 13:34 /dev/sda1
brw-rw---- 1 root disk 8, 2 Mar 29 13:34 /dev/sda2
brw-rw---- 1 root disk 8, 16 Mar 29 13:34 /dev/sdb
brw-rw---- 1 root disk 8, 32 Mar 29 13:34 /dev/sdc
brw-rw---- 1 root disk 8, 48 Mar 29 13:34 /dev/sdd
brw-rw---- 1 root disk 8, 64 Mar 29 13:34 /dev/sde
After:
$ ls -l /dev/sd*
brw-rw---- 1 root disk 8, 0 Mar 29 14:38 /dev/sda
brw-rw---- 1 root disk 8, 1 Mar 29 14:38 /dev/sda1
brw-rw---- 1 root disk 8, 2 Mar 29 14:38 /dev/sda2
brw-rw---- 1 root disk 8, 16 Mar 29 14:38 /dev/sdb
brw-rw---- 1 root disk 8, 32 Mar 29 14:38 /dev/sdc
brw-rw---- 1 root disk 8, 48 Mar 29 14:38 /dev/sdd
brw-rw---- 1 root disk 8, 64 Mar 29 14:38 /dev/sde
brw-rw---- 1 root disk 8, 80 Mar 29 14:38 /dev/sdf
# lvdisplay
--- Logical volume ---
LV Path /dev/vg_collabn1/lv_root
LV Name lv_root
VG Name vg_collabn1
LV UUID EMuv3M-N0vM-iFWb-uwtl-z8uB-mlZh-cK2ZEB
LV Write Access read/write
LV Creation host, time collabn1.racattack, 2013-10-11 10:50:12 -0400
LV Status available
# open 1
LV Size 35.63 GiB
Current LE 9122
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 252:0
--- Logical volume ---
LV Path /dev/vg_collabn1/lv_swap
LV Name lv_swap
VG Name vg_collabn1
LV UUID d30rgg-fv7J-Nl94-mriB-yFw4-FAzS-ArPoFq
LV Write Access read/write
LV Creation host, time collabn1.racattack, 2013-10-11 10:50:28 -0400
LV Status available
# open 2
LV Size 3.88 GiB
Current LE 992
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 252:1
# fdisk /dev/sdf
: p
: n
: p
: 1
: enter
: enter
: t
: L
: 8e
: w
# fdisk /dev/sdf
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel with disk identifier 0x796ab167.
Changes will remain in memory only, until you decide to write them.
After that, of course, the previous content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
switch off the mode (command 'c') and change display units to
sectors (command 'u').
Command (m for help): p
Disk /dev/sdf: 10.7 GB, 10737418240 bytes
255 heads, 63 sectors/track, 1305 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x796ab167
Device Boot Start End Blocks Id System
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-1305, default 1):
Using default value 1
Last cylinder, +cylinders or +size{K,M,G} (1-1305, default 1305):
Using default value 1305
Command (m for help): t
Selected partition 1
Hex code (type L to list codes): L
0 Empty 24 NEC DOS 81 Minix / old Lin bf Solaris
1 FAT12 39 Plan 9 82 Linux swap / So c1 DRDOS/sec (FAT-
2 XENIX root 3c PartitionMagic 83 Linux c4 DRDOS/sec (FAT-
3 XENIX usr 40 Venix 80286 84 OS/2 hidden C: c6 DRDOS/sec (FAT-
4 FAT16 <32M 41 PPC PReP Boot 85 Linux extended c7 Syrinx
5 Extended 42 SFS 86 NTFS volume set da Non-FS data
6 FAT16 4d QNX4.x 87 NTFS volume set db CP/M / CTOS / .
7 HPFS/NTFS 4e QNX4.x 2nd part 88 Linux plaintext de Dell Utility
8 AIX 4f QNX4.x 3rd part 8e Linux LVM df BootIt
9 AIX bootable 50 OnTrack DM 93 Amoeba e1 DOS access
a OS/2 Boot Manag 51 OnTrack DM6 Aux 94 Amoeba BBT e3 DOS R/O
b W95 FAT32 52 CP/M 9f BSD/OS e4 SpeedStor
c W95 FAT32 (LBA) 53 OnTrack DM6 Aux a0 IBM Thinkpad hi eb BeOS fs
e W95 FAT16 (LBA) 54 OnTrackDM6 a5 FreeBSD ee GPT
f W95 Ext'd (LBA) 55 EZ-Drive a6 OpenBSD ef EFI (FAT-12/16/
10 OPUS 56 Golden Bow a7 NeXTSTEP f0 Linux/PA-RISC b
11 Hidden FAT12 5c Priam Edisk a8 Darwin UFS f1 SpeedStor
12 Compaq diagnost 61 SpeedStor a9 NetBSD f4 SpeedStor
14 Hidden FAT16 <3 63 GNU HURD or Sys ab Darwin boot f2 DOS secondary
16 Hidden FAT16 64 Novell Netware af HFS / HFS+ fb VMware VMFS
17 Hidden HPFS/NTF 65 Novell Netware b7 BSDI fs fc VMware VMKCORE
18 AST SmartSleep 70 DiskSecure Mult b8 BSDI swap fd Linux raid auto
1b Hidden W95 FAT3 75 PC/IX bb Boot Wizard hid fe LANstep
1c Hidden W95 FAT3 80 Old Minix be Solaris boot ff BBT
1e Hidden W95 FAT1
Hex code (type L to list codes): 8e
Changed system type of partition 1 to 8e (Linux LVM)
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
# pvcreate /dev/sdf1
dev_is_mpath: failed to get device for 8:81
Physical volume "/dev/sdf1" successfully created
# vgextend vg_collabn1 /dev/sdf1
Volume group "vg_collabn1" successfully extended
# lvdisplay
# lvextend -L 45G /dev/vg_collabn1/lv_root
Extending logical volume lv_root to 45.00 GiB
Logical volume lv_root successfully resized
# resize2fs /dev/vg_collabn1/lv_root 45G
resize2fs 1.43-WIP (20-Jun-2013)
Filesystem at /dev/vg_collabn1/lv_root is mounted on /; on-line resizing required
old_desc_blocks = 3, new_desc_blocks = 3
The filesystem on /dev/vg_collabn1/lv_root is now 11796480 blocks long.
# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 45G 25G 18G 59% /
tmpfs 2.2G 167M 2.1G 8% /dev/shm
/dev/sda1 477M 130M 319M 29% /boot
12cR1 680G 644G 36G 95% /media/sf_12cR1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_collabn1-lv_root 45G 27G 16G 65% /
tmpfs 2.2G 631M 1.6G 29% /dev/shm
/dev/sda1 477M 122M 326M 28% /boot
12cR1 680G 644G 36G 95% /media/sf_12cR1
$ srvctl stop database -d RAC
$ crsctl stat res -t
$ srvctl remove database -d RAC
$ srvctl add database -d RAC -o '/u01/products/rdbms_12102'
$ srvctl add instance -d RAC -i RAC1 -n collabn1
$ srvctl add instance -d RAC -i RAC2 -n collabn2
$ srvctl start database -d RAC
Problem: 12.1.0.2 GI: oratab being wrongly modified after instance restarts [Doc ID 1922908.1]
--*********************************************************************************************
-- 1. Download and Unzip the Latest OPatch (Specific to Database Release) to all cluster nodes:
--*********************************************************************************************
$ rm -rf /u01/products/grid_12102/OPatch/*
$ rm -rf /u01/products/rdbms_12102/OPatch/*
$ unzip /media/sf_12cR1/Patch/p6880880_121010_Linux-x86-64.zip -d /u01/products/grid_12102/
$ unzip /media/sf_12cR1/Patch/p6880880_121010_Linux-x86-64.zip -d /u01/products/rdbms_12102/
$ /u01/products/grid_12102/OPatch/opatch version
$ /u01/products/rdbms_12102/OPatch/opatch version
--***************************************************************
-- 2. Validate and Record Pre-Patch information :
--***************************************************************
Validate using the following commands:
$ cd $GRID_HOME/OPatch
$ ./opatch lsinventory -oh /u01/products/grid_12102/
$ cd $ORACLE_HOME/OPatch
$ ./opatch lsinventory -oh /u01/products/rdbms_12102
--***************************************************************
-- 3. Create OCM Response File If It Does Not Exist :
--***************************************************************
Create ocm response file using the following command and provide appropriate values for the prompts.
# cd /u01/products/grid_12102/OPatch/ocm/bin/
# ./emocmrsp
# cd /u01/products/rdbms_12102/OPatch/ocm/bin/
# ./emocmrsp
Verify the created file using:
$ emocmrsp -verbose ocm.rsp
--***************************************************************
-- 4. Download and Unzip the JUL2014 PSU patch : (as grid user)
--***************************************************************
# mkdir -p /u01/products/patches
# chown -R oracle:oinstall /u01/products/patches
# chmod -R 775 /u01/products/patches
unzip /media/sf_12cR1/Patch/p18894342_121020_Linux-x86-64.zip -d /u01/products/patches
--***************************************************************
-- 7. Patch Application :
--***************************************************************
$ export PATH=$PATH:/u01/products/grid_12102/OPatch
# opatchauto apply /u01/products/patches/18894342 -ocmrf /u01/products/rdbms_12102/OPatch/ocm/bin/ocm.rsp
--***************************************************************
-- 9. Verification of Patch application :
--***************************************************************
Validate using the following commands:
$ cd $GRID_HOME/OPatch
$ ./opatch lsinventory -detail -oh /u01/products/grid_12102/
$ cd $ORACLE_HOME/OPatch
$ ./opatch lsinventory -detail -oh /u01/products/rdbms_12102
$ sqlplus / as sysdba
set linesize 500
set pagesize 500
column comp_name format a40
column version format a15
column status format a13
select comp_name,version,status from dba_registry
/
--***************************************************************************
-- 10. (If Required) Roll Back the Oracle RAC Database Homes and GI Together
--***************************************************************************
GI Home and Database Homes that are not shared and ACFS file system is not configured.
As root user, execute the following command on each node of the cluster.
# cd $GRID_HOME/OPatch
# opatchauto rollback /u01/products/patches/18894342
Subscribe to:
Posts (Atom)