Tags: react-native, react-native-firebase

How do I overcome "Permission Denial....obtain access using ACTION_OPEN_DOCUMENT or related APIs"?


I'm using react-native-firebase and react-native-document-picker, and I'm trying to follow the face detection tutorial.

I'm currently getting the following error despite having been granted read access through PermissionsAndroid:

Permission Denial: reading com.android.providers.media.MediaDocumentsProvider uri [uri] from pid=4746, uid=10135 requires that you obtain access using ACTION_OPEN_DOCUMENT or related APIs

I am able to display the image the user selected on screen, but the react-native-firebase function does not seem to have permission to read it. The error happens at this call: const faces = await vision().faceDetectorProcessImage(localPath);.

Any suggestions on how to give the face detection function access, or on what I am doing wrong?

My AndroidManifest.xml file contains the following:

<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.CAMERA" />
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />

Here is all the code in that component for reference:

import React, {useState} from 'react';
import { Button, Text, Image, PermissionsAndroid } from 'react-native';
import vision, { VisionFaceContourType } from '@react-native-firebase/ml-vision';
import DocumentPicker from 'react-native-document-picker';



async function processFaces(localPath) {

  console.log(localPath);
  const faces = await vision().faceDetectorProcessImage(localPath);
  console.log("Got faces");

  faces.forEach(face => {
    console.log('Head rotation on Y axis: ', face.headEulerAngleY);
    console.log('Head rotation on Z axis: ', face.headEulerAngleZ);

    console.log('Left eye open probability: ', face.leftEyeOpenProbability);
    console.log('Right eye open probability: ', face.rightEyeOpenProbability);
    console.log('Smiling probability: ', face.smilingProbability);

    face.faceContours.forEach(contour => {
      if (contour.type === VisionFaceContourType.FACE) {
        console.log('Face outline points: ', contour.points);
      }
    });
  });
}

async function pickFile () {
    // Pick a single file
    try {
        const res = await DocumentPicker.pick({
            type: [DocumentPicker.types.images],
        });
        console.log(
            res.uri,
            res.type, // mime type
            res.name,
            res.size
        );
        return res;
    } catch (err) {
        if (DocumentPicker.isCancel(err)) {
            // User cancelled the picker; exit any dialogs or menus and move on
            console.log("User cancelled");
        } else {
            console.log("Error picking file or processing faces");
            throw err;
        }
    }
}

const requestPermission = async () => {
    try {
      const granted = await PermissionsAndroid.request(
        PermissionsAndroid.PERMISSIONS.READ_EXTERNAL_STORAGE,
        {
          title: "Files Permission",
          message:
            "App needs access to your files " +
            "so you can run face detection.",
          buttonNeutral: "Ask Me Later",
          buttonNegative: "Cancel",
          buttonPositive: "OK"
        }
      );
      if (granted === PermissionsAndroid.RESULTS.GRANTED) {
        console.log("We can now read files");
      } else {
        console.log("File read permission denied");
      }
      return granted;
    } catch (err) {
      console.warn(err);
    }
  };

function FaceDetectionScreen ({navigation}) {
    const [image, setImage] = useState("");
    return (
        <>
            <Text>This is the Face detection screen.</Text>
            <Button title="Select Image to detect faces" onPress={async () => {
                const permission = await requestPermission();
                if (permission === PermissionsAndroid.RESULTS.GRANTED) {
                    const pickedImage = await pickFile();
                    if (!pickedImage) return; // picker was cancelled
                    setImage(pickedImage.uri);
                    processFaces(pickedImage.uri).then(() => console.log('Finished processing file.'));
                }
                }}/>
            <Image style={{flex: 1}} source={{ uri: image}}/>
        </>
    ); 
}

export default FaceDetectionScreen;

Solution

  • Thanks to this comment on a GitHub issue, I was able to get my code working by changing the first three lines of processFaces to:

    async function processFaces(contentUri) {
      const stat = await RNFetchBlob.fs.stat(contentUri);
      const faces = await vision().faceDetectorProcessImage(stat.path);

    after adding import RNFetchBlob from 'rn-fetch-blob' to the imports. This works because the document picker returns a content:// URI, while faceDetectorProcessImage expects a local file path; RNFetchBlob.fs.stat resolves the URI to a real filesystem path that the native ML Vision module can open directly.

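    For reference, here is a sketch of the whole function after that change; the face-logging body is unchanged from the question, and only the path resolution at the top is new:

    import RNFetchBlob from 'rn-fetch-blob';

    async function processFaces(contentUri) {
      // Resolve the content:// URI from the document picker to a real
      // filesystem path that the native ML Vision module can open.
      const stat = await RNFetchBlob.fs.stat(contentUri);
      const faces = await vision().faceDetectorProcessImage(stat.path);

      faces.forEach(face => {
        console.log('Head rotation on Y axis: ', face.headEulerAngleY);
        console.log('Head rotation on Z axis: ', face.headEulerAngleZ);
        console.log('Left eye open probability: ', face.leftEyeOpenProbability);
        console.log('Right eye open probability: ', face.rightEyeOpenProbability);
        console.log('Smiling probability: ', face.smilingProbability);

        face.faceContours.forEach(contour => {
          if (contour.type === VisionFaceContourType.FACE) {
            console.log('Face outline points: ', contour.points);
          }
        });
      });
    }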