Tags: javascript, c, linux, macros, js-ctypes

FD_SET and FD_ISSET macros written in javascript


My friend and I worked on this a while ago. It is meant for use with js-ctypes. On Linux there are macros for adding file descriptors (uint32's) to a byte array and testing membership: FD_SET and FD_ISSET. The docs are here - http://linux.die.net/man/2/select

I was wondering if anyone would be able to check if I did this right, or does anyone know of anyone that has done this in javascript? I need to complete 32-bit/64-bit support for big and little endian, but if it's already out there I would love to see it, as when we worked on this we had so many uncertainties.

Here is the code, the fd_set_get_idx was the helper function this is all based on.

var MACROS = {
        /**
         * Emulates FD_SET: marks `fd` as a member of the set.
         * @param fdset Byte-indexable buffer (e.g. Uint8Array) backing the fd_set.
         * @param fd    File descriptor number.
         */
        fd_set_set: function(fdset, fd) {
            let { elem8, bitpos8 } = MACROS.fd_set_get_idx(fd);
            // OR the bit in: plain assignment (`=`) would clobber any other
            // fds whose bits live in the same byte.
            fdset[elem8] |= 1 << bitpos8;
        },
        /**
         * Emulates FD_ISSET: returns true if `fd` is a member of the set.
         */
        fd_set_isset: function(fdset, fd) {
            let { elem8, bitpos8 } = MACROS.fd_set_get_idx(fd);
            return !!(fdset[elem8] & (1 << bitpos8));
        },
        /**
         * Maps a file descriptor to { elem8, bitpos8 }: the byte index into
         * the fd_set buffer and the bit position within that byte.
         * Assumes a little-endian host. `osname` must be defined by the
         * surrounding environment (e.g. from Services.appinfo.OS).
         */
        fd_set_get_idx: function(fd) {
            if (osname == 'darwin' /*is_mac*/) {
                // Darwin's fd_set is an array of int32. On a little-endian
                // host each int32 element covers 4 bytes, so the byte index
                // is elem32 * 4 plus the byte offset inside that int32.
                // (elem32 * 8 would skip 4 bytes per element.)
                let elem32 = Math.floor(fd / 32);
                let bitpos32 = fd % 32;
                let elem8 = elem32 * 4 + Math.floor(bitpos32 / 8);
                let bitpos8 = bitpos32 % 8;
                return {'elem8': elem8, 'bitpos8': bitpos8};
            } else { // linux / bsd / solaris
                // :todo: add 32bit support
                // Here fd_set is an array of long ints, which is platform
                // dependent and not handled by typed arrays. We assume a
                // 64-bit little-endian platform (aka x86_64 linux): each
                // element covers 8 bytes, so the byte index is elem64 * 8
                // plus the byte offset inside that long.
                let elem64 = Math.floor(fd / 64);
                let bitpos64 = fd % 64;
                let elem8 = elem64 * 8 + Math.floor(bitpos64 / 8);
                let bitpos8 = bitpos64 % 8;
                return {'elem8': elem8, 'bitpos8': bitpos8};
            }
        }
};

Solution

  • I leave the burden of identifying the endianness and the word size to you.
    The code below emulates the FD_XXX functions and lets you specify the endianness and the size.

    <!doctype html>
    <html>
        <head>
            <script>
                // Word size in bytes: 4 for arrays of int32, 8 for int64.
                var SIZE_32 = 4
                var SIZE_64 = 8
    
                // Byte order inside a word: entry i is the byte offset
                // (within the word) that holds bits [8*i .. 8*i+7].
                var LITTLE_ENDIAN = [0, 1, 2, 3, 4, 5, 6, 7];
                var BIG_ENDIAN = [7, 6, 5, 4, 3, 2, 1, 0];
    
                /**
                 * Builds an fd_set-like Uint8Array with bit-manipulation
                 * helpers attached.
                 * @param setSize    Highest number of fds supported (bits).
                 * @param endianness LITTLE_ENDIAN or BIG_ENDIAN byte map.
                 * @param size       Word size in bytes (SIZE_32 or SIZE_64).
                 */
                function fdset(setSize, endianness, size)
                {
                    // One bit per fd, rounded up to whole bytes.
                    var buffer = new Uint8Array(div(setSize + 7, 8));

                    // Integer division.
                    function div(a, b)
                    {
                        return Math.floor(a / b);
                    }

                    // Maps a bit index to the byte holding it, accounting
                    // for the word size and the byte order within a word.
                    function make_index(index)
                    {
                        return div(index, 8 * size) * size + endianness[div(index % (8 * size), 8)] % size;
                    }

                    buffer.set_bit = function(index)
                    {
                        buffer[make_index(index)] |= 1 << (index % 8);
                    };

                    buffer.clear_bit = function(index)
                    {
                        // Clear only the target bit: the mask must be a
                        // shifted 1, inverted. (`~(index % 8)` would wipe
                        // unrelated low bits of the byte.)
                        buffer[make_index(index)] &= ~(1 << (index % 8));
                    };

                    buffer.get_bit = function(index)
                    {
                        return buffer[make_index(index)] & 1 << (index % 8);
                    };

                    buffer.zero = function()
                    {
                        buffer.fill(0);
                    };

                    return buffer;
                }
    
                // FD_SET(3) emulation: adds fd to the set.
                function FD_SET(fd, fdset) {
                    fdset.set_bit(fd);
                }
    
                // FD_ISSET(3) emulation: true if fd is in the set.
                function FD_ISSET(fd, fdset) {
                    return Boolean(fdset.get_bit(fd));
                }
    
                // FD_CLR(3) emulation: removes fd from the set.
                function FD_CLR(fd, fdset) {
                    return fdset.clear_bit(fd);
                }
    
                // FD_ZERO(3) emulation: empties the whole set.
                function FD_ZERO(fdset) {
                    return fdset.zero();
                }
    
    
            </script>
        </head>
        <body>
            <script>
                var s = fdset(128, LITTLE_ENDIAN, SIZE_64);

                // s is a Uint8Array
                console.log(s);

                // Lower 64-bit word: one bit per byte, ascending
                // (byte k ends up holding 1 << k).
                var ascending = [0, 9, 18, 27, 36, 45, 54, 63];
                // Upper 64-bit word: mirror image, descending
                // (byte 15 holds 1, byte 8 holds 128).
                var descending = [120, 113, 106, 99, 92, 85, 78, 71];

                for (var i = 0; i < ascending.length; i++)
                    FD_SET(ascending[i], s);
                for (var j = 0; j < descending.length; j++)
                    FD_SET(descending[j], s);

                console.log(s);

                //64 bits, BE: [128, 64, 32, 16, 8, 4, 2, 1, 1, 2, 4, 8, 16, 32, 64, 128]
                //64 bits, LE: [1, 2, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 2, 1]
                //32 bits, BE: [8, 4, 2, 1, 128, 64, 32, 16, 16, 32, 64, 128, 1, 2, 4, 8]
                //32 bits, LE: [1, 2, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 2, 1]
            </script>
        </body>
    </html>
    

    The fdset function returns an Uint8Array that you can pass to a native function or further elaborate.
    The setSize sets the maximum file descriptor supported.


    Note that js ctypes already has an array type and the usual [U]intXX_t types in the native endianness; alas, there is no type that maps to 32/64-bit integers based on the platform, and there is no sizeof operator [2], so you still need to perform an external check to detect the word size.

    Using ctypes would be more natural.
    For reference purpose here is the official implementation of the FD_XXX functions.

    You could define a struct with a single field of type array of uint32/64_t.
    Then mimic the behavior of the C source code, being careful to use UInt64 when needed [1] and avoiding shifts.


    [1] JS has only double-precision numbers: 53 bits of mantissa, 11 bits of exponent and 1 bit of sign. When the bit operators are used, the double is converted into an integer; since it is the mantissa that specifies the precision (the exponent is just a scale, the sign just an inversion), such a number can at most carry as much information as a 53-bit integer.

    [2] That I am aware of; I am not an expert at all in js ctypes.